repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
stringlengths 5-100 | stringlengths 4-254 | stringlengths 1-5 | stringlengths 4-7 | stringlengths 681-1M | stringclasses 15 values | int64 -9,223,351,895,964,839,000 to 9,223,298,349B | float64 3.5-100 | int64 15-1k | float64 0.25-0.97 | bool 1 class | float64 1.5-8.15 | bool 2 classes | bool 2 classes | bool 1 class
Grarak/grakernel-msm8930 | scripts/gcc-wrapper.py | 364 | 3936 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Invoke gcc, looking for warnings, and causing a failure if there are
# non-whitelisted warnings.
import errno
import re
import os
import sys
import subprocess
# Note that gcc uses unicode, which may depend on the locale. TODO:
# force LANG to be set to en_US.UTF-8 to get consistent warnings.
allowed_warnings = set([
"alignment.c:327",
"mmu.c:602",
"return_address.c:62",
"swab.h:49",
"SemaLambda.cpp:946",
"CGObjCGNU.cpp:1414",
"BugReporter.h:146",
"RegionStore.cpp:1904",
"SymbolManager.cpp:484",
"RewriteObjCFoundationAPI.cpp:737",
"RewriteObjCFoundationAPI.cpp:696",
"CommentParser.cpp:394",
"CommentParser.cpp:391",
"CommentParser.cpp:356",
"LegalizeDAG.cpp:3646",
"IRBuilder.h:844",
"DataLayout.cpp:193",
"transport.c:653",
"xt_socket.c:307",
"xt_socket.c:161",
"inet_hashtables.h:356",
"xc4000.c:1049",
"xc4000.c:1063",
])
# Capture the name of the object file, so we can find it.
ofile = None
warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''')
def interpret_warning(line):
"""Decode the message from gcc. The messages we care about have a filename, and a warning"""
line = line.rstrip('\n')
m = warning_re.match(line)
if m and m.group(2) not in allowed_warnings:
print "error, forbidden warning:", m.group(2)
# If there is a warning, remove any object if it exists.
if ofile:
try:
os.remove(ofile)
except OSError:
pass
sys.exit(1)
def run_gcc():
args = sys.argv[1:]
# Look for -o
try:
i = args.index('-o')
global ofile
ofile = args[i+1]
except (ValueError, IndexError):
pass
compiler = sys.argv[0]
try:
proc = subprocess.Popen(args, stderr=subprocess.PIPE)
for line in proc.stderr:
print line,
interpret_warning(line)
result = proc.wait()
except OSError as e:
result = e.errno
if result == errno.ENOENT:
print args[0] + ':',e.strerror
print 'Is your PATH set correctly?'
else:
print ' '.join(args), str(e)
return result
if __name__ == '__main__':
status = run_gcc()
sys.exit(status)
| gpl-2.0 | -4,037,045,619,782,924,300 | 32.355932 | 97 | 0.663872 | false | 3.77373 | false | false | false |
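The wrapper above only fails the build when a gcc diagnostic matches its warning regex and the captured file:line pair is not in the whitelist. A minimal, self-contained sketch of that classification step, using the same pattern but fabricated sample diagnostics (they are not taken from a real kernel build):

import re

# Same pattern as gcc-wrapper.py above: group 2 captures "file.ext:line".
warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''')
allowed_warnings = set(["alignment.c:327"])

# Fabricated sample lines, for illustration only.
samples = [
    "arch/arm/mm/alignment.c:327:10: warning: cast increases required alignment",
    "drivers/foo/bar.c:12:3: warning: unused variable 'x'",
    "note: expanded from macro",
]

for line in samples:
    m = warning_re.match(line)
    if not m:
        print("ignored:   " + line)          # not a warning line at all
    elif m.group(2) in allowed_warnings:
        print("allowed:   " + m.group(2))    # whitelisted, build continues
    else:
        print("forbidden: " + m.group(2))    # would abort the build above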
landryb/QGIS | python/plugins/processing/algs/exampleprovider/ProcessingExampleProviderPlugin.py | 26 | 1655 | # -*- coding: utf-8 -*-
"""
***************************************************************************
__init__.py
---------------------
Date : July 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'July 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import sys
import inspect
from processing.core.Processing import Processing
from exampleprovider.ExampleAlgorithmProvider import ExampleAlgorithmProvider
cmd_folder = os.path.split(inspect.getfile(inspect.currentframe()))[0]
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
class ProcessingExampleProviderPlugin:
def __init__(self):
self.provider = ExampleAlgorithmProvider()
def initGui(self):
Processing.addProvider(self.provider)
def unload(self):
Processing.removeProvider(self.provider)
| gpl-2.0 | 6,290,126,447,250,553,000 | 32.1 | 77 | 0.49426 | false | 4.811047 | false | false | false |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.6/Lib/curses/has_key.py | 81 | 5633 |
#
# Emulation of has_key() function for platforms that don't use ncurses
#
import _curses
# Table mapping curses keys to the terminfo capability name
_capability_names = {
_curses.KEY_A1: 'ka1',
_curses.KEY_A3: 'ka3',
_curses.KEY_B2: 'kb2',
_curses.KEY_BACKSPACE: 'kbs',
_curses.KEY_BEG: 'kbeg',
_curses.KEY_BTAB: 'kcbt',
_curses.KEY_C1: 'kc1',
_curses.KEY_C3: 'kc3',
_curses.KEY_CANCEL: 'kcan',
_curses.KEY_CATAB: 'ktbc',
_curses.KEY_CLEAR: 'kclr',
_curses.KEY_CLOSE: 'kclo',
_curses.KEY_COMMAND: 'kcmd',
_curses.KEY_COPY: 'kcpy',
_curses.KEY_CREATE: 'kcrt',
_curses.KEY_CTAB: 'kctab',
_curses.KEY_DC: 'kdch1',
_curses.KEY_DL: 'kdl1',
_curses.KEY_DOWN: 'kcud1',
_curses.KEY_EIC: 'krmir',
_curses.KEY_END: 'kend',
_curses.KEY_ENTER: 'kent',
_curses.KEY_EOL: 'kel',
_curses.KEY_EOS: 'ked',
_curses.KEY_EXIT: 'kext',
_curses.KEY_F0: 'kf0',
_curses.KEY_F1: 'kf1',
_curses.KEY_F10: 'kf10',
_curses.KEY_F11: 'kf11',
_curses.KEY_F12: 'kf12',
_curses.KEY_F13: 'kf13',
_curses.KEY_F14: 'kf14',
_curses.KEY_F15: 'kf15',
_curses.KEY_F16: 'kf16',
_curses.KEY_F17: 'kf17',
_curses.KEY_F18: 'kf18',
_curses.KEY_F19: 'kf19',
_curses.KEY_F2: 'kf2',
_curses.KEY_F20: 'kf20',
_curses.KEY_F21: 'kf21',
_curses.KEY_F22: 'kf22',
_curses.KEY_F23: 'kf23',
_curses.KEY_F24: 'kf24',
_curses.KEY_F25: 'kf25',
_curses.KEY_F26: 'kf26',
_curses.KEY_F27: 'kf27',
_curses.KEY_F28: 'kf28',
_curses.KEY_F29: 'kf29',
_curses.KEY_F3: 'kf3',
_curses.KEY_F30: 'kf30',
_curses.KEY_F31: 'kf31',
_curses.KEY_F32: 'kf32',
_curses.KEY_F33: 'kf33',
_curses.KEY_F34: 'kf34',
_curses.KEY_F35: 'kf35',
_curses.KEY_F36: 'kf36',
_curses.KEY_F37: 'kf37',
_curses.KEY_F38: 'kf38',
_curses.KEY_F39: 'kf39',
_curses.KEY_F4: 'kf4',
_curses.KEY_F40: 'kf40',
_curses.KEY_F41: 'kf41',
_curses.KEY_F42: 'kf42',
_curses.KEY_F43: 'kf43',
_curses.KEY_F44: 'kf44',
_curses.KEY_F45: 'kf45',
_curses.KEY_F46: 'kf46',
_curses.KEY_F47: 'kf47',
_curses.KEY_F48: 'kf48',
_curses.KEY_F49: 'kf49',
_curses.KEY_F5: 'kf5',
_curses.KEY_F50: 'kf50',
_curses.KEY_F51: 'kf51',
_curses.KEY_F52: 'kf52',
_curses.KEY_F53: 'kf53',
_curses.KEY_F54: 'kf54',
_curses.KEY_F55: 'kf55',
_curses.KEY_F56: 'kf56',
_curses.KEY_F57: 'kf57',
_curses.KEY_F58: 'kf58',
_curses.KEY_F59: 'kf59',
_curses.KEY_F6: 'kf6',
_curses.KEY_F60: 'kf60',
_curses.KEY_F61: 'kf61',
_curses.KEY_F62: 'kf62',
_curses.KEY_F63: 'kf63',
_curses.KEY_F7: 'kf7',
_curses.KEY_F8: 'kf8',
_curses.KEY_F9: 'kf9',
_curses.KEY_FIND: 'kfnd',
_curses.KEY_HELP: 'khlp',
_curses.KEY_HOME: 'khome',
_curses.KEY_IC: 'kich1',
_curses.KEY_IL: 'kil1',
_curses.KEY_LEFT: 'kcub1',
_curses.KEY_LL: 'kll',
_curses.KEY_MARK: 'kmrk',
_curses.KEY_MESSAGE: 'kmsg',
_curses.KEY_MOVE: 'kmov',
_curses.KEY_NEXT: 'knxt',
_curses.KEY_NPAGE: 'knp',
_curses.KEY_OPEN: 'kopn',
_curses.KEY_OPTIONS: 'kopt',
_curses.KEY_PPAGE: 'kpp',
_curses.KEY_PREVIOUS: 'kprv',
_curses.KEY_PRINT: 'kprt',
_curses.KEY_REDO: 'krdo',
_curses.KEY_REFERENCE: 'kref',
_curses.KEY_REFRESH: 'krfr',
_curses.KEY_REPLACE: 'krpl',
_curses.KEY_RESTART: 'krst',
_curses.KEY_RESUME: 'kres',
_curses.KEY_RIGHT: 'kcuf1',
_curses.KEY_SAVE: 'ksav',
_curses.KEY_SBEG: 'kBEG',
_curses.KEY_SCANCEL: 'kCAN',
_curses.KEY_SCOMMAND: 'kCMD',
_curses.KEY_SCOPY: 'kCPY',
_curses.KEY_SCREATE: 'kCRT',
_curses.KEY_SDC: 'kDC',
_curses.KEY_SDL: 'kDL',
_curses.KEY_SELECT: 'kslt',
_curses.KEY_SEND: 'kEND',
_curses.KEY_SEOL: 'kEOL',
_curses.KEY_SEXIT: 'kEXT',
_curses.KEY_SF: 'kind',
_curses.KEY_SFIND: 'kFND',
_curses.KEY_SHELP: 'kHLP',
_curses.KEY_SHOME: 'kHOM',
_curses.KEY_SIC: 'kIC',
_curses.KEY_SLEFT: 'kLFT',
_curses.KEY_SMESSAGE: 'kMSG',
_curses.KEY_SMOVE: 'kMOV',
_curses.KEY_SNEXT: 'kNXT',
_curses.KEY_SOPTIONS: 'kOPT',
_curses.KEY_SPREVIOUS: 'kPRV',
_curses.KEY_SPRINT: 'kPRT',
_curses.KEY_SR: 'kri',
_curses.KEY_SREDO: 'kRDO',
_curses.KEY_SREPLACE: 'kRPL',
_curses.KEY_SRIGHT: 'kRIT',
_curses.KEY_SRSUME: 'kRES',
_curses.KEY_SSAVE: 'kSAV',
_curses.KEY_SSUSPEND: 'kSPD',
_curses.KEY_STAB: 'khts',
_curses.KEY_SUNDO: 'kUND',
_curses.KEY_SUSPEND: 'kspd',
_curses.KEY_UNDO: 'kund',
_curses.KEY_UP: 'kcuu1'
}
def has_key(ch):
if isinstance(ch, str):
ch = ord(ch)
# Figure out the correct capability name for the keycode.
capability_name = _capability_names.get(ch)
if capability_name is None:
return False
#Check the current terminal description for that capability;
#if present, return true, else return false.
if _curses.tigetstr( capability_name ):
return True
else:
return False
if __name__ == '__main__':
# Compare the output of this implementation and the ncurses has_key,
# on platforms where has_key is already available
try:
L = []
_curses.initscr()
for key in _capability_names.keys():
system = _curses.has_key(key)
python = has_key(key)
if system != python:
L.append( 'Mismatch for key %s, system=%i, Python=%i'
% (_curses.keyname( key ), system, python) )
finally:
_curses.endwin()
for i in L: print i
| mit | -3,731,281,184,704,951,300 | 28.338542 | 72 | 0.571987 | false | 2.509131 | false | false | false |
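A short usage sketch for the emulation above: the standard curses module exposes the same has_key() behaviour that this file re-implements, so the check can be exercised against whatever terminal the script runs in (any terminal with a terminfo entry will do):

import curses

def probe_keys(stdscr):
    # Ask the current terminal description which of these keys it defines.
    # curses.has_key is the C implementation that has_key.py emulates.
    return {name: bool(curses.has_key(getattr(curses, name)))
            for name in ("KEY_UP", "KEY_F1", "KEY_MOUSE", "KEY_SLEFT")}

if __name__ == "__main__":
    print(curses.wrapper(probe_keys))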
PySide/Shiboken | tests/samplebinding/multi_cpp_inheritance_test.py | 6 | 3178 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of the Shiboken Python Bindings Generator project.
#
# Copyright (C) 2013 Digia Plc and/or its subsidiary(-ies).
#
# Contact: PySide team <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# version 2.1 as published by the Free Software Foundation. Please
# review the following information to ensure the GNU Lesser General
# Public License version 2.1 requirements will be met:
# http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
# #
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
'''Test cases for multiple inheritance'''
import sys
import unittest
from sample import *
class SimpleUseCase(ObjectType, Str):
def __init__(self, name):
ObjectType.__init__(self)
Str.__init__(self, name)
class SimpleUseCaseReverse(Str, ObjectType):
def __init__(self, name):
ObjectType.__init__(self)
Str.__init__(self, name)
class SimpleUseCase2(SimpleUseCase):
def __init__(self, name):
SimpleUseCase.__init__(self, name)
class ComplexUseCase(SimpleUseCase2, Point):
def __init__(self, name):
SimpleUseCase2.__init__(self, name)
Point.__init__(self)
class ComplexUseCaseReverse(Point, SimpleUseCase2):
def __init__(self, name):
SimpleUseCase2.__init__(self, name)
Point.__init__(self)
class MultipleCppDerivedTest(unittest.TestCase):
def testInstanciation(self):
s = SimpleUseCase("Hi")
self.assertEqual(s, "Hi")
s.setObjectName(s)
self.assertEqual(s.objectName(), "Hi")
def testInstanciation2(self):
s = SimpleUseCase2("Hi")
self.assertEqual(s, "Hi")
s.setObjectName(s)
self.assertEqual(s.objectName(), "Hi")
def testComplexInstanciation(self):
c = ComplexUseCase("Hi")
self.assertEqual(c, "Hi")
c.setObjectName(c)
self.assertEqual(c.objectName(), "Hi")
c.setX(2);
self.assertEqual(c.x(), 2)
class MultipleCppDerivedReverseTest(unittest.TestCase):
def testInstanciation(self):
s = SimpleUseCaseReverse("Hi")
self.assertEqual(s, "Hi")
s.setObjectName(s)
self.assertEqual(s.objectName(), "Hi")
def testInstanciation2(self):
s = SimpleUseCase2("Hi")
self.assertEqual(s, "Hi")
s.setObjectName(s)
self.assertEqual(s.objectName(), "Hi")
def testComplexInstanciation(self):
c = ComplexUseCaseReverse("Hi")
c.setObjectName(c)
self.assertEqual(c.objectName(), "Hi")
c.setX(2);
self.assertEqual(c, Point(2, 0))
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | 8,331,902,494,146,692,000 | 30.78 | 70 | 0.662052 | false | 3.640321 | true | false | false |
bazitur/brackets-python-tools | pythonfiles/parso/python/token.py | 2 | 1777 | from __future__ import absolute_import
from itertools import count
from token import *
from parso._compatibility import py_version
_counter = count(N_TOKENS)
# Never want to see this thing again.
del N_TOKENS
COMMENT = next(_counter)
tok_name[COMMENT] = 'COMMENT'
NL = next(_counter)
tok_name[NL] = 'NL'
# Sets the attributes that don't exist in these tok_name versions.
if py_version >= 30:
BACKQUOTE = next(_counter)
tok_name[BACKQUOTE] = 'BACKQUOTE'
else:
RARROW = next(_counter)
tok_name[RARROW] = 'RARROW'
ELLIPSIS = next(_counter)
tok_name[ELLIPSIS] = 'ELLIPSIS'
if py_version < 35:
ATEQUAL = next(_counter)
tok_name[ATEQUAL] = 'ATEQUAL'
ERROR_DEDENT = next(_counter)
tok_name[ERROR_DEDENT] = 'ERROR_DEDENT'
# Map from operator to number (since tokenize doesn't do this)
opmap_raw = """\
( LPAR
) RPAR
[ LSQB
] RSQB
: COLON
, COMMA
; SEMI
+ PLUS
- MINUS
* STAR
/ SLASH
| VBAR
& AMPER
< LESS
> GREATER
= EQUAL
. DOT
% PERCENT
` BACKQUOTE
{ LBRACE
} RBRACE
@ AT
== EQEQUAL
!= NOTEQUAL
<> NOTEQUAL
<= LESSEQUAL
>= GREATEREQUAL
~ TILDE
^ CIRCUMFLEX
<< LEFTSHIFT
>> RIGHTSHIFT
** DOUBLESTAR
+= PLUSEQUAL
-= MINEQUAL
*= STAREQUAL
/= SLASHEQUAL
%= PERCENTEQUAL
&= AMPEREQUAL
|= VBAREQUAL
@= ATEQUAL
^= CIRCUMFLEXEQUAL
<<= LEFTSHIFTEQUAL
>>= RIGHTSHIFTEQUAL
**= DOUBLESTAREQUAL
// DOUBLESLASH
//= DOUBLESLASHEQUAL
-> RARROW
... ELLIPSIS
"""
opmap = {}
for line in opmap_raw.splitlines():
op, name = line.split()
opmap[op] = globals()[name]
def generate_token_id(string):
"""
Uses a token in the grammar (e.g. `'+'` or `'and'`) and returns the corresponding
ID for it. The strings are part of the grammar file.
"""
try:
return opmap[string]
except KeyError:
pass
return globals()[string]
| mit | -2,425,001,939,370,699,300 | 16.086538 | 79 | 0.665729 | false | 2.889431 | false | false | false |
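A small illustrative use of the token table above. The import path mirrors this repository's copy of parso; newer parso releases replaced this module with an enum-based implementation, so treat the path and functions as assumptions tied to this vintage of the code:

# Illustrative only; assumes the old-style parso.python.token module above.
from parso.python.token import generate_token_id, opmap, tok_name

print(generate_token_id('+'))      # operator strings resolve through opmap
print(generate_token_id('NAME'))   # names resolve through the module globals
print(tok_name[opmap['->']])       # numeric IDs map back to 'RARROW'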
glaubitz/fs-uae-debian | arcade/launcher/settings/settings_header.py | 2 | 1384 | import fsui
from fsbc.util import unused
from launcher.ui.skin import Skin
class SettingsHeader(fsui.Group):
ICON_LEFT = 0
ICON_RIGHT = 1
def __init__(
self, parent, icon, title, subtitle="", icon_position=ICON_RIGHT
):
unused(subtitle)
fsui.Group.__init__(self, parent)
self.layout = fsui.HorizontalLayout()
image = icon.image(48)
self.image_view = fsui.ImageView(self, image)
if icon_position == self.ICON_LEFT:
self.layout.add(self.image_view)
self.layout.add_spacer(20)
# vert_layout = fsui.VerticalLayout()
# self.layout.add(
# vert_layout, expand=True, fill=False, valign=0.5)
self.title_label = fsui.HeadingLabel(self, title)
if Skin.fws() or True:
font = fsui.Font("Roboto", 26)
self.title_label.set_font(font)
self.layout.add(
self.title_label, expand=True, fill=False, valign=0.0
)
else:
font = self.title_label.get_font()
font.increase_size(3)
self.title_label.set_font(font)
self.layout.add(
self.title_label, expand=True, fill=False, valign=0.5
)
if icon_position == self.ICON_RIGHT:
self.layout.add_spacer(20)
self.layout.add(self.image_view)
| gpl-2.0 | -7,743,135,693,777,367,000 | 30.454545 | 72 | 0.567197 | false | 3.51269 | false | false | false |
djw8605/htcondor | src/condor_contrib/condor_pigeon/src/condor_pigeon_client/skype_linux_tools/find_chat.py | 10 | 4722 | #! /usr/bin/python
#
# Displays all multi-chats and allows to open them.
#
# (c) Copyright 2007, Vincent Oberle, [email protected]
#
# This software may be used and distributed according to the terms
# of the GNU Public License, incorporated herein by reference.
import sys
import re
from optparse import OptionParser
from skype_api import *
appname = 'chat_finder'
class SkypeChat:
def __init__(self, _chunk_size = 5, debug = False):
self.ids = None
self.chunk = 0
self.chunk_size = _chunk_size
self.topics = {}
self.members = {}
self.friendlyname = {}
self.api = SkypeAPI(appname, debug)
def init_chat_ids(self):
ret = self.api.send_and_block('SEARCH CHATS')
r = re.search (r'CHATS (.*)', ret)
if r:
self.ids = r.group(1).strip().split(', ')
# convert percentage to actual value
self.chunk_size = len(self.ids) * self.chunk_size / 100
# Gets chat info by chunks
def get_all_chats(self):
if not self.ids:
self.init_chat_ids()
lo = self.chunk
self.hi = min([self.chunk + self.chunk_size - 1, len(self.ids) - 1])
for i in self.ids[lo:self.hi]:
self.api.send('GET CHAT ' + i + ' TOPIC')
self.api.send('GET CHAT ' + i + ' MEMBERS')
self.api.send('GET CHAT ' + i + ' FRIENDLYNAME')
while True:
msgs = self.api.response_wait(1)
if not msgs: break
for reply in msgs:
r = re.search (r'CHAT (\S+) TOPIC (.+)', reply)
if r: self.topics[r.group(1).strip()] = r.group(2).strip()
r = re.search (r'CHAT (\S+) MEMBERS (.+)', reply)
if r: self.members[r.group(1).strip()] = r.group(2).strip()
r = re.search (r'CHAT (\S+) FRIENDLYNAME (.+)', reply)
if r: self.friendlyname[r.group(1).strip()] = r.group(2).strip()
self.chunk = min([self.chunk + self.chunk_size, len(self.ids) - 1])
def open_chat(self, name):
self.api.send('OPEN CHAT ' + name)
def open_chat_by_index(self, index):
self.open_chat(self.ids[index])
def print_chat_list(self, filter):
for i in self.ids[:self.hi]:
# display only multi-chats
if not self.members.has_key(i) or len(self.members[i].split()) < 3:
continue
# string we filter on
search_str = ''
if self.topics.has_key(i):
search_str = search_str + self.topics[i]
elif self.friendlyname.has_key(i):
search_str = search_str + self.friendlyname[i]
if self.members.has_key(i):
search_str = search_str + self.members[i]
search_str = search_str.lower()
if not filter or search_str.find(filter) >= 0:
t = ''
if self.topics.has_key(i): t = '**' + self.topics[i] + '**'
elif self.friendlyname.has_key(i): t = '"' + self.friendlyname[i] + '"'
if self.members.has_key(i): t = t + ' ' + self.members[i]
if t:
print str(self.ids.index(i)) + ': ' + t
def perc_loaded(self):
return str( int(float(self.chunk) / len(self.ids) * 100) )
if __name__ == "__main__":
parser = OptionParser('%prog [options]')
parser.add_option('-c', '--chunk', dest='chunk',
default = 5, help='Percentage of total number of chats to load each time (100 for all)')
parser.add_option('-d', '--debug', action='store_true', dest='debug',
default = False, help='Print debug messages')
options, args = parser.parse_args()
if len(args):
parser.print_help()
sys.exit(0)
try:
api = SkypeChat(int(options.chunk), options.debug)
except StandardError:
print 'Could not connect to Skype. Check if "' + appname + '" is authorized to connect to Skype (Options - Public API)'
sys.exit(0)
print 'Please patient while chats are being loaded...'
api.get_all_chats()
filter = None
refresh = True
while True:
if refresh:
print ''
api.print_chat_list(filter)
refresh = False
print 'Loaded: ' + api.perc_loaded() + ' %'
print 'Quick help: "/word" filter by word; "/" clean filter; "m" load more chats; number to open a chat; "q" to quit'
print '> ',
sys.stdout.flush()
result = sys.stdin.readline().strip().lower()
if not result: continue
elif result == '/':
filter = None
refresh = True
elif result[0] == '/':
filter = result[1:]
refresh = True
elif result.isdigit():
api.open_chat_by_index(int(result))
elif result[0] == '#':
api.open_chat(result)
elif result == 'm':
print 'Loading more chats...'
api.get_all_chats()
refresh = True
elif result == 'h':
print 'Skype Chat Finder: Finds old multi-chats and allows to open them'
print 'Commands:'
print ' /word Filters chat topic and member list with "word"'
print ' / Show all chats (reset filter)'
print ' m Loads more chats'
print ' Chat number Opens the chat window in Skype'
print ' q Quits'
elif result == 'q':
break
| apache-2.0 | 1,836,465,713,021,663,000 | 28.5125 | 121 | 0.623041 | false | 2.879268 | false | false | false |
dangtrinhnt/django-saml2 | djangosaml2/templatetags/idplist.py | 8 | 1419 | # Copyright (C) 2011-2012 Yaco Sistemas (http://www.yaco.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django import template
from djangosaml2.conf import config_settings_loader
register = template.Library()
class IdPListNode(template.Node):
def __init__(self, variable_name):
self.variable_name = variable_name
def render(self, context):
conf = config_settings_loader()
context[self.variable_name] = conf.idps()
return ''
@register.tag
def idplist(parser, token):
try:
tag_name, as_part, variable = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError(
'%r tag requires two arguments' % token.contents.split()[0])
if not as_part == 'as':
raise template.TemplateSyntaxError(
'%r tag first argument must be the literal "as"' % tag_name)
return IdPListNode(variable)
| apache-2.0 | -4,295,575,218,021,276,700 | 31.25 | 74 | 0.696265 | false | 3.952646 | false | false | false |
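A hedged usage sketch for the tag above, rendered from Python rather than a template file. It assumes a configured Django project with djangosaml2 installed so the idplist template library can be loaded; what conf.idps() yields depends on the pysaml2 version, so the loop only iterates over the returned container:

from django.template import Context, Template

template = Template(
    "{% load idplist %}"
    "{% idplist as idp_list %}"
    "{% for idp in idp_list %}{{ idp }}\n{% endfor %}"
)
print(template.render(Context({})))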
nevir/plexability | extern/depot_tools/third_party/boto/mashups/server.py | 91 | 14043 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
High-level abstraction of an EC2 server
"""
import boto
import boto.utils
from boto.mashups.iobject import IObject
from boto.pyami.config import Config, BotoConfigPath
from boto.mashups.interactive import interactive_shell
from boto.sdb.db.model import Model
from boto.sdb.db.property import StringProperty
import os
import StringIO
class ServerSet(list):
def __getattr__(self, name):
results = []
is_callable = False
for server in self:
try:
val = getattr(server, name)
if callable(val):
is_callable = True
results.append(val)
except:
results.append(None)
if is_callable:
self.map_list = results
return self.map
return results
def map(self, *args):
results = []
for fn in self.map_list:
results.append(fn(*args))
return results
class Server(Model):
@property
def ec2(self):
if self._ec2 is None:
self._ec2 = boto.connect_ec2()
return self._ec2
@classmethod
def Inventory(cls):
"""
Returns a list of Server instances, one for each Server object
persisted in the db
"""
l = ServerSet()
rs = cls.find()
for server in rs:
l.append(server)
return l
@classmethod
def Register(cls, name, instance_id, description=''):
s = cls()
s.name = name
s.instance_id = instance_id
s.description = description
s.save()
return s
def __init__(self, id=None, **kw):
Model.__init__(self, id, **kw)
self._reservation = None
self._instance = None
self._ssh_client = None
self._pkey = None
self._config = None
self._ec2 = None
name = StringProperty(unique=True, verbose_name="Name")
instance_id = StringProperty(verbose_name="Instance ID")
config_uri = StringProperty()
ami_id = StringProperty(verbose_name="AMI ID")
zone = StringProperty(verbose_name="Availability Zone")
security_group = StringProperty(verbose_name="Security Group", default="default")
key_name = StringProperty(verbose_name="Key Name")
elastic_ip = StringProperty(verbose_name="Elastic IP")
instance_type = StringProperty(verbose_name="Instance Type")
description = StringProperty(verbose_name="Description")
log = StringProperty()
def setReadOnly(self, value):
raise AttributeError
def getInstance(self):
if not self._instance:
if self.instance_id:
try:
rs = self.ec2.get_all_instances([self.instance_id])
except:
return None
if len(rs) > 0:
self._reservation = rs[0]
self._instance = self._reservation.instances[0]
return self._instance
instance = property(getInstance, setReadOnly, None, 'The Instance for the server')
def getAMI(self):
if self.instance:
return self.instance.image_id
ami = property(getAMI, setReadOnly, None, 'The AMI for the server')
def getStatus(self):
if self.instance:
self.instance.update()
return self.instance.state
status = property(getStatus, setReadOnly, None,
'The status of the server')
def getHostname(self):
if self.instance:
return self.instance.public_dns_name
hostname = property(getHostname, setReadOnly, None,
'The public DNS name of the server')
def getPrivateHostname(self):
if self.instance:
return self.instance.private_dns_name
private_hostname = property(getPrivateHostname, setReadOnly, None,
'The private DNS name of the server')
def getLaunchTime(self):
if self.instance:
return self.instance.launch_time
launch_time = property(getLaunchTime, setReadOnly, None,
'The time the Server was started')
def getConsoleOutput(self):
if self.instance:
return self.instance.get_console_output()
console_output = property(getConsoleOutput, setReadOnly, None,
'Retrieve the console output for server')
def getGroups(self):
if self._reservation:
return self._reservation.groups
else:
return None
groups = property(getGroups, setReadOnly, None,
'The Security Groups controlling access to this server')
def getConfig(self):
if not self._config:
remote_file = BotoConfigPath
local_file = '%s.ini' % self.instance.id
self.get_file(remote_file, local_file)
self._config = Config(local_file)
return self._config
def setConfig(self, config):
local_file = '%s.ini' % self.instance.id
fp = open(local_file)
config.write(fp)
fp.close()
self.put_file(local_file, BotoConfigPath)
self._config = config
config = property(getConfig, setConfig, None,
'The instance data for this server')
def set_config(self, config):
"""
Set SDB based config
"""
self._config = config
self._config.dump_to_sdb("botoConfigs", self.id)
def load_config(self):
self._config = Config(do_load=False)
self._config.load_from_sdb("botoConfigs", self.id)
def stop(self):
if self.instance:
self.instance.stop()
def start(self):
self.stop()
ec2 = boto.connect_ec2()
ami = ec2.get_all_images(image_ids = [str(self.ami_id)])[0]
groups = ec2.get_all_security_groups(groupnames=[str(self.security_group)])
if not self._config:
self.load_config()
if not self._config.has_section("Credentials"):
self._config.add_section("Credentials")
self._config.set("Credentials", "aws_access_key_id", ec2.aws_access_key_id)
self._config.set("Credentials", "aws_secret_access_key", ec2.aws_secret_access_key)
if not self._config.has_section("Pyami"):
self._config.add_section("Pyami")
if self._manager.domain:
self._config.set('Pyami', 'server_sdb_domain', self._manager.domain.name)
self._config.set("Pyami", 'server_sdb_name', self.name)
cfg = StringIO.StringIO()
self._config.write(cfg)
cfg = cfg.getvalue()
r = ami.run(min_count=1,
max_count=1,
key_name=self.key_name,
security_groups = groups,
instance_type = self.instance_type,
placement = self.zone,
user_data = cfg)
i = r.instances[0]
self.instance_id = i.id
self.put()
if self.elastic_ip:
ec2.associate_address(self.instance_id, self.elastic_ip)
def reboot(self):
if self.instance:
self.instance.reboot()
def get_ssh_client(self, key_file=None, host_key_file='~/.ssh/known_hosts',
uname='root'):
import paramiko
if not self.instance:
print 'No instance yet!'
return
if not self._ssh_client:
if not key_file:
iobject = IObject()
key_file = iobject.get_filename('Path to OpenSSH Key file')
self._pkey = paramiko.RSAKey.from_private_key_file(key_file)
self._ssh_client = paramiko.SSHClient()
self._ssh_client.load_system_host_keys()
self._ssh_client.load_host_keys(os.path.expanduser(host_key_file))
self._ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self._ssh_client.connect(self.instance.public_dns_name,
username=uname, pkey=self._pkey)
return self._ssh_client
def get_file(self, remotepath, localpath):
ssh_client = self.get_ssh_client()
sftp_client = ssh_client.open_sftp()
sftp_client.get(remotepath, localpath)
def put_file(self, localpath, remotepath):
ssh_client = self.get_ssh_client()
sftp_client = ssh_client.open_sftp()
sftp_client.put(localpath, remotepath)
def listdir(self, remotepath):
ssh_client = self.get_ssh_client()
sftp_client = ssh_client.open_sftp()
return sftp_client.listdir(remotepath)
def shell(self, key_file=None):
ssh_client = self.get_ssh_client(key_file)
channel = ssh_client.invoke_shell()
interactive_shell(channel)
def bundle_image(self, prefix, key_file, cert_file, size):
print 'bundling image...'
print '\tcopying cert and pk over to /mnt directory on server'
ssh_client = self.get_ssh_client()
sftp_client = ssh_client.open_sftp()
path, name = os.path.split(key_file)
remote_key_file = '/mnt/%s' % name
self.put_file(key_file, remote_key_file)
path, name = os.path.split(cert_file)
remote_cert_file = '/mnt/%s' % name
self.put_file(cert_file, remote_cert_file)
print '\tdeleting %s' % BotoConfigPath
# delete the metadata.ini file if it exists
try:
sftp_client.remove(BotoConfigPath)
except:
pass
command = 'sudo ec2-bundle-vol '
command += '-c %s -k %s ' % (remote_cert_file, remote_key_file)
command += '-u %s ' % self._reservation.owner_id
command += '-p %s ' % prefix
command += '-s %d ' % size
command += '-d /mnt '
if self.instance.instance_type == 'm1.small' or self.instance_type == 'c1.medium':
command += '-r i386'
else:
command += '-r x86_64'
print '\t%s' % command
t = ssh_client.exec_command(command)
response = t[1].read()
print '\t%s' % response
print '\t%s' % t[2].read()
print '...complete!'
def upload_bundle(self, bucket, prefix):
print 'uploading bundle...'
command = 'ec2-upload-bundle '
command += '-m /mnt/%s.manifest.xml ' % prefix
command += '-b %s ' % bucket
command += '-a %s ' % self.ec2.aws_access_key_id
command += '-s %s ' % self.ec2.aws_secret_access_key
print '\t%s' % command
ssh_client = self.get_ssh_client()
t = ssh_client.exec_command(command)
response = t[1].read()
print '\t%s' % response
print '\t%s' % t[2].read()
print '...complete!'
def create_image(self, bucket=None, prefix=None, key_file=None, cert_file=None, size=None):
iobject = IObject()
if not bucket:
bucket = iobject.get_string('Name of S3 bucket')
if not prefix:
prefix = iobject.get_string('Prefix for AMI file')
if not key_file:
key_file = iobject.get_filename('Path to RSA private key file')
if not cert_file:
cert_file = iobject.get_filename('Path to RSA public cert file')
if not size:
size = iobject.get_int('Size (in MB) of bundled image')
self.bundle_image(prefix, key_file, cert_file, size)
self.upload_bundle(bucket, prefix)
print 'registering image...'
self.image_id = self.ec2.register_image('%s/%s.manifest.xml' % (bucket, prefix))
return self.image_id
def attach_volume(self, volume, device="/dev/sdp"):
"""
Attach an EBS volume to this server
:param volume: EBS Volume to attach
:type volume: boto.ec2.volume.Volume
:param device: Device to attach to (default to /dev/sdp)
:type device: string
"""
if hasattr(volume, "id"):
volume_id = volume.id
else:
volume_id = volume
return self.ec2.attach_volume(volume_id=volume_id, instance_id=self.instance_id, device=device)
def detach_volume(self, volume):
"""
Detach an EBS volume from this server
:param volume: EBS Volume to detach
:type volume: boto.ec2.volume.Volume
"""
if hasattr(volume, "id"):
volume_id = volume.id
else:
volume_id = volume
return self.ec2.detach_volume(volume_id=volume_id, instance_id=self.instance_id)
def install_package(self, package_name):
print 'installing %s...' % package_name
command = 'yum -y install %s' % package_name
print '\t%s' % command
ssh_client = self.get_ssh_client()
t = ssh_client.exec_command(command)
response = t[1].read()
print '\t%s' % response
print '\t%s' % t[2].read()
print '...complete!'
| gpl-2.0 | 551,411,079,860,995,260 | 34.551899 | 103 | 0.588407 | false | 3.941342 | true | false | false |
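A sketch of how the Server model above was meant to be driven. It leans on the classic boto (pre-boto3) API and SimpleDB-backed models, so consider every call an assumption about that legacy stack rather than something that works against current AWS SDKs; the instance ID is a placeholder:

from boto.mashups.server import Server

# Register an existing EC2 instance under a friendly name.
server = Server.Register('web-1', 'i-0123456789abcdef0', 'demo web box')
print('%s %s' % (server.hostname, server.status))

# ServerSet fans attribute access out to every stored server.
for s in Server.Inventory():
    print('%s %s' % (s.name, s.instance_id))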
kalahbrown/HueBigSQL | desktop/core/ext-py/django-extensions-1.5.0/docs/conf.py | 33 | 6439 | # -*- coding: utf-8 -*-
#
# django-extensions documentation build configuration file, created by
# sphinx-quickstart on Wed Apr 1 20:39:40 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
#import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-extensions'
copyright = u'Copyright (C) 2008-2015 Michael Trier, Bas van Oostveen and contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.5'
# The full version, including alpha/beta/rc tags.
release = '1.5.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-extensionsdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [(
'index', 'django-extensions.tex', u'django-extensions Documentation',
u'Michael Trier, Bas van Oostveen, and contributors', 'manual'
), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| apache-2.0 | 4,065,926,971,494,356,000 | 32.190722 | 87 | 0.714397 | false | 3.756709 | true | false | false |
kriswuollett/grpc | examples/python/multiplex/route_guide_resources.py | 115 | 2221 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Common resources used in the gRPC route guide example."""
import json
import route_guide_pb2
def read_route_guide_database():
"""Reads the route guide database.
Returns:
The full contents of the route guide database as a sequence of
route_guide_pb2.Features.
"""
feature_list = []
with open("route_guide_db.json") as route_guide_db_file:
for item in json.load(route_guide_db_file):
feature = route_guide_pb2.Feature(
name=item["name"],
location=route_guide_pb2.Point(
latitude=item["location"]["latitude"],
longitude=item["location"]["longitude"]))
feature_list.append(feature)
return feature_list
| bsd-3-clause | 145,743,123,724,359,040 | 40.90566 | 72 | 0.742909 | false | 4.442 | false | false | false |
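A minimal driver for the helper above; it assumes the generated route_guide_pb2 module and route_guide_db.json are importable from the working directory, exactly as in the gRPC multiplex example:

import route_guide_resources

features = route_guide_resources.read_route_guide_database()
print("%d features loaded" % len(features))
for feature in features[:3]:
    # latitude/longitude are E7-scaled integers in the route guide proto.
    print("%s (%d, %d)" % (feature.name, feature.location.latitude,
                           feature.location.longitude))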
leiferikb/bitpop | src/third_party/pyftpdlib/src/demo/basic_ftpd.py | 4 | 2796 | #!/usr/bin/env python
# $Id: basic_ftpd.py 977 2012-01-22 23:05:09Z g.rodola $
# pyftpdlib is released under the MIT license, reproduced below:
# ======================================================================
# Copyright (C) 2007-2012 Giampaolo Rodola' <[email protected]>
#
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# ======================================================================
"""A basic FTP server which uses a DummyAuthorizer for managing 'virtual
users', setting a limit for incoming connections.
"""
import os
from pyftpdlib import ftpserver
def main():
# Instantiate a dummy authorizer for managing 'virtual' users
authorizer = ftpserver.DummyAuthorizer()
# Define a new user having full r/w permissions and a read-only
# anonymous user
authorizer.add_user('user', '12345', os.getcwd(), perm='elradfmwM')
authorizer.add_anonymous(os.getcwd())
# Instantiate FTP handler class
ftp_handler = ftpserver.FTPHandler
ftp_handler.authorizer = authorizer
# Define a customized banner (string returned when client connects)
ftp_handler.banner = "pyftpdlib %s based ftpd ready." %ftpserver.__ver__
# Specify a masquerade address and the range of ports to use for
# passive connections. Decomment in case you're behind a NAT.
#ftp_handler.masquerade_address = '151.25.42.11'
#ftp_handler.passive_ports = range(60000, 65535)
# Instantiate FTP server class and listen to 0.0.0.0:21
address = ('', 21)
ftpd = ftpserver.FTPServer(address, ftp_handler)
# set a limit for connections
ftpd.max_cons = 256
ftpd.max_cons_per_ip = 5
# start ftp server
ftpd.serve_forever()
if __name__ == '__main__':
main()
| gpl-3.0 | -593,978,601,895,551,600 | 36.28 | 76 | 0.683834 | false | 4.005731 | false | false | false |
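A client-side smoke test for the demo server above, using only the standard library. It assumes the server is running locally on port 21 with the 'user'/'12345' account that basic_ftpd.py creates:

from ftplib import FTP

ftp = FTP()
ftp.connect('127.0.0.1', 21)
ftp.login('user', '12345')
print(ftp.getwelcome())   # should echo the pyftpdlib banner set above
print(ftp.nlst())         # listing of the served working directory
ftp.quit()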
joachimmetz/plaso | plaso/parsers/asl.py | 2 | 11003 | # -*- coding: utf-8 -*-
"""The Apple System Log (ASL) file parser."""
import os
from dfdatetime import posix_time as dfdatetime_posix_time
from dfdatetime import semantic_time as dfdatetime_semantic_time
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.lib import dtfabric_helper
from plaso.lib import errors
from plaso.lib import specification
from plaso.parsers import interface
from plaso.parsers import manager
class ASLEventData(events.EventData):
"""Apple System Log (ASL) event data.
Attributes:
computer_name (str): name of the host.
extra_information (str): extra fields associated to the event.
facility (str): facility.
group_id (int): group identifier (GID).
level (str): level of criticality of the event.
message_id (int): message identifier.
message (str): message of the event.
pid (int): process identifier (PID).
read_uid (int): user identifier that can read this file, where -1
represents all.
read_gid (int): the group identifier that can read this file, where -1
represents all.
record_position (int): position of the event record.
sender (str): sender or process that created the event.
user_sid (str): user identifier (UID).
"""
DATA_TYPE = 'mac:asl:event'
def __init__(self):
"""Initializes event data."""
super(ASLEventData, self).__init__(data_type=self.DATA_TYPE)
self.computer_name = None
self.extra_information = None
self.facility = None
self.group_id = None
self.level = None
self.message_id = None
self.message = None
self.pid = None
self.read_gid = None
self.read_uid = None
self.record_position = None
self.sender = None
self.user_sid = None
class ASLFileEventData(events.EventData):
"""Apple System Log (ASL) file event data.
Attributes:
format_version (int): ASL file format version.
is_dirty (bool): True if the last log entry offset does not match value
in file header and the file is considered dirty.
"""
DATA_TYPE = 'mac:asl:file'
def __init__(self):
"""Initializes event data."""
super(ASLFileEventData, self).__init__(data_type=self.DATA_TYPE)
self.format_version = None
self.is_dirty = None
class ASLParser(interface.FileObjectParser, dtfabric_helper.DtFabricHelper):
"""Parser for Apple System Log (ASL) files."""
NAME = 'asl_log'
DATA_FORMAT = 'Apple System Log (ASL) file'
_DEFINITION_FILE = os.path.join(
os.path.dirname(__file__), 'asl.yaml')
# Most significant bit of a 64-bit string offset.
_STRING_OFFSET_MSB = 1 << 63
def _ParseRecord(self, parser_mediator, file_object, record_offset):
"""Parses a record and produces events.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (file): file-like object.
record_offset (int): offset of the record relative to the start of
the file.
Returns:
int: next record offset.
Raises:
ParseError: if the record cannot be parsed.
"""
record_map = self._GetDataTypeMap('asl_record')
try:
record, record_data_size = self._ReadStructureFromFileObject(
file_object, record_offset, record_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError((
'Unable to parse record at offset: 0x{0:08x} with error: '
'{1!s}').format(record_offset, exception))
hostname = self._ParseRecordString(
file_object, record.hostname_string_offset)
sender = self._ParseRecordString(
file_object, record.sender_string_offset)
facility = self._ParseRecordString(
file_object, record.facility_string_offset)
message = self._ParseRecordString(
file_object, record.message_string_offset)
file_offset = record_offset + record_data_size
additional_data_size = record.data_size + 6 - record_data_size
if additional_data_size % 8 != 0:
raise errors.ParseError(
'Invalid record additional data size: {0:d}.'.format(
additional_data_size))
additional_data = self._ReadData(
file_object, file_offset, additional_data_size)
extra_fields = {}
for additional_data_offset in range(0, additional_data_size - 8, 16):
record_extra_field = self._ParseRecordExtraField(
additional_data[additional_data_offset:], file_offset)
file_offset += 16
name = self._ParseRecordString(
file_object, record_extra_field.name_string_offset)
value = self._ParseRecordString(
file_object, record_extra_field.value_string_offset)
if name is not None:
extra_fields[name] = value
# TODO: implement determine previous record offset
event_data = ASLEventData()
event_data.computer_name = hostname
event_data.extra_information = ', '.join([
'{0:s}: {1!s}'.format(name, value)
for name, value in sorted(extra_fields.items())])
event_data.facility = facility
event_data.group_id = record.group_identifier
event_data.level = record.alert_level
event_data.message_id = record.message_identifier
event_data.message = message
event_data.pid = record.process_identifier
event_data.read_gid = record.real_group_identifier
event_data.read_uid = record.real_user_identifier
event_data.record_position = record_offset
event_data.sender = sender
# Note that the user_sid value is expected to be a string.
event_data.user_sid = '{0:d}'.format(record.user_identifier)
timestamp = (
(record.written_time * 1000000000) + record.written_time_nanoseconds)
date_time = dfdatetime_posix_time.PosixTimeInNanoseconds(
timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
return record.next_record_offset
def _ParseRecordExtraField(self, byte_stream, file_offset):
"""Parses a record extra field.
Args:
byte_stream (bytes): byte stream.
file_offset (int): offset of the record extra field relative to
the start of the file.
Returns:
asl_record_extra_field: record extra field.
Raises:
ParseError: if the record extra field cannot be parsed.
"""
extra_field_map = self._GetDataTypeMap('asl_record_extra_field')
try:
record_extra_field = self._ReadStructureFromByteStream(
byte_stream, file_offset, extra_field_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError((
'Unable to parse record extra field at offset: 0x{0:08x} with error: '
'{1!s}').format(file_offset, exception))
return record_extra_field
def _ParseRecordString(self, file_object, string_offset):
"""Parses a record string.
Args:
file_object (file): file-like object.
string_offset (int): offset of the string relative to the start of
the file.
Returns:
str: record string or None if string offset is 0.
Raises:
ParseError: if the record string cannot be parsed.
"""
if string_offset == 0:
return None
if string_offset & self._STRING_OFFSET_MSB:
if (string_offset >> 60) != 8:
raise errors.ParseError('Invalid inline record string flag.')
string_size = (string_offset >> 56) & 0x0f
if string_size >= 8:
raise errors.ParseError('Invalid inline record string size.')
string_data = bytes(bytearray([
string_offset >> (8 * byte_index) & 0xff
for byte_index in range(6, -1, -1)]))
try:
return string_data[:string_size].decode('utf-8')
except UnicodeDecodeError as exception:
raise errors.ParseError(
'Unable to decode inline record string with error: {0!s}.'.format(
exception))
record_string_map = self._GetDataTypeMap('asl_record_string')
try:
record_string, _ = self._ReadStructureFromFileObject(
file_object, string_offset, record_string_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError((
'Unable to parse record string at offset: 0x{0:08x} with error: '
'{1!s}').format(string_offset, exception))
return record_string.string.rstrip('\x00')
@classmethod
def GetFormatSpecification(cls):
"""Retrieves the format specification.
Returns:
FormatSpecification: format specification.
"""
format_specification = specification.FormatSpecification(cls.NAME)
format_specification.AddNewSignature(
b'ASL DB\x00\x00\x00\x00\x00\x00', offset=0)
return format_specification
def ParseFileObject(self, parser_mediator, file_object):
"""Parses an ASL file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
file_header_map = self._GetDataTypeMap('asl_file_header')
try:
file_header, _ = self._ReadStructureFromFileObject(
file_object, 0, file_header_map)
except (ValueError, errors.ParseError) as exception:
raise errors.UnableToParseFile(
'Unable to parse file header with error: {0!s}'.format(
exception))
is_dirty = False
file_size = file_object.get_size()
if file_header.first_log_entry_offset > 0:
last_log_entry_offset = 0
file_offset = file_header.first_log_entry_offset
while file_offset < file_size:
last_log_entry_offset = file_offset
try:
file_offset = self._ParseRecord(
parser_mediator, file_object, file_offset)
except errors.ParseError as exception:
parser_mediator.ProduceExtractionWarning(
'unable to parse record with error: {0!s}'.format(exception))
return
if file_offset == 0:
break
if last_log_entry_offset != file_header.last_log_entry_offset:
is_dirty = True
parser_mediator.ProduceRecoveryWarning(
'last log entry offset does not match value in file header.')
event_data = ASLFileEventData()
event_data.format_version = file_header.format_version
event_data.is_dirty = is_dirty
if file_header.creation_time:
date_time = dfdatetime_posix_time.PosixTime(
timestamp=file_header.creation_time)
else:
date_time = dfdatetime_semantic_time.NotSet()
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
manager.ParsersManager.RegisterParser(ASLParser)
| apache-2.0 | 137,836,419,511,823,490 | 31.943114 | 80 | 0.669908 | false | 3.860702 | false | false | false |
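The most compact trick in the parser above is _ParseRecordString's handling of ASL inline strings, where the top bit of a 64-bit "offset" flags that the string bytes are packed into the offset itself rather than stored elsewhere in the file. A standalone illustration of that decoding, with a fabricated packed value:

_STRING_OFFSET_MSB = 1 << 63

def decode_inline_string(string_offset):
    # Mirrors the inline branch of ASLParser._ParseRecordString above.
    if not string_offset & _STRING_OFFSET_MSB:
        raise ValueError('offset points into the file, not an inline string')
    if (string_offset >> 60) != 8:
        raise ValueError('invalid inline record string flag')
    string_size = (string_offset >> 56) & 0x0f
    data = bytes(bytearray(
        (string_offset >> (8 * index)) & 0xff for index in range(6, -1, -1)))
    return data[:string_size].decode('utf-8')

# Pack the 3-byte string "ASL" the way the file format does, then decode it.
packed = (0x8 << 60) | (3 << 56) | (ord('A') << 48) | (ord('S') << 40) | (ord('L') << 32)
print(decode_inline_string(packed))  # -> ASL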
SamReha/Concepts-Game | languages/pt-br.py | 88 | 7249 | # -*- coding: utf-8 -*-
{
'!langcode!': 'pt-br',
'!langname!': 'Português (do Brasil)',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" é uma expressão opcional como "campo1=\'novovalor\'". Você não pode atualizar ou apagar os resultados de um JOIN',
'%s %%{row} deleted': '%s linhas apagadas',
'%s %%{row} updated': '%s linhas atualizadas',
'%s selected': '%s selecionado',
'%Y-%m-%d': '%d-%m-%Y',
'%Y-%m-%d %H:%M:%S': '%d-%m-%Y %H:%M:%S',
'About': 'Sobre',
'Access Control': 'Controle de Acesso',
'Administrative Interface': 'Interface Administrativa',
'@markmin\x01An error occured, please [[reload %s]] the page': 'Ocorreu um erro, por favor [[reload %s]] a página',
'Administrative interface': 'Interface administrativa',
'Ajax Recipes': 'Receitas de Ajax',
'appadmin is disabled because insecure channel': 'Administração desativada porque o canal não é seguro',
'Are you sure you want to delete this object?': 'Você está certo que deseja apagar este objeto?',
'Available Databases and Tables': 'Bancos de dados e tabelas disponíveis',
'Buy this book': 'Compre o livro',
'cache': 'cache',
'Cache': 'Cache',
'Cache Keys': 'Chaves de cache',
'Cannot be empty': 'Não pode ser vazio',
'change password': 'modificar senha',
'Check to delete': 'Marque para apagar',
'Clear CACHE?': 'Limpar CACHE?',
'Clear DISK': 'Limpar DISCO',
'Clear RAM': 'Limpar memória RAM',
'Client IP': 'IP do cliente',
'Community': 'Comunidade',
'Components and Plugins': 'Componentes e Plugins',
'Controller': 'Controlador',
'Copyright': 'Copyright',
'Current request': 'Requisição atual',
'Current response': 'Resposta atual',
'Current session': 'Sessão atual',
'customize me!': 'Personalize-me!',
'data uploaded': 'dados enviados',
'Database': 'banco de dados',
'Database %s select': 'Selecionar banco de dados %s',
'db': 'bd',
'DB Model': 'Modelo BD',
'Delete:': 'Apagar:',
'Demo': 'Demo',
'Deployment Recipes': 'Receitas de deploy',
'Description': 'Descrição',
'design': 'projeto',
'DISK': 'DISK',
'Disk Cache Keys': 'Chaves do Cache de Disco',
'Disk Cleared': 'Disco Limpo',
'Documentation': 'Documentação',
"Don't know what to do?": "Não sabe o que fazer?",
'done!': 'concluído!',
'Download': 'Download',
'E-mail': 'E-mail',
'Edit': 'Editar',
'Edit current record': 'Editar o registro atual',
'edit profile': 'editar perfil',
'Edit This App': 'Editar esta aplicação',
'Email and SMS': 'Email e SMS',
'Errors': 'Erros',
'Enter an integer between %(min)g and %(max)g': 'Informe um valor inteiro entre %(min)g e %(max)g',
'export as csv file': 'exportar como um arquivo csv',
'FAQ': 'Perguntas frequentes',
'First name': 'Nome',
'Forms and Validators': 'Formulários e Validadores',
'Free Applications': 'Aplicações gratuitas',
'Group ID': 'ID do Grupo',
'Groups': 'Grupos',
'Hello World': 'Olá Mundo',
'Home': 'Principal',
'How did you get here?': 'Como você chegou aqui?',
'import': 'importar',
'Import/Export': 'Importar/Exportar',
'Index': 'Início',
'insert new': 'inserir novo',
'insert new %s': 'inserir novo %s',
'Internal State': 'Estado Interno',
'Introduction': 'Introdução',
'Invalid email': 'Email inválido',
'Invalid Query': 'Consulta Inválida',
'invalid request': 'requisição inválida',
'Key': 'Chave',
'Last name': 'Sobrenome',
'Layout': 'Layout',
'Layout Plugins': 'Plugins de Layout',
'Layouts': 'Layouts',
'Live chat': 'Chat ao vivo',
'Live Chat': 'Chat ao vivo',
'login': 'Entrar',
'Login': 'Autentique-se',
'logout': 'Sair',
'Lost Password': 'Esqueceu sua senha?',
'lost password?': 'esqueceu sua senha?',
'Main Menu': 'Menu Principal',
'Manage Cache': 'Gerenciar Cache',
'Menu Model': 'Modelo de Menu',
'My Sites': 'Meus sites',
'Name': 'Nome',
'New Record': 'Novo Registro',
'new record inserted': 'novo registro inserido',
'next 100 rows': 'próximas 100 linhas',
'No databases in this application': 'Não há bancos de dados nesta aplicação',
'Object or table name': 'Nome do objeto ou da tabela',
'Online examples': 'Exemplos online',
'or import from csv file': 'ou importar de um arquivo csv',
'Origin': 'Origem',
'Other Plugins': 'Outros Plugins',
'Other Recipes': 'Outras Receitas',
'Overview': 'Visão Geral',
'Password': 'Senha',
'Plugins': 'Plugins',
'Powered by': 'Desenvolvido com',
'Preface': 'Prefácio',
'previous 100 rows': '100 linhas anteriores',
'Python': 'Python',
'Query:': 'Consulta:',
'Quick Examples': 'Exemplos rápidos',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Cleared',
'Recipes': 'Receitas',
'Record': 'Registro',
'record does not exist': 'registro não existe',
'Record ID': 'ID do Registro',
'Record id': 'id do registro',
'Register': 'Registre-se',
'register': 'Registre-se',
'Registration key': 'Chave de registro',
'Reset Password key': 'Resetar chave de senha',
'Resources': 'Recursos',
'Role': 'Papel',
'Registration identifier': 'Idenficador de registro',
'Rows in Table': 'Linhas na tabela',
'Rows selected': 'Linhas selecionadas',
'Semantic': 'Semântico',
'Services': 'Serviços',
'Size of cache:': 'Tamanho do cache:',
'state': 'estado',
'Statistics': 'Estatísticas',
'Stylesheet': 'Folha de estilo',
'submit': 'enviar',
'Support': 'Suporte',
'Sure you want to delete this object?': 'Está certo(a) que deseja apagar este objeto?',
'Table': 'Tabela',
'Table name': 'Nome da tabela',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'Uma "consulta" é uma condição como "db.tabela1.campo1==\'valor\'". Expressões como "db.tabela1.campo1==db.tabela2.campo2" resultam em um JOIN SQL.',
'The Core': 'The Core',
'The output of the file is a dictionary that was rendered by the view %s': 'A saída do arquivo é um dicionário que foi apresentado pela visão %s',
'The Views': 'As views',
'This App': 'Esta aplicação',
'This email already has an account': 'Este email já tem uma conta',
'This is a copy of the scaffolding application': 'Isto é uma cópia da aplicação modelo',
'Time in Cache (h:m:s)': 'Tempo em Cache (h:m:s)',
'Timestamp': 'Timestamp',
'Twitter': 'Twitter',
'unable to parse csv file': 'não foi possível analisar arquivo csv',
'Update:': 'Atualizar:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) para AND, (...)|(...) para OR, e ~(...) para NOT para construir consultas mais complexas.',
'User ID': 'ID do Usuário',
'User Voice': 'Opinião dos usuários',
'Videos': 'Vídeos',
'View': 'Visualização',
'Web2py': 'Web2py',
'Welcome': 'Bem-vindo',
'Welcome %s': 'Bem-vindo %s',
'Welcome to web2py': 'Bem-vindo ao web2py',
'Welcome to web2py!': 'Bem-vindo ao web2py!',
'Which called the function %s located in the file %s': 'Que chamou a função %s localizada no arquivo %s',
'You are successfully running web2py': 'Você está executando o web2py com sucesso',
'You are successfully running web2py.': 'Você está executando o web2py com sucesso.',
'You can modify this application and adapt it to your needs': 'Você pode modificar esta aplicação e adaptá-la às suas necessidades',
'You visited the url %s': 'Você acessou a url %s',
'Working...': 'Trabalhando...',
}
| gpl-2.0 | 7,410,207,669,485,902,000 | 39.693182 | 290 | 0.683608 | false | 2.711852 | false | false | false |
ubic135/odoo-design | addons/auth_signup/__openerp__.py | 313 | 1571 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
{
'name': 'Signup',
'description': """
Allow users to sign up and reset their password
===============================================
""",
'author': 'OpenERP SA',
'version': '1.0',
'category': 'Authentication',
'website': 'https://www.odoo.com',
'installable': True,
'auto_install': True,
'depends': [
'base_setup',
'email_template',
'web',
],
'data': [
'auth_signup_data.xml',
'res_config.xml',
'res_users_view.xml',
'views/auth_signup_login.xml',
],
'bootstrap': True,
}
| agpl-3.0 | 9,124,591,899,266,322,000 | 33.152174 | 78 | 0.552514 | false | 4.257453 | false | false | false |
dlazz/ansible | lib/ansible/plugins/connection/funcd.py | 59 | 3533 | # Based on local.py (c) 2012, Michael DeHaan <[email protected]>
# Based on chroot.py (c) 2013, Maykel Moya <[email protected]>
# Copyright (c) 2013, Michael Scherer <[email protected]>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
author: Michael Scherer (@msherer) <[email protected]>
connection: funcd
short_description: Use funcd to connect to target
description:
- This transport permits you to use Ansible over Func.
- For people who have already setup func and that wish to play with ansible,
this permit to move gradually to ansible without having to redo completely the setup of the network.
version_added: "1.1"
options:
remote_addr:
description:
- The path of the chroot you want to access.
default: inventory_hostname
vars:
- name: ansible_host
- name: ansible_func_host
"""
HAVE_FUNC = False
try:
import func.overlord.client as fc
HAVE_FUNC = True
except ImportError:
pass
import os
import tempfile
import shutil
from ansible.errors import AnsibleError
from ansible.utils.display import Display
display = Display()
class Connection(object):
''' Func-based connections '''
has_pipelining = False
def __init__(self, runner, host, port, *args, **kwargs):
self.runner = runner
self.host = host
# port is unused, this go on func
self.port = port
def connect(self, port=None):
if not HAVE_FUNC:
raise AnsibleError("func is not installed")
self.client = fc.Client(self.host)
return self
def exec_command(self, cmd, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
''' run a command on the remote minion '''
if in_data:
raise AnsibleError("Internal Error: this module does not support optimized module pipelining")
# totally ignores privlege escalation
display.vvv("EXEC %s" % (cmd), host=self.host)
p = self.client.command.run(cmd)[self.host]
return (p[0], p[1], p[2])
def _normalize_path(self, path, prefix):
if not path.startswith(os.path.sep):
path = os.path.join(os.path.sep, path)
normpath = os.path.normpath(path)
return os.path.join(prefix, normpath[1:])
def put_file(self, in_path, out_path):
''' transfer a file from local to remote '''
out_path = self._normalize_path(out_path, '/')
display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
self.client.local.copyfile.send(in_path, out_path)
def fetch_file(self, in_path, out_path):
''' fetch a file from remote to local '''
in_path = self._normalize_path(in_path, '/')
display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
# need to use a tmp dir due to difference of semantic for getfile
# ( who take a # directory as destination) and fetch_file, who
# take a file directly
tmpdir = tempfile.mkdtemp(prefix="func_ansible")
self.client.local.getfile.get(in_path, tmpdir)
shutil.move(os.path.join(tmpdir, self.host, os.path.basename(in_path)), out_path)
shutil.rmtree(tmpdir)
def close(self):
''' terminate the connection; nothing to do here '''
pass
| gpl-3.0 | 8,316,919,214,908,818,000 | 33.300971 | 110 | 0.638834 | false | 3.70724 | false | false | false |
titasakgm/brc-stock | openerp/addons/report_geraldo/lib/geraldo/site/newsite/django_1_0/tests/regressiontests/null_queries/models.py | 16 | 1789 | from django.db import models
class Poll(models.Model):
question = models.CharField(max_length=200)
def __unicode__(self):
return u"Q: %s " % self.question
class Choice(models.Model):
poll = models.ForeignKey(Poll)
choice = models.CharField(max_length=200)
def __unicode__(self):
return u"Choice: %s in poll %s" % (self.choice, self.poll)
__test__ = {'API_TESTS':"""
# Regression test for the use of None as a query value. None is interpreted as
# an SQL NULL, but only in __exact queries.
# Set up some initial polls and choices
>>> p1 = Poll(question='Why?')
>>> p1.save()
>>> c1 = Choice(poll=p1, choice='Because.')
>>> c1.save()
>>> c2 = Choice(poll=p1, choice='Why Not?')
>>> c2.save()
# Exact query with value None returns nothing ("is NULL" in sql, but every 'id'
# field has a value).
>>> Choice.objects.filter(choice__exact=None)
[]
Excluding the previous result returns everything.
>>> Choice.objects.exclude(choice=None).order_by('id')
[<Choice: Choice: Because. in poll Q: Why? >, <Choice: Choice: Why Not? in poll Q: Why? >]
# Valid query, but fails because foo isn't a keyword
>>> Choice.objects.filter(foo__exact=None)
Traceback (most recent call last):
...
FieldError: Cannot resolve keyword 'foo' into field. Choices are: choice, id, poll
# Can't use None on anything other than __exact
>>> Choice.objects.filter(id__gt=None)
Traceback (most recent call last):
...
ValueError: Cannot use None as a query value
# Can't use None on anything other than __exact
>>> Choice.objects.filter(foo__gt=None)
Traceback (most recent call last):
...
ValueError: Cannot use None as a query value
# Related managers use __exact=None implicitly if the object hasn't been saved.
>>> p2 = Poll(question="How?")
>>> p2.choice_set.all()
[]
"""}
| agpl-3.0 | -7,627,443,570,640,662,000 | 29.322034 | 90 | 0.681386 | false | 3.325279 | false | false | false |
snnn/tensorflow | tensorflow/contrib/losses/python/losses/loss_ops.py | 19 | 28132 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Loss operations for use in neural networks.
Note: All the losses are added to the `GraphKeys.LOSSES` collection.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.deprecation import deprecated_argument_lookup
__all__ = [
"absolute_difference", "add_loss", "cosine_distance",
"compute_weighted_loss", "get_losses", "get_regularization_losses",
"get_total_loss", "hinge_loss", "log_loss", "mean_pairwise_squared_error",
"mean_squared_error", "sigmoid_cross_entropy", "softmax_cross_entropy",
"sparse_softmax_cross_entropy"
]
def _scale_losses(losses, weights):
"""Computes the scaled loss.
Args:
losses: A `Tensor` of size [batch_size, d1, ... dN].
weights: A `Tensor` of size [1], [batch_size] or [batch_size, d1, ... dN].
The `losses` are reduced (tf.reduce_sum) until its dimension matches
that of `weights` at which point the reduced `losses` are element-wise
multiplied by `weights` and a final reduce_sum is computed on the result.
Conceptually, this operation is equivalent to broadcasting (tiling)
`weights` to be the same size as `losses`, performing an element-wise
multiplication, and summing the result.
Returns:
A scalar tf.float32 `Tensor` whose value represents the sum of the scaled
`losses`.
"""
# First, compute the sum of the losses over all elements:
start_index = max(0, weights.get_shape().ndims)
reduction_indices = list(range(start_index, losses.get_shape().ndims))
reduced_losses = math_ops.reduce_sum(
losses, reduction_indices=reduction_indices)
reduced_losses = math_ops.multiply(reduced_losses, weights)
return math_ops.reduce_sum(reduced_losses)
def _safe_div(numerator, denominator, name="value"):
"""Computes a safe divide which returns 0 if the denominator is zero.
Note that the function contains an additional conditional check that is
necessary for avoiding situations where the loss is zero causing NaNs to
creep into the gradient computation.
Args:
numerator: An arbitrary `Tensor`.
denominator: A `Tensor` whose shape matches `numerator` and whose values are
assumed to be non-negative.
name: An optional name for the returned op.
Returns:
The element-wise value of the numerator divided by the denominator.
"""
return array_ops.where(
math_ops.greater(denominator, 0),
math_ops.div(numerator,
array_ops.where(
math_ops.equal(denominator, 0),
array_ops.ones_like(denominator), denominator)),
array_ops.zeros_like(numerator),
name=name)
def _safe_mean(losses, num_present):
"""Computes a safe mean of the losses.
Args:
losses: A tensor whose elements contain individual loss measurements.
num_present: The number of measurable losses in the tensor.
Returns:
A scalar representing the mean of the losses. If `num_present` is zero,
then zero is returned.
"""
total_loss = math_ops.reduce_sum(losses)
return _safe_div(total_loss, num_present)
@deprecated("2016-12-30", "Use tf.losses.compute_weighted_loss instead.")
def compute_weighted_loss(losses, weights=1.0, scope=None):
"""Computes the weighted loss.
Args:
losses: A tensor of size [batch_size, d1, ... dN].
weights: A tensor of size [1] or [batch_size, d1, ... dK] where K < N.
scope: the scope for the operations performed in computing the loss.
Returns:
A scalar `Tensor` that returns the weighted loss.
Raises:
ValueError: If `weights` is `None` or the shape is not compatible with
`losses`, or if the number of dimensions (rank) of either `losses` or
`weights` is missing.
"""
with ops.name_scope(scope, "weighted_loss", [losses, weights]):
losses = ops.convert_to_tensor(losses)
input_dtype = losses.dtype
losses = math_ops.to_float(losses)
weights = math_ops.to_float(ops.convert_to_tensor(weights))
if losses.get_shape().ndims is None:
raise ValueError("losses.get_shape().ndims cannot be None")
weights_shape = weights.get_shape()
if weights_shape.ndims is None:
raise ValueError("weights.get_shape().ndims cannot be None")
if weights_shape.ndims > 1 and weights_shape.dims[-1].is_compatible_with(1):
weights = array_ops.squeeze(weights, [-1])
total_loss = _scale_losses(losses, weights)
num_present = _num_present(losses, weights)
mean_loss = _safe_mean(total_loss, num_present)
# convert the result back to the input type
mean_loss = math_ops.cast(mean_loss, input_dtype)
add_loss(mean_loss)
return mean_loss
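# Numerical sketch of the behaviour above (illustrative, not part of the original
# file): with losses = [0.5, 1.0, 2.0] and weights = [1.0, 0.0, 1.0], the scaled
# sum is 2.5 and only two elements count as present, so the returned loss is
# 2.5 / 2 = 1.25; the zero-weighted sample neither contributes nor dilutes.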
def _num_present(losses, weights, per_batch=False):
"""Computes the number of elements in the loss function induced by `weights`.
A given weights tensor induces different numbers of usable elements in the
`losses` tensor. The `weights` tensor is broadcast across `losses` for all
possible dimensions. For example, if `losses` is a tensor of dimension
[4, 5, 6, 3] and `weights` is a tensor of size [4, 5], then `weights` is, in
effect, tiled to match the size of `losses`. Following this effective tile,
the total number of present elements is the number of non-zero weights.
Args:
losses: A tensor of size [batch_size, d1, ... dN].
weights: A tensor of size [1] or [batch_size, d1, ... dK] where K < N.
per_batch: Whether to return the number of elements per batch or as a sum
total.
Returns:
The number of present (non-zero) elements in the losses tensor. If
`per_batch` is True, the value is returned as a tensor of size
[batch_size]. Otherwise, a single scalar tensor is returned.
"""
# If weights is a scalar, its easy to compute:
if weights.get_shape().ndims == 0:
batch_size = array_ops.reshape(
array_ops.slice(array_ops.shape(losses), [0], [1]), [])
num_per_batch = math_ops.div(
math_ops.to_float(array_ops.size(losses)),
math_ops.to_float(batch_size))
num_per_batch = array_ops.where(
math_ops.equal(weights, 0), 0.0, num_per_batch)
num_per_batch = math_ops.multiply(
array_ops.ones(array_ops.reshape(batch_size, [1])), num_per_batch)
return num_per_batch if per_batch else math_ops.reduce_sum(num_per_batch)
# First, count the number of nonzero weights:
if weights.get_shape().ndims >= 1:
reduction_indices = list(range(1, weights.get_shape().ndims))
num_nonzero_per_batch = math_ops.reduce_sum(
math_ops.to_float(math_ops.not_equal(weights, 0)),
reduction_indices=reduction_indices)
# Next, determine the number of elements that weights would broadcast to:
broadcast_dims = array_ops.slice(
array_ops.shape(losses), [weights.get_shape().ndims], [-1])
num_to_broadcast = math_ops.to_float(math_ops.reduce_prod(broadcast_dims))
num_per_batch = math_ops.multiply(num_nonzero_per_batch, num_to_broadcast)
return num_per_batch if per_batch else math_ops.reduce_sum(num_per_batch)
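# Illustrative example of the broadcasting rule above: for losses of shape
# [4, 5, 6, 3] and weights of shape [4, 5] with no zero entries, the induced
# count is 4 * 5 * 6 * 3 = 360; every zero entry in `weights` removes the
# 6 * 3 = 18 loss elements it would have been broadcast across.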
@deprecated("2016-12-30", "Use tf.losses.add_loss instead.")
@add_arg_scope
def add_loss(loss, loss_collection=ops.GraphKeys.LOSSES):
"""Adds a externally defined loss to the collection of losses.
Args:
loss: A loss `Tensor`.
loss_collection: Optional collection to add the loss to.
"""
if loss_collection:
ops.add_to_collection(loss_collection, loss)
@deprecated("2016-12-30", "Use tf.losses.get_losses instead.")
def get_losses(scope=None, loss_collection=ops.GraphKeys.LOSSES):
"""Gets the list of losses from the loss_collection.
Args:
scope: an optional scope for filtering the losses to return.
loss_collection: Optional losses collection.
Returns:
a list of loss tensors.
"""
return ops.get_collection(loss_collection, scope)
@deprecated("2016-12-30", "Use tf.losses.get_regularization_losses instead.")
def get_regularization_losses(scope=None):
"""Gets the regularization losses.
Args:
scope: an optional scope for filtering the losses to return.
Returns:
A list of regularization losses as Tensors.
"""
return ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES, scope)
@deprecated("2016-12-30", "Use tf.losses.get_total_loss instead.")
def get_total_loss(add_regularization_losses=True, name="total_loss"):
"""Returns a tensor whose value represents the total loss.
Notice that the function adds the given losses to the regularization losses.
Args:
add_regularization_losses: A boolean indicating whether or not to use the
regularization losses in the sum.
name: The name of the returned tensor.
Returns:
A `Tensor` whose value represents the total loss.
Raises:
ValueError: if `losses` is not iterable.
"""
losses = get_losses()
if add_regularization_losses:
losses += get_regularization_losses()
return math_ops.add_n(losses, name=name)
@deprecated("2016-12-30", "Use tf.losses.absolute_difference instead.")
def absolute_difference(predictions, labels=None, weights=1.0, scope=None):
"""Adds an Absolute Difference loss to the training procedure.
`weights` acts as a coefficient for the loss. If a scalar is provided, then
the loss is simply scaled by the given value. If `weights` is a tensor of size
[batch_size], then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weights` vector. If the shape of
`weights` matches the shape of `predictions`, then the loss of each
measurable element of `predictions` is scaled by the corresponding value of
`weights`.
Args:
predictions: The predicted outputs.
labels: The ground truth output tensor, same dimensions as 'predictions'.
weights: Coefficients for the loss a scalar, a tensor of shape
[batch_size] or a tensor whose shape matches `predictions`.
scope: The scope for the operations performed in computing the loss.
Returns:
A scalar `Tensor` representing the loss value.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
if the shape of `weights` is invalid.
"""
with ops.name_scope(scope, "absolute_difference",
[predictions, labels, weights]) as scope:
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
predictions = math_ops.to_float(predictions)
labels = math_ops.to_float(labels)
losses = math_ops.abs(math_ops.subtract(predictions, labels))
return compute_weighted_loss(losses, weights, scope=scope)
@deprecated("2016-12-30",
"Use tf.losses.sigmoid_cross_entropy instead. Note that the order "
"of the predictions and labels arguments has been changed.")
def sigmoid_cross_entropy(logits,
multi_class_labels,
weights=1.0,
label_smoothing=0,
scope=None):
"""Creates a cross-entropy loss using tf.nn.sigmoid_cross_entropy_with_logits.
`weights` acts as a coefficient for the loss. If a scalar is provided,
then the loss is simply scaled by the given value. If `weights` is a
tensor of size [`batch_size`], then the loss weights apply to each
corresponding sample.
If `label_smoothing` is nonzero, smooth the labels towards 1/2:
new_multiclass_labels = multiclass_labels * (1 - label_smoothing)
+ 0.5 * label_smoothing
Args:
logits: [batch_size, num_classes] logits outputs of the network .
multi_class_labels: [batch_size, num_classes] labels in (0, 1).
weights: Coefficients for the loss. The tensor must be a scalar, a tensor of
shape [batch_size] or shape [batch_size, num_classes].
label_smoothing: If greater than 0 then smooth the labels.
scope: The scope for the operations performed in computing the loss.
Returns:
A scalar `Tensor` representing the loss value.
Raises:
ValueError: If the shape of `logits` doesn't match that of
`multi_class_labels` or if the shape of `weights` is invalid, or if
`weights` is None.
"""
with ops.name_scope(scope, "sigmoid_cross_entropy_loss",
[logits, multi_class_labels, weights]) as scope:
logits.get_shape().assert_is_compatible_with(multi_class_labels.get_shape())
multi_class_labels = math_ops.cast(multi_class_labels, logits.dtype)
if label_smoothing > 0:
multi_class_labels = (
multi_class_labels * (1 - label_smoothing) + 0.5 * label_smoothing)
losses = nn.sigmoid_cross_entropy_with_logits(
labels=multi_class_labels, logits=logits, name="xentropy")
return compute_weighted_loss(losses, weights, scope=scope)
@deprecated("2016-12-30",
"Use tf.losses.softmax_cross_entropy instead. Note that the order "
"of the logits and labels arguments has been changed.")
def softmax_cross_entropy(logits,
onehot_labels,
weights=1.0,
label_smoothing=0,
scope=None):
"""Creates a cross-entropy loss using tf.nn.softmax_cross_entropy_with_logits.
`weights` acts as a coefficient for the loss. If a scalar is provided,
then the loss is simply scaled by the given value. If `weights` is a
tensor of size [`batch_size`], then the loss weights apply to each
corresponding sample.
If `label_smoothing` is nonzero, smooth the labels towards 1/num_classes:
new_onehot_labels = onehot_labels * (1 - label_smoothing)
+ label_smoothing / num_classes
Args:
logits: [batch_size, num_classes] logits outputs of the network .
onehot_labels: [batch_size, num_classes] one-hot-encoded labels.
weights: Coefficients for the loss. The tensor must be a scalar or a tensor
of shape [batch_size].
label_smoothing: If greater than 0 then smooth the labels.
scope: the scope for the operations performed in computing the loss.
Returns:
A scalar `Tensor` representing the mean loss value.
Raises:
ValueError: If the shape of `logits` doesn't match that of `onehot_labels`
or if the shape of `weights` is invalid or if `weights` is None.
"""
with ops.name_scope(scope, "softmax_cross_entropy_loss",
[logits, onehot_labels, weights]) as scope:
logits.get_shape().assert_is_compatible_with(onehot_labels.get_shape())
onehot_labels = math_ops.cast(onehot_labels, logits.dtype)
if label_smoothing > 0:
num_classes = math_ops.cast(
array_ops.shape(onehot_labels)[1], logits.dtype)
smooth_positives = 1.0 - label_smoothing
smooth_negatives = label_smoothing / num_classes
onehot_labels = onehot_labels * smooth_positives + smooth_negatives
losses = nn.softmax_cross_entropy_with_logits(
labels=onehot_labels, logits=logits, name="xentropy")
return compute_weighted_loss(losses, weights, scope=scope)
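# Worked example of the label smoothing above (illustrative): with
# num_classes = 4 and label_smoothing = 0.1, smooth_positives = 0.9 and
# smooth_negatives = 0.025, so a one-hot row [0, 1, 0, 0] becomes
# [0.025, 0.925, 0.025, 0.025] before the cross-entropy is computed.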
@deprecated("2016-12-30",
"Use tf.losses.sparse_softmax_cross_entropy instead. Note that "
"the order of the logits and labels arguments has been changed.")
def sparse_softmax_cross_entropy(logits, labels, weights=1.0, scope=None):
"""Cross-entropy loss using `tf.nn.sparse_softmax_cross_entropy_with_logits`.
`weights` acts as a coefficient for the loss. If a scalar is provided,
then the loss is simply scaled by the given value. If `weights` is a
tensor of size [`batch_size`], then the loss weights apply to each
corresponding sample.
Args:
logits: [batch_size, num_classes] logits outputs of the network .
labels: [batch_size, 1] or [batch_size] labels of dtype `int32` or `int64`
in the range `[0, num_classes)`.
weights: Coefficients for the loss. The tensor must be a scalar or a tensor
of shape [batch_size] or [batch_size, 1].
scope: the scope for the operations performed in computing the loss.
Returns:
A scalar `Tensor` representing the mean loss value.
Raises:
ValueError: If the shapes of `logits`, `labels`, and `weights` are
incompatible, or if `weights` is None.
"""
with ops.name_scope(scope, "sparse_softmax_cross_entropy_loss",
[logits, labels, weights]) as scope:
labels = array_ops.reshape(labels, shape=[array_ops.shape(labels)[0]])
losses = nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name="xentropy")
return compute_weighted_loss(losses, weights, scope=scope)
@deprecated("2016-12-30",
"Use tf.losses.log_loss instead. Note that the order of the "
"predictions and labels arguments has been changed.")
def log_loss(predictions, labels=None, weights=1.0, epsilon=1e-7, scope=None):
"""Adds a Log Loss term to the training procedure.
`weights` acts as a coefficient for the loss. If a scalar is provided, then
the loss is simply scaled by the given value. If `weights` is a tensor of size
[batch_size], then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weights` vector. If the shape of
`weights` matches the shape of `predictions`, then the loss of each
measurable element of `predictions` is scaled by the corresponding value of
`weights`.
Args:
predictions: The predicted outputs.
labels: The ground truth output tensor, same dimensions as 'predictions'.
weights: Coefficients for the loss a scalar, a tensor of shape
[batch_size] or a tensor whose shape matches `predictions`.
epsilon: A small increment to add to avoid taking a log of zero.
scope: The scope for the operations performed in computing the loss.
Returns:
A scalar `Tensor` representing the loss value.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
if the shape of `weights` is invalid.
"""
with ops.name_scope(scope, "log_loss",
[predictions, labels, weights]) as scope:
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
predictions = math_ops.to_float(predictions)
labels = math_ops.to_float(labels)
losses = -math_ops.multiply(
labels, math_ops.log(predictions + epsilon)) - math_ops.multiply(
(1 - labels), math_ops.log(1 - predictions + epsilon))
return compute_weighted_loss(losses, weights, scope=scope)
@deprecated("2016-12-30",
"Use tf.losses.hinge_loss instead. Note that the order of the "
"logits and labels arguments has been changed, and to stay "
"unweighted, reduction=Reduction.NONE")
def hinge_loss(logits, labels=None, scope=None):
"""Method that returns the loss tensor for hinge loss.
Args:
logits: The logits, a float tensor. Note that logits are assumed to be
unbounded and 0-centered. A value > 0 (resp. < 0) is considered a positive
(resp. negative) binary prediction.
labels: The ground truth output tensor. Its shape should match the shape of
logits. The values of the tensor are expected to be 0.0 or 1.0. Internally
the {0,1} labels are converted to {-1,1} when calculating the hinge loss.
scope: The scope for the operations performed in computing the loss.
Returns:
    An unweighted `Tensor` of same shape as `logits` and `labels` representing
    the loss values across the batch.
Raises:
ValueError: If the shapes of `logits` and `labels` don't match.
"""
with ops.name_scope(scope, "hinge_loss", [logits, labels]) as scope:
logits.get_shape().assert_is_compatible_with(labels.get_shape())
# We first need to convert binary labels to -1/1 labels (as floats).
labels = math_ops.to_float(labels)
all_ones = array_ops.ones_like(labels)
labels = math_ops.subtract(2 * labels, all_ones)
return nn_ops.relu(
math_ops.subtract(all_ones, math_ops.multiply(labels, logits)))
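# Illustrative check of the conversion above: labels [0., 1.] are mapped to
# [-1., 1.], so for logits [0.3, 0.3] the element-wise hinge values are
# relu(1 - (-1) * 0.3) = 1.3 and relu(1 - 1 * 0.3) = 0.7.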
@deprecated("2016-12-30", "Use tf.losses.mean_squared_error instead.")
def mean_squared_error(predictions, labels=None, weights=1.0, scope=None):
"""Adds a Sum-of-Squares loss to the training procedure.
`weights` acts as a coefficient for the loss. If a scalar is provided, then
the loss is simply scaled by the given value. If `weights` is a tensor of size
[batch_size], then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weights` vector. If the shape of
`weights` matches the shape of `predictions`, then the loss of each
measurable element of `predictions` is scaled by the corresponding value of
`weights`.
Args:
predictions: The predicted outputs.
labels: The ground truth output tensor, same dimensions as 'predictions'.
weights: Coefficients for the loss a scalar, a tensor of shape
[batch_size] or a tensor whose shape matches `predictions`.
scope: The scope for the operations performed in computing the loss.
Returns:
A scalar `Tensor` representing the loss value.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
if the shape of `weights` is invalid.
"""
with ops.name_scope(scope, "mean_squared_error",
[predictions, labels, weights]) as scope:
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
predictions = math_ops.to_float(predictions)
labels = math_ops.to_float(labels)
losses = math_ops.square(math_ops.subtract(predictions, labels))
return compute_weighted_loss(losses, weights, scope=scope)
@deprecated("2016-12-30",
"Use tf.losses.mean_pairwise_squared_error instead. Note that the "
"order of the predictions and labels arguments has been changed.")
def mean_pairwise_squared_error(predictions,
labels=None,
weights=1.0,
scope=None):
"""Adds a pairwise-errors-squared loss to the training procedure.
Unlike `mean_squared_error`, which is a measure of the differences between
corresponding elements of `predictions` and `labels`,
`mean_pairwise_squared_error` is a measure of the differences between pairs of
corresponding elements of `predictions` and `labels`.
For example, if `labels`=[a, b, c] and `predictions`=[x, y, z], there are
three pairs of differences are summed to compute the loss:
loss = [ ((a-b) - (x-y)).^2 + ((a-c) - (x-z)).^2 + ((b-c) - (y-z)).^2 ] / 3
Note that since the inputs are of size [batch_size, d0, ... dN], the
corresponding pairs are computed within each batch sample but not across
samples within a batch. For example, if `predictions` represents a batch of
16 grayscale images of dimension [batch_size, 100, 200], then the set of pairs
is drawn from each image, but not across images.
`weights` acts as a coefficient for the loss. If a scalar is provided, then
the loss is simply scaled by the given value. If `weights` is a tensor of size
[batch_size], then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weights` vector.
Args:
predictions: The predicted outputs, a tensor of size [batch_size, d0, .. dN]
where N+1 is the total number of dimensions in `predictions`.
labels: The ground truth output tensor, whose shape must match the shape of
the `predictions` tensor.
weights: Coefficients for the loss a scalar, a tensor of shape [batch_size]
or a tensor whose shape matches `predictions`.
scope: The scope for the operations performed in computing the loss.
Returns:
A scalar `Tensor` representing the loss value.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
if the shape of `weights` is invalid.
"""
with ops.name_scope(scope, "mean_pairwise_squared_error",
[predictions, labels, weights]) as scope:
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
predictions = math_ops.to_float(predictions)
labels = math_ops.to_float(labels)
weights = math_ops.to_float(ops.convert_to_tensor(weights))
diffs = math_ops.subtract(predictions, labels)
# Need to verify here since the function doesn't use compute_weighted_loss
if diffs.get_shape().ndims is None:
raise ValueError("diffs.get_shape().ndims cannot be None")
if weights.get_shape().ndims is None:
raise ValueError("weights.get_shape().ndims cannot be None")
reduction_indices = list(range(1, diffs.get_shape().ndims))
sum_squares_diff_per_batch = math_ops.reduce_sum(
math_ops.square(diffs), reduction_indices=reduction_indices)
num_present_per_batch = _num_present(diffs, weights, per_batch=True)
term1 = 2.0 * _safe_div(sum_squares_diff_per_batch, num_present_per_batch)
sum_diff = math_ops.reduce_sum(diffs, reduction_indices=reduction_indices)
term2 = 2.0 * _safe_div(
math_ops.square(sum_diff), math_ops.square(num_present_per_batch))
loss = _scale_losses(term1 - term2, weights)
mean_loss = array_ops.where(
math_ops.reduce_sum(num_present_per_batch) > 0,
loss,
array_ops.zeros_like(loss),
name="value")
add_loss(mean_loss)
return mean_loss
@deprecated("2016-12-30", "Use tf.losses.cosine_distance instead.")
@deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def cosine_distance(predictions,
labels=None,
axis=None,
weights=1.0,
scope=None,
dim=None):
"""Adds a cosine-distance loss to the training procedure.
Note that the function assumes that `predictions` and `labels` are already
unit-normalized.
Args:
predictions: An arbitrary matrix.
labels: A `Tensor` whose shape matches 'predictions'
axis: The dimension along which the cosine distance is computed.
weights: Coefficients for the loss a scalar, a tensor of shape
[batch_size] or a tensor whose shape matches `predictions`.
scope: The scope for the operations performed in computing the loss.
dim: The old (deprecated) name for `axis`.
Returns:
A scalar `Tensor` representing the loss value.
Raises:
ValueError: If `predictions` shape doesn't match `labels` shape, or
`weights` is `None`.
"""
axis = deprecated_argument_lookup(
"axis", axis, "dim", dim)
if axis is None:
raise ValueError("You must specify 'axis'.")
with ops.name_scope(scope, "cosine_distance_loss",
[predictions, labels, weights]) as scope:
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
predictions = math_ops.to_float(predictions)
labels = math_ops.to_float(labels)
radial_diffs = math_ops.multiply(predictions, labels)
losses = 1 - math_ops.reduce_sum(
radial_diffs, reduction_indices=[
axis,
])
return compute_weighted_loss(losses, weights, scope=scope)
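# Illustrative sketch: for already unit-normalized rows predictions = [[1., 0.]]
# and labels = [[0., 1.]] with axis=1, the radial products sum to 0, so the
# per-sample loss is 1.0; identical unit vectors would give a loss of 0.0.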
| apache-2.0 | -8,104,154,935,110,994,000 | 40.738872 | 80 | 0.688789 | false | 3.864286 | false | false | false |
TaylorOshan/pysal | pysal/contrib/glm/links.py | 35 | 20123 | '''
Defines the link functions to be used with GLM and GEE families.
'''
import numpy as np
import scipy.stats
FLOAT_EPS = np.finfo(float).eps
class Link(object):
"""
A generic link function for one-parameter exponential family.
`Link` does nothing, but lays out the methods expected of any subclass.
"""
def __call__(self, p):
"""
Return the value of the link function. This is just a placeholder.
Parameters
----------
p : array-like
Probabilities
Returns
-------
g(p) : array-like
The value of the link function g(p) = z
"""
return NotImplementedError
def inverse(self, z):
"""
Inverse of the link function. Just a placeholder.
Parameters
----------
z : array-like
`z` is usually the linear predictor of the transformed variable
in the IRLS algorithm for GLM.
Returns
-------
g^(-1)(z) : array
The value of the inverse of the link function g^(-1)(z) = p
"""
return NotImplementedError
def deriv(self, p):
"""
Derivative of the link function g'(p). Just a placeholder.
Parameters
----------
p : array-like
Returns
-------
g'(p) : array
The value of the derivative of the link function g'(p)
"""
return NotImplementedError
def deriv2(self, p):
"""Second derivative of the link function g''(p)
implemented through numerical differentiation
"""
from statsmodels.tools.numdiff import approx_fprime_cs
        # TODO: work around problem with numdiff for 1d
return np.diag(approx_fprime_cs(p, self.deriv))
def inverse_deriv(self, z):
"""
Derivative of the inverse link function g^(-1)(z).
Notes
-----
This reference implementation gives the correct result but is
inefficient, so it can be overriden in subclasses.
Parameters
----------
z : array-like
`z` is usually the linear predictor for a GLM or GEE model.
Returns
-------
g'^(-1)(z) : array
The value of the derivative of the inverse of the link function
"""
return 1 / self.deriv(self.inverse(z))
class Logit(Link):
"""
The logit transform
Notes
-----
call and derivative use a private method _clean to make trim p by
machine epsilon so that p is in (0,1)
Alias of Logit:
logit = Logit()
"""
def _clean(self, p):
"""
Clip logistic values to range (eps, 1-eps)
Parameters
-----------
p : array-like
Probabilities
Returns
--------
pclip : array
Clipped probabilities
"""
return np.clip(p, FLOAT_EPS, 1. - FLOAT_EPS)
def __call__(self, p):
"""
The logit transform
Parameters
----------
p : array-like
Probabilities
Returns
-------
z : array
Logit transform of `p`
Notes
-----
g(p) = log(p / (1 - p))
"""
p = self._clean(p)
return np.log(p / (1. - p))
def inverse(self, z):
"""
Inverse of the logit transform
Parameters
----------
z : array-like
The value of the logit transform at `p`
Returns
-------
p : array
Probabilities
Notes
-----
g^(-1)(z) = exp(z)/(1+exp(z))
"""
z = np.asarray(z)
t = np.exp(-z)
return 1. / (1. + t)
def deriv(self, p):
"""
Derivative of the logit transform
Parameters
----------
p: array-like
Probabilities
Returns
-------
g'(p) : array
Value of the derivative of logit transform at `p`
Notes
-----
g'(p) = 1 / (p * (1 - p))
Alias for `Logit`:
logit = Logit()
"""
p = self._clean(p)
return 1. / (p * (1 - p))
def inverse_deriv(self, z):
"""
Derivative of the inverse of the logit transform
Parameters
----------
z : array-like
`z` is usually the linear predictor for a GLM or GEE model.
Returns
-------
g'^(-1)(z) : array
The value of the derivative of the inverse of the logit function
"""
t = np.exp(z)
return t/(1 + t)**2
def deriv2(self, p):
"""
Second derivative of the logit function.
Parameters
----------
p : array-like
probabilities
Returns
-------
g''(z) : array
The value of the second derivative of the logit function
"""
v = p * (1 - p)
return (2*p - 1) / v**2
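# Quick numerical sketch (illustrative, not part of the original module):
#   Logit()(0.75)           -> log(0.75 / 0.25) ~= 1.0986
#   Logit().inverse(1.0986) ~= 0.75
#   Logit().deriv(0.75)     -> 1 / (0.75 * 0.25) ~= 5.3333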
class logit(Logit):
pass
class Power(Link):
"""
The power transform
Parameters
----------
power : float
The exponent of the power transform
Notes
-----
Aliases of Power:
inverse = Power(power=-1)
sqrt = Power(power=.5)
inverse_squared = Power(power=-2.)
identity = Power(power=1.)
"""
def __init__(self, power=1.):
self.power = power
def __call__(self, p):
"""
Power transform link function
Parameters
----------
p : array-like
Mean parameters
Returns
-------
z : array-like
Power transform of x
Notes
-----
g(p) = x**self.power
"""
z = np.power(p, self.power)
return z
def inverse(self, z):
"""
Inverse of the power transform link function
Parameters
----------
`z` : array-like
Value of the transformed mean parameters at `p`
Returns
-------
`p` : array
Mean parameters
Notes
-----
        g^(-1)(`z`) = `z`**(1/`power`)
"""
p = np.power(z, 1. / self.power)
return p
def deriv(self, p):
"""
Derivative of the power transform
Parameters
----------
p : array-like
Mean parameters
Returns
--------
g'(p) : array
Derivative of power transform of `p`
Notes
-----
g'(`p`) = `power` * `p`**(`power` - 1)
"""
return self.power * np.power(p, self.power - 1)
def deriv2(self, p):
"""
Second derivative of the power transform
Parameters
----------
p : array-like
Mean parameters
Returns
--------
g''(p) : array
Second derivative of the power transform of `p`
Notes
-----
g''(`p`) = `power` * (`power` - 1) * `p`**(`power` - 2)
"""
return self.power * (self.power - 1) * np.power(p, self.power - 2)
def inverse_deriv(self, z):
"""
Derivative of the inverse of the power transform
Parameters
----------
z : array-like
`z` is usually the linear predictor for a GLM or GEE model.
Returns
-------
g^(-1)'(z) : array
The value of the derivative of the inverse of the power transform
function
"""
return np.power(z, (1 - self.power)/self.power) / self.power
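# Illustrative check: Power(power=2.)(3.) -> 9.0, Power(power=2.).inverse(9.) -> 3.0,
# and Power(power=2.).deriv(3.) -> 2 * 3 ** 1 = 6.0.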
class inverse_power(Power):
"""
The inverse transform
Notes
-----
g(p) = 1/p
Alias of statsmodels.family.links.Power(power=-1.)
"""
def __init__(self):
super(inverse_power, self).__init__(power=-1.)
class sqrt(Power):
"""
The square-root transform
Notes
-----
g(`p`) = sqrt(`p`)
Alias of statsmodels.family.links.Power(power=.5)
"""
def __init__(self):
super(sqrt, self).__init__(power=.5)
class inverse_squared(Power):
"""
The inverse squared transform
Notes
-----
    g(`p`) = 1/(`p`**2)
    Alias of statsmodels.family.links.Power(power=-2.)
"""
def __init__(self):
super(inverse_squared, self).__init__(power=-2.)
class identity(Power):
"""
The identity transform
Notes
-----
g(`p`) = `p`
Alias of statsmodels.family.links.Power(power=1.)
"""
def __init__(self):
super(identity, self).__init__(power=1.)
class Log(Link):
"""
The log transform
Notes
-----
call and derivative call a private method _clean to trim the data by
machine epsilon so that p is in (0,1). log is an alias of Log.
"""
def _clean(self, x):
return np.clip(x, FLOAT_EPS, np.inf)
def __call__(self, p, **extra):
"""
Log transform link function
Parameters
----------
x : array-like
Mean parameters
Returns
-------
z : array
log(x)
Notes
-----
g(p) = log(p)
"""
x = self._clean(p)
return np.log(x)
def inverse(self, z):
"""
Inverse of log transform link function
Parameters
----------
z : array
The inverse of the link function at `p`
Returns
-------
p : array
The mean probabilities given the value of the inverse `z`
Notes
-----
g^{-1}(z) = exp(z)
"""
return np.exp(z)
def deriv(self, p):
"""
Derivative of log transform link function
Parameters
----------
p : array-like
Mean parameters
Returns
-------
g'(p) : array
derivative of log transform of x
Notes
-----
g'(x) = 1/x
"""
p = self._clean(p)
return 1. / p
def deriv2(self, p):
"""
Second derivative of the log transform link function
Parameters
----------
p : array-like
Mean parameters
Returns
-------
g''(p) : array
Second derivative of log transform of x
Notes
-----
g''(x) = -1/x^2
"""
p = self._clean(p)
return -1. / p**2
def inverse_deriv(self, z):
"""
Derivative of the inverse of the log transform link function
Parameters
----------
z : array
The inverse of the link function at `p`
Returns
-------
g^(-1)'(z) : array
The value of the derivative of the inverse of the log function,
the exponential function
"""
return np.exp(z)
class log(Log):
"""
The log transform
Notes
-----
log is a an alias of Log.
"""
pass
# TODO: the CDFLink is untested
class CDFLink(Logit):
"""
The use the CDF of a scipy.stats distribution
CDFLink is a subclass of logit in order to use its _clean method
for the link and its derivative.
Parameters
----------
dbn : scipy.stats distribution
Default is dbn=scipy.stats.norm
Notes
-----
The CDF link is untested.
"""
def __init__(self, dbn=scipy.stats.norm):
self.dbn = dbn
def __call__(self, p):
"""
CDF link function
Parameters
----------
p : array-like
Mean parameters
Returns
-------
z : array
(ppf) inverse of CDF transform of p
Notes
-----
g(`p`) = `dbn`.ppf(`p`)
"""
p = self._clean(p)
return self.dbn.ppf(p)
def inverse(self, z):
"""
The inverse of the CDF link
Parameters
----------
z : array-like
The value of the inverse of the link function at `p`
Returns
-------
p : array
Mean probabilities. The value of the inverse of CDF link of `z`
Notes
-----
g^(-1)(`z`) = `dbn`.cdf(`z`)
"""
return self.dbn.cdf(z)
def deriv(self, p):
"""
Derivative of CDF link
Parameters
----------
p : array-like
mean parameters
Returns
-------
g'(p) : array
The derivative of CDF transform at `p`
Notes
-----
g'(`p`) = 1./ `dbn`.pdf(`dbn`.ppf(`p`))
"""
p = self._clean(p)
return 1. / self.dbn.pdf(self.dbn.ppf(p))
def deriv2(self, p):
"""
Second derivative of the link function g''(p)
implemented through numerical differentiation
"""
from statsmodels.tools.numdiff import approx_fprime
p = np.atleast_1d(p)
# Note: special function for norm.ppf does not support complex
return np.diag(approx_fprime(p, self.deriv, centered=True))
def inverse_deriv(self, z):
"""
Derivative of the inverse of the CDF transformation link function
Parameters
----------
z : array
The inverse of the link function at `p`
Returns
-------
g^(-1)'(z) : array
The value of the derivative of the inverse of the logit function
"""
return 1/self.deriv(self.inverse(z))
class probit(CDFLink):
"""
The probit (standard normal CDF) transform
Notes
--------
g(p) = scipy.stats.norm.ppf(p)
probit is an alias of CDFLink.
"""
pass
class cauchy(CDFLink):
"""
The Cauchy (standard Cauchy CDF) transform
Notes
-----
g(p) = scipy.stats.cauchy.ppf(p)
cauchy is an alias of CDFLink with dbn=scipy.stats.cauchy
"""
def __init__(self):
super(cauchy, self).__init__(dbn=scipy.stats.cauchy)
def deriv2(self, p):
"""
Second derivative of the Cauchy link function.
Parameters
----------
p: array-like
Probabilities
Returns
-------
g''(p) : array
Value of the second derivative of Cauchy link function at `p`
"""
a = np.pi * (p - 0.5)
d2 = 2 * np.pi**2 * np.sin(a) / np.cos(a)**3
return d2
class CLogLog(Logit):
"""
The complementary log-log transform
CLogLog inherits from Logit in order to have access to its _clean method
for the link and its derivative.
Notes
-----
CLogLog is untested.
"""
def __call__(self, p):
"""
C-Log-Log transform link function
Parameters
----------
p : array
Mean parameters
Returns
-------
z : array
The CLogLog transform of `p`
Notes
-----
g(p) = log(-log(1-p))
"""
p = self._clean(p)
return np.log(-np.log(1 - p))
def inverse(self, z):
"""
Inverse of C-Log-Log transform link function
Parameters
----------
z : array-like
The value of the inverse of the CLogLog link function at `p`
Returns
-------
p : array
Mean parameters
Notes
-----
g^(-1)(`z`) = 1-exp(-exp(`z`))
"""
return 1 - np.exp(-np.exp(z))
def deriv(self, p):
"""
Derivative of C-Log-Log transform link function
Parameters
----------
p : array-like
Mean parameters
Returns
-------
g'(p) : array
The derivative of the CLogLog transform link function
Notes
-----
g'(p) = - 1 / ((p-1)*log(1-p))
"""
p = self._clean(p)
return 1. / ((p - 1) * (np.log(1 - p)))
def deriv2(self, p):
"""
Second derivative of the C-Log-Log ink function
Parameters
----------
p : array-like
Mean parameters
Returns
-------
g''(p) : array
The second derivative of the CLogLog link function
"""
p = self._clean(p)
fl = np.log(1 - p)
d2 = -1 / ((1 - p)**2 * fl)
d2 *= 1 + 1 / fl
return d2
def inverse_deriv(self, z):
"""
Derivative of the inverse of the C-Log-Log transform link function
Parameters
----------
z : array-like
The value of the inverse of the CLogLog link function at `p`
Returns
-------
g^(-1)'(z) : array
The derivative of the inverse of the CLogLog link function
"""
return np.exp(z - np.exp(z))
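# Numerical sketch (illustrative): for p = 0.5,
#   CLogLog()(0.5)            -> log(-log(0.5)) ~= -0.3665
#   CLogLog().inverse(-0.3665) -> 1 - exp(-exp(-0.3665)) ~= 0.5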
class cloglog(CLogLog):
"""
The CLogLog transform link function.
Notes
-----
g(`p`) = log(-log(1-`p`))
cloglog is an alias for CLogLog
cloglog = CLogLog()
"""
pass
class NegativeBinomial(object):
'''
The negative binomial link function
Parameters
----------
alpha : float, optional
Alpha is the ancillary parameter of the Negative Binomial link
function. It is assumed to be nonstochastic. The default value is 1.
Permissible values are usually assumed to be in (.01, 2).
'''
def __init__(self, alpha=1.):
self.alpha = alpha
def _clean(self, x):
return np.clip(x, FLOAT_EPS, np.inf)
def __call__(self, p):
'''
Negative Binomial transform link function
Parameters
----------
p : array-like
Mean parameters
Returns
-------
z : array
The negative binomial transform of `p`
Notes
-----
g(p) = log(p/(p + 1/alpha))
'''
p = self._clean(p)
return np.log(p/(p + 1/self.alpha))
def inverse(self, z):
'''
Inverse of the negative binomial transform
Parameters
-----------
z : array-like
The value of the inverse of the negative binomial link at `p`.
Returns
-------
p : array
Mean parameters
Notes
-----
g^(-1)(z) = exp(z)/(alpha*(1-exp(z)))
'''
return -1/(self.alpha * (1 - np.exp(-z)))
def deriv(self, p):
'''
Derivative of the negative binomial transform
Parameters
----------
p : array-like
Mean parameters
Returns
-------
g'(p) : array
The derivative of the negative binomial transform link function
Notes
-----
g'(x) = 1/(x+alpha*x^2)
'''
return 1/(p + self.alpha * p**2)
def deriv2(self,p):
'''
Second derivative of the negative binomial link function.
Parameters
----------
p : array-like
Mean parameters
Returns
-------
g''(p) : array
The second derivative of the negative binomial transform link
function
Notes
-----
g''(x) = -(1+2*alpha*x)/(x+alpha*x^2)^2
'''
numer = -(1 + 2 * self.alpha * p)
denom = (p + self.alpha * p**2)**2
return numer / denom
def inverse_deriv(self, z):
'''
Derivative of the inverse of the negative binomial transform
Parameters
-----------
z : array-like
Usually the linear predictor for a GLM or GEE model
Returns
-------
g^(-1)'(z) : array
The value of the derivative of the inverse of the negative
binomial link
'''
t = np.exp(z)
return t / (self.alpha * (1-t)**2)
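# Numerical sketch (illustrative): with the default alpha = 1 and p = 1.0,
#   NegativeBinomial()(1.0)             -> log(1 / (1 + 1)) ~= -0.6931
#   NegativeBinomial().inverse(-0.6931) ~= 1.0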
class nbinom(NegativeBinomial):
"""
The negative binomial link function.
Notes
-----
g(p) = log(p/(p + 1/alpha))
nbinom is an alias of NegativeBinomial.
nbinom = NegativeBinomial(alpha=1.)
"""
pass
| bsd-3-clause | -4,901,116,892,870,221,000 | 20.115425 | 77 | 0.476917 | false | 4.201921 | false | false | false |
athena-voice/athena-voice-client | athena/modules/active/athena_control.py | 1 | 1425 | """
Controls the Athena assistant itself: quitting, listing the loaded modules,
and enabling or disabling individual modules.
Usage Examples:
    - "Athena, quit"
    - "List modules"
    - "Disable <module name>"
"""
from athena.classes.module import Module
from athena.classes.task import ActiveTask
from athena import brain
class QuitTask(ActiveTask):
def __init__(self):
super().__init__(patterns=[r'\b(athena )?(quit|stop)\b.*'])
def action(self, text):
brain.inst.quit()
class ListModulesTask(ActiveTask):
def __init__(self):
super().__init__(words=['list modules', 'list mods'])
def action(self, text):
brain.inst.list_mods()
class ToggleModuleTask(ActiveTask):
def __init__(self):
super().__init__(patterns=[r'.*\b(enable|add|disable|remove) (.*)'])
self.groups = {1: 'enable', 2: 'module'}
def match(self, text):
return self.match_and_save_groups(text, self.groups)
def action(self, text):
mod_name = self.module.lower().strip().replace(' ', '_')
if 'disable' in self.enable.lower() or 'remove' in self.enable.lower():
brain.inst.disable_mod(mod_name)
else:
brain.inst.enable_mod(mod_name)
class AthenaControl(Module):
def __init__(self):
tasks = [QuitTask(), ListModulesTask(), ToggleModuleTask()]
super().__init__('athena_control', tasks, priority=3)
| gpl-3.0 | 8,207,512,318,211,203,000 | 24.886792 | 79 | 0.582456 | false | 3.67268 | false | false | false |
TimBuckley/effective_django | tests/custom_pk/fields.py | 33 | 1629 | import random
import string
from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class MyWrapper(object):
def __init__(self, value):
self.value = value
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.value)
def __str__(self):
return self.value
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.value == other.value
return self.value == other
class MyAutoField(six.with_metaclass(models.SubfieldBase, models.CharField)):
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 10
super(MyAutoField, self).__init__(*args, **kwargs)
def pre_save(self, instance, add):
value = getattr(instance, self.attname, None)
if not value:
value = MyWrapper(''.join(random.sample(string.ascii_lowercase, 10)))
setattr(instance, self.attname, value)
return value
def to_python(self, value):
if not value:
return
if not isinstance(value, MyWrapper):
value = MyWrapper(value)
return value
def get_db_prep_save(self, value, connection):
if not value:
return
if isinstance(value, MyWrapper):
return six.text_type(value)
return value
def get_db_prep_value(self, value, connection, prepared=False):
if not value:
return
if isinstance(value, MyWrapper):
return six.text_type(value)
return value
| bsd-3-clause | 3,720,848,702,035,211,300 | 27.086207 | 81 | 0.601596 | false | 4.103275 | false | false | false |
CeltonMcGrath/TACTIC | 3rd_party/CherryPy/cherrypy/test/modfcgid.py | 6 | 4258 | """Wrapper for mod_fcgid, for use as a CherryPy HTTP server when testing.
To autostart fcgid, the "apache" executable or script must be
on your system path, or you must override the global APACHE_PATH.
On some platforms, "apache" may be called "apachectl", "apache2ctl",
or "httpd"--create a symlink to them if needed.
You'll also need the WSGIServer from flup.servers.
See http://projects.amor.org/misc/wiki/ModPythonGateway
KNOWN BUGS
==========
1. Apache processes Range headers automatically; CherryPy's truncated
output is then truncated again by Apache. See test_core.testRanges.
This was worked around in http://www.cherrypy.org/changeset/1319.
2. Apache does not allow custom HTTP methods like CONNECT as per the spec.
See test_core.testHTTPMethods.
3. Max request header and body settings do not work with Apache.
4. Apache replaces status "reason phrases" automatically. For example,
CherryPy may set "304 Not modified" but Apache will write out
"304 Not Modified" (capital "M").
5. Apache does not allow custom error codes as per the spec.
6. Apache (or perhaps modpython, or modpython_gateway) unquotes %xx in the
Request-URI too early.
7. mod_python will not read request bodies which use the "chunked"
transfer-coding (it passes REQUEST_CHUNKED_ERROR to ap_setup_client_block
instead of REQUEST_CHUNKED_DECHUNK, see Apache2's http_protocol.c and
mod_python's requestobject.c).
8. Apache will output a "Content-Length: 0" response header even if there's
no response entity body. This isn't really a bug; it just differs from
the CherryPy default.
"""
import os
curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
import re
import sys
import time
import cherrypy
from cherrypy.process import plugins, servers
from cherrypy.test import test
def read_process(cmd, args=""):
pipein, pipeout = os.popen4("%s %s" % (cmd, args))
try:
firstline = pipeout.readline()
if (re.search(r"(not recognized|No such file|not found)", firstline,
re.IGNORECASE)):
raise IOError('%s must be on your system path.' % cmd)
output = firstline + pipeout.read()
finally:
pipeout.close()
return output
APACHE_PATH = "httpd"
CONF_PATH = "fcgi.conf"
conf_fcgid = """
# Apache2 server conf file for testing CherryPy with mod_fcgid.
DocumentRoot "%(root)s"
ServerName 127.0.0.1
Listen %(port)s
LoadModule fastcgi_module modules/mod_fastcgi.dll
LoadModule rewrite_module modules/mod_rewrite.so
Options ExecCGI
SetHandler fastcgi-script
RewriteEngine On
RewriteRule ^(.*)$ /fastcgi.pyc [L]
FastCgiExternalServer "%(server)s" -host 127.0.0.1:4000
"""
class ModFCGISupervisor(test.LocalSupervisor):
using_apache = True
using_wsgi = True
template = conf_fcgid
def __str__(self):
return "FCGI Server on %s:%s" % (self.host, self.port)
def start(self, modulename):
cherrypy.server.httpserver = servers.FlupFCGIServer(
application=cherrypy.tree, bindAddress=('127.0.0.1', 4000))
cherrypy.server.httpserver.bind_addr = ('127.0.0.1', 4000)
# For FCGI, we both start apache...
self.start_apache()
# ...and our local server
test.LocalServer.start(self, modulename)
def start_apache(self):
fcgiconf = CONF_PATH
if not os.path.isabs(fcgiconf):
fcgiconf = os.path.join(curdir, fcgiconf)
# Write the Apache conf file.
f = open(fcgiconf, 'wb')
try:
server = repr(os.path.join(curdir, 'fastcgi.pyc'))[1:-1]
output = self.template % {'port': self.port, 'root': curdir,
'server': server}
output = output.replace('\r\n', '\n')
f.write(output)
finally:
f.close()
result = read_process(APACHE_PATH, "-k start -f %s" % fcgiconf)
if result:
print(result)
def stop(self):
"""Gracefully shutdown a server that is serving forever."""
read_process(APACHE_PATH, "-k stop")
test.LocalServer.stop(self)
def sync_apps(self):
cherrypy.server.httpserver.fcgiserver.application = self.get_app()
| epl-1.0 | -2,568,828,127,213,745,700 | 33.33871 | 77 | 0.662987 | false | 3.651801 | true | false | false |
FrankTsui/robust_rescaled_svm | common.py | 1 | 1636 | import numpy as np
import matplotlib.pyplot as plt
def plot_decision_function(classifier, fea, gnd, title):
'''
plot the decision function in 2-d plane
    classifier: the svm model
fea: array like, shape = (smp_num, fea_num)
gnd: array like, shape = (smp_num,)
title: title of plot
'''
fea_min = fea.min(axis = 0)
fea_max = fea.max(axis = 0)
mesh_num = 100
# meshgrid
xx, yy = np.meshgrid(np.linspace(fea_min[0], fea_max[0], mesh_num), \
np.linspace(fea_min[1], fea_max[1], mesh_num))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()], last_model_flag = False)
Z_first = Z[:, 0].copy()
Z_last = Z[:, -1].copy()
Z_first = Z_first.reshape(xx.shape)
Z_last = Z_last.reshape(xx.shape)
del Z
# plot the line, the points
leg_svm = plt.contour(xx, yy, Z_first, levels = [0.0], colors = 'k')
leg_rsvm = plt.contour(xx, yy, Z_last, levels = [0.0], colors = 'r')
posi_index = gnd == 1
nega_index = gnd == -1
marker_size = 70
plt.scatter(fea[:, 0], fea[:, 1], marker = 'o', \
s = classifier.smp_weights_mat[:, -1] * marker_size * 4, c = 'w', alpha = 1.0, edgecolors = 'm', label = 'weights')
plt.scatter(fea[posi_index, 0], fea[posi_index, 1], marker = '^', s = marker_size, c = 'g', alpha = 0.8, label = 'posi')
plt.scatter(fea[nega_index, 0], fea[nega_index, 1], marker = 'x', s = marker_size, c = 'b', label = 'nega')
leg_svm.collections[0].set_label('svm')
leg_rsvm.collections[0].set_label('rsvm')
plt.legend(loc = 'upper left')
plt.axis('on')
plt.title(title) | apache-2.0 | -7,288,017,175,188,139,000 | 39.925 | 124 | 0.579462 | false | 2.806175 | false | false | false |
barryHub20/ServerAss2 | lib/itsdangerous.py | 626 | 31840 | # -*- coding: utf-8 -*-
"""
itsdangerous
~~~~~~~~~~~~
A module that implements various functions to deal with untrusted
sources. Mainly useful for web applications.
:copyright: (c) 2014 by Armin Ronacher and the Django Software Foundation.
:license: BSD, see LICENSE for more details.
"""
import sys
import hmac
import zlib
import time
import base64
import hashlib
import operator
from datetime import datetime
PY2 = sys.version_info[0] == 2
if PY2:
from itertools import izip
text_type = unicode
int_to_byte = chr
number_types = (int, long, float)
else:
from functools import reduce
izip = zip
text_type = str
int_to_byte = operator.methodcaller('to_bytes', 1, 'big')
number_types = (int, float)
try:
import simplejson as json
except ImportError:
import json
class _CompactJSON(object):
"""Wrapper around simplejson that strips whitespace.
"""
def loads(self, payload):
return json.loads(payload)
def dumps(self, obj):
return json.dumps(obj, separators=(',', ':'))
compact_json = _CompactJSON()
# 2011/01/01 in UTC
EPOCH = 1293840000
def want_bytes(s, encoding='utf-8', errors='strict'):
if isinstance(s, text_type):
s = s.encode(encoding, errors)
return s
def is_text_serializer(serializer):
"""Checks wheather a serializer generates text or binary."""
return isinstance(serializer.dumps({}), text_type)
# Starting with 3.3 the standard library has a c-implementation for
# constant time string compares.
_builtin_constant_time_compare = getattr(hmac, 'compare_digest', None)
def constant_time_compare(val1, val2):
"""Returns True if the two strings are equal, False otherwise.
The time taken is independent of the number of characters that match. Do
    not use this function for anything other than comparison with known
    length targets.
    This should be implemented in C in order to get it completely right.
"""
if _builtin_constant_time_compare is not None:
return _builtin_constant_time_compare(val1, val2)
len_eq = len(val1) == len(val2)
if len_eq:
result = 0
left = val1
else:
result = 1
left = val2
for x, y in izip(bytearray(left), bytearray(val2)):
result |= x ^ y
return result == 0
class BadData(Exception):
"""Raised if bad data of any sort was encountered. This is the
base for all exceptions that itsdangerous is currently using.
.. versionadded:: 0.15
"""
message = None
def __init__(self, message):
Exception.__init__(self, message)
self.message = message
def __str__(self):
return text_type(self.message)
if PY2:
__unicode__ = __str__
def __str__(self):
return self.__unicode__().encode('utf-8')
class BadPayload(BadData):
"""This error is raised in situations when payload is loaded without
    checking the signature first and an exception happened as a result of
that. The original exception that caused that will be stored on the
exception as :attr:`original_error`.
This can also happen with a :class:`JSONWebSignatureSerializer` that
is subclassed and uses a different serializer for the payload than
the expected one.
.. versionadded:: 0.15
"""
def __init__(self, message, original_error=None):
BadData.__init__(self, message)
#: If available, the error that indicates why the payload
#: was not valid. This might be `None`.
self.original_error = original_error
class BadSignature(BadData):
"""This error is raised if a signature does not match. As of
itsdangerous 0.14 there are helpful attributes on the exception
instances. You can also catch down the baseclass :exc:`BadData`.
"""
def __init__(self, message, payload=None):
BadData.__init__(self, message)
#: The payload that failed the signature test. In some
#: situations you might still want to inspect this, even if
#: you know it was tampered with.
#:
#: .. versionadded:: 0.14
self.payload = payload
class BadTimeSignature(BadSignature):
"""Raised for time based signatures that fail. This is a subclass
of :class:`BadSignature` so you can catch those down as well.
"""
def __init__(self, message, payload=None, date_signed=None):
BadSignature.__init__(self, message, payload)
#: If the signature expired this exposes the date of when the
#: signature was created. This can be helpful in order to
#: tell the user how long a link has been gone stale.
#:
#: .. versionadded:: 0.14
self.date_signed = date_signed
class BadHeader(BadSignature):
"""Raised if a signed header is invalid in some form. This only
happens for serializers that have a header that goes with the
signature.
.. versionadded:: 0.24
"""
def __init__(self, message, payload=None, header=None,
original_error=None):
BadSignature.__init__(self, message, payload)
#: If the header is actually available but just malformed it
#: might be stored here.
self.header = header
#: If available, the error that indicates why the payload
#: was not valid. This might be `None`.
self.original_error = original_error
class SignatureExpired(BadTimeSignature):
"""Signature timestamp is older than required max_age. This is a
subclass of :exc:`BadTimeSignature` so you can use the baseclass for
catching the error.
"""
def base64_encode(string):
"""base64 encodes a single bytestring (and is tolerant to getting
called with a unicode string).
The resulting bytestring is safe for putting into URLs.
"""
string = want_bytes(string)
return base64.urlsafe_b64encode(string).strip(b'=')
def base64_decode(string):
"""base64 decodes a single bytestring (and is tolerant to getting
called with a unicode string).
The result is also a bytestring.
"""
string = want_bytes(string, encoding='ascii', errors='ignore')
return base64.urlsafe_b64decode(string + b'=' * (-len(string) % 4))
def int_to_bytes(num):
assert num >= 0
rv = []
while num:
rv.append(int_to_byte(num & 0xff))
num >>= 8
return b''.join(reversed(rv))
def bytes_to_int(bytestr):
return reduce(lambda a, b: a << 8 | b, bytearray(bytestr), 0)
class SigningAlgorithm(object):
"""Subclasses of `SigningAlgorithm` have to implement `get_signature` to
provide signature generation functionality.
"""
def get_signature(self, key, value):
"""Returns the signature for the given key and value"""
raise NotImplementedError()
def verify_signature(self, key, value, sig):
"""Verifies the given signature matches the expected signature"""
return constant_time_compare(sig, self.get_signature(key, value))
class NoneAlgorithm(SigningAlgorithm):
"""This class provides a algorithm that does not perform any signing and
returns an empty signature.
"""
def get_signature(self, key, value):
return b''
class HMACAlgorithm(SigningAlgorithm):
"""This class provides signature generation using HMACs."""
#: The digest method to use with the MAC algorithm. This defaults to sha1
#: but can be changed for any other function in the hashlib module.
default_digest_method = staticmethod(hashlib.sha1)
def __init__(self, digest_method=None):
if digest_method is None:
digest_method = self.default_digest_method
self.digest_method = digest_method
def get_signature(self, key, value):
mac = hmac.new(key, msg=value, digestmod=self.digest_method)
return mac.digest()
class Signer(object):
"""This class can sign bytes and unsign it and validate the signature
provided.
Salt can be used to namespace the hash, so that a signed string is only
valid for a given namespace. Leaving this at the default value or re-using
a salt value across different parts of your application where the same
signed value in one part can mean something different in another part
is a security risk.
See :ref:`the-salt` for an example of what the salt is doing and how you
can utilize it.
.. versionadded:: 0.14
`key_derivation` and `digest_method` were added as arguments to the
class constructor.
.. versionadded:: 0.18
`algorithm` was added as an argument to the class constructor.
"""
#: The digest method to use for the signer. This defaults to sha1 but can
#: be changed for any other function in the hashlib module.
#:
#: .. versionchanged:: 0.14
default_digest_method = staticmethod(hashlib.sha1)
#: Controls how the key is derived. The default is Django style
#: concatenation. Possible values are ``concat``, ``django-concat``
#: and ``hmac``. This is used for deriving a key from the secret key
#: with an added salt.
#:
#: .. versionadded:: 0.14
default_key_derivation = 'django-concat'
def __init__(self, secret_key, salt=None, sep='.', key_derivation=None,
digest_method=None, algorithm=None):
self.secret_key = want_bytes(secret_key)
self.sep = sep
self.salt = 'itsdangerous.Signer' if salt is None else salt
if key_derivation is None:
key_derivation = self.default_key_derivation
self.key_derivation = key_derivation
if digest_method is None:
digest_method = self.default_digest_method
self.digest_method = digest_method
if algorithm is None:
algorithm = HMACAlgorithm(self.digest_method)
self.algorithm = algorithm
def derive_key(self):
"""This method is called to derive the key. If you're unhappy with
the default key derivation choices you can override them here.
Keep in mind that the key derivation in itsdangerous is not intended
to be used as a security method to make a complex key out of a short
password. Instead you should use large random secret keys.
"""
salt = want_bytes(self.salt)
if self.key_derivation == 'concat':
return self.digest_method(salt + self.secret_key).digest()
elif self.key_derivation == 'django-concat':
return self.digest_method(salt + b'signer' +
self.secret_key).digest()
elif self.key_derivation == 'hmac':
mac = hmac.new(self.secret_key, digestmod=self.digest_method)
mac.update(salt)
return mac.digest()
elif self.key_derivation == 'none':
return self.secret_key
else:
raise TypeError('Unknown key derivation method')
def get_signature(self, value):
"""Returns the signature for the given value"""
value = want_bytes(value)
key = self.derive_key()
sig = self.algorithm.get_signature(key, value)
return base64_encode(sig)
def sign(self, value):
"""Signs the given string."""
return value + want_bytes(self.sep) + self.get_signature(value)
def verify_signature(self, value, sig):
"""Verifies the signature for the given value."""
key = self.derive_key()
try:
sig = base64_decode(sig)
except Exception:
return False
return self.algorithm.verify_signature(key, value, sig)
def unsign(self, signed_value):
"""Unsigns the given string."""
signed_value = want_bytes(signed_value)
sep = want_bytes(self.sep)
if sep not in signed_value:
raise BadSignature('No %r found in value' % self.sep)
value, sig = signed_value.rsplit(sep, 1)
if self.verify_signature(value, sig):
return value
raise BadSignature('Signature %r does not match' % sig,
payload=value)
def validate(self, signed_value):
"""Just validates the given signed value. Returns `True` if the
signature exists and is valid, `False` otherwise."""
try:
self.unsign(signed_value)
return True
except BadSignature:
return False
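# Illustrative usage sketch added for clarity; not part of the original
# module. The secret key and salts below are made-up example values.
def _example_signer_usage():
    """Round-trip a value through sign()/unsign() and show salt namespacing."""
    signer = Signer('example-secret-key', salt='activate')
    token = signer.sign(b'user-42')
    assert signer.unsign(token) == b'user-42'
    other = Signer('example-secret-key', salt='upgrade')
    try:
        other.unsign(token)
    except BadSignature:
        pass  # expected: a different salt derives a different key, so verification fails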
class TimestampSigner(Signer):
"""Works like the regular :class:`Signer` but also records the time
of the signing and can be used to expire signatures. The unsign
    method can raise a :exc:`SignatureExpired` exception if the unsigning
failed because the signature is expired. This exception is a subclass
of :exc:`BadSignature`.
"""
def get_timestamp(self):
"""Returns the current timestamp. This implementation returns the
seconds since 1/1/2011. The function must return an integer.
"""
return int(time.time() - EPOCH)
def timestamp_to_datetime(self, ts):
"""Used to convert the timestamp from `get_timestamp` into a
datetime object.
"""
return datetime.utcfromtimestamp(ts + EPOCH)
def sign(self, value):
"""Signs the given string and also attaches a time information."""
value = want_bytes(value)
timestamp = base64_encode(int_to_bytes(self.get_timestamp()))
sep = want_bytes(self.sep)
value = value + sep + timestamp
return value + sep + self.get_signature(value)
def unsign(self, value, max_age=None, return_timestamp=False):
"""Works like the regular :meth:`~Signer.unsign` but can also
validate the time. See the base docstring of the class for
the general behavior. If `return_timestamp` is set to `True`
the timestamp of the signature will be returned as naive
:class:`datetime.datetime` object in UTC.
"""
try:
result = Signer.unsign(self, value)
sig_error = None
except BadSignature as e:
sig_error = e
result = e.payload or b''
sep = want_bytes(self.sep)
# If there is no timestamp in the result there is something
# seriously wrong. In case there was a signature error, we raise
        # that one directly, otherwise we have a weird situation that we
        # should only end up in if someone uses a time-based serializer
        # on non-timestamp data, so catch that.
        if sep not in result:
if sig_error:
raise sig_error
raise BadTimeSignature('timestamp missing', payload=result)
value, timestamp = result.rsplit(sep, 1)
try:
timestamp = bytes_to_int(base64_decode(timestamp))
except Exception:
timestamp = None
# Signature is *not* okay. Raise a proper error now that we have
# split the value and the timestamp.
if sig_error is not None:
raise BadTimeSignature(text_type(sig_error), payload=value,
date_signed=timestamp)
# Signature was okay but the timestamp is actually not there or
# malformed. Should not happen, but well. We handle it nonetheless
if timestamp is None:
raise BadTimeSignature('Malformed timestamp', payload=value)
# Check timestamp is not older than max_age
if max_age is not None:
age = self.get_timestamp() - timestamp
if age > max_age:
raise SignatureExpired(
'Signature age %s > %s seconds' % (age, max_age),
payload=value,
date_signed=self.timestamp_to_datetime(timestamp))
if return_timestamp:
return value, self.timestamp_to_datetime(timestamp)
return value
def validate(self, signed_value, max_age=None):
"""Just validates the given signed value. Returns `True` if the
signature exists and is valid, `False` otherwise."""
try:
self.unsign(signed_value, max_age=max_age)
return True
except BadSignature:
return False
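# Illustrative usage sketch added for clarity; not part of the original
# module. Key and value are made-up examples.
def _example_timestamp_signer_usage():
    """Show how unsign() enforces a maximum age on a timestamped value."""
    signer = TimestampSigner('example-secret-key')
    token = signer.sign(b'reset-password')
    # Fresh tokens unsign fine; once more than max_age seconds have passed,
    # the same call raises SignatureExpired instead.
    assert signer.unsign(token, max_age=3600) == b'reset-password'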
class Serializer(object):
"""This class provides a serialization interface on top of the
signer. It provides a similar API to json/pickle and other modules but is
slightly differently structured internally. If you want to change the
underlying implementation for parsing and loading you have to override the
:meth:`load_payload` and :meth:`dump_payload` functions.
This implementation uses simplejson if available for dumping and loading
and will fall back to the standard library's json module if it's not
available.
Starting with 0.14 you do not need to subclass this class in order to
    switch out or customize the :class:`Signer`. You can instead also pass a
different class to the constructor as well as keyword arguments as
dictionary that should be forwarded::
s = Serializer(signer_kwargs={'key_derivation': 'hmac'})
.. versionchanged:: 0.14:
The `signer` and `signer_kwargs` parameters were added to the
constructor.
"""
#: If a serializer module or class is not passed to the constructor
#: this one is picked up. This currently defaults to :mod:`json`.
default_serializer = json
#: The default :class:`Signer` class that is being used by this
#: serializer.
#:
#: .. versionadded:: 0.14
default_signer = Signer
def __init__(self, secret_key, salt=b'itsdangerous', serializer=None,
signer=None, signer_kwargs=None):
self.secret_key = want_bytes(secret_key)
self.salt = want_bytes(salt)
if serializer is None:
serializer = self.default_serializer
self.serializer = serializer
self.is_text_serializer = is_text_serializer(serializer)
if signer is None:
signer = self.default_signer
self.signer = signer
self.signer_kwargs = signer_kwargs or {}
def load_payload(self, payload, serializer=None):
"""Loads the encoded object. This function raises :class:`BadPayload`
if the payload is not valid. The `serializer` parameter can be used to
override the serializer stored on the class. The encoded payload is
always byte based.
"""
if serializer is None:
serializer = self.serializer
is_text = self.is_text_serializer
else:
is_text = is_text_serializer(serializer)
try:
if is_text:
payload = payload.decode('utf-8')
return serializer.loads(payload)
except Exception as e:
raise BadPayload('Could not load the payload because an '
'exception occurred on unserializing the data',
original_error=e)
def dump_payload(self, obj):
"""Dumps the encoded object. The return value is always a
bytestring. If the internal serializer is text based the value
will automatically be encoded to utf-8.
"""
return want_bytes(self.serializer.dumps(obj))
def make_signer(self, salt=None):
"""A method that creates a new instance of the signer to be used.
The default implementation uses the :class:`Signer` baseclass.
"""
if salt is None:
salt = self.salt
return self.signer(self.secret_key, salt=salt, **self.signer_kwargs)
def dumps(self, obj, salt=None):
"""Returns a signed string serialized with the internal serializer.
The return value can be either a byte or unicode string depending
on the format of the internal serializer.
"""
payload = want_bytes(self.dump_payload(obj))
rv = self.make_signer(salt).sign(payload)
if self.is_text_serializer:
rv = rv.decode('utf-8')
return rv
def dump(self, obj, f, salt=None):
"""Like :meth:`dumps` but dumps into a file. The file handle has
to be compatible with what the internal serializer expects.
"""
f.write(self.dumps(obj, salt))
def loads(self, s, salt=None):
"""Reverse of :meth:`dumps`, raises :exc:`BadSignature` if the
signature validation fails.
"""
s = want_bytes(s)
return self.load_payload(self.make_signer(salt).unsign(s))
def load(self, f, salt=None):
"""Like :meth:`loads` but loads from a file."""
return self.loads(f.read(), salt)
def loads_unsafe(self, s, salt=None):
"""Like :meth:`loads` but without verifying the signature. This is
potentially very dangerous to use depending on how your serializer
works. The return value is ``(signature_okay, payload)`` instead of
just the payload. The first item will be a boolean that indicates
if the signature is okay (``True``) or if it failed. This function
never fails.
Use it for debugging only and if you know that your serializer module
is not exploitable (eg: do not use it with a pickle serializer).
.. versionadded:: 0.15
"""
return self._loads_unsafe_impl(s, salt)
def _loads_unsafe_impl(self, s, salt, load_kwargs=None,
load_payload_kwargs=None):
"""Lowlevel helper function to implement :meth:`loads_unsafe` in
serializer subclasses.
"""
try:
return True, self.loads(s, salt=salt, **(load_kwargs or {}))
except BadSignature as e:
if e.payload is None:
return False, None
try:
return False, self.load_payload(e.payload,
**(load_payload_kwargs or {}))
except BadPayload:
return False, None
def load_unsafe(self, f, *args, **kwargs):
"""Like :meth:`loads_unsafe` but loads from a file.
.. versionadded:: 0.15
"""
return self.loads_unsafe(f.read(), *args, **kwargs)
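# Illustrative usage sketch added for clarity; not part of the original
# module. The secret key is a made-up example value.
def _example_serializer_usage():
    """Round-trip a JSON-serializable object through dumps()/loads()."""
    s = Serializer('example-secret-key')
    token = s.dumps({'id': 5, 'admin': False})
    assert s.loads(token) == {'id': 5, 'admin': False}
    # loads_unsafe() never raises; it reports whether the signature was valid.
    ok, payload = s.loads_unsafe(token)
    assert ok and payload == {'id': 5, 'admin': False}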
class TimedSerializer(Serializer):
"""Uses the :class:`TimestampSigner` instead of the default
:meth:`Signer`.
"""
default_signer = TimestampSigner
def loads(self, s, max_age=None, return_timestamp=False, salt=None):
"""Reverse of :meth:`dumps`, raises :exc:`BadSignature` if the
signature validation fails. If a `max_age` is provided it will
ensure the signature is not older than that time in seconds. In
case the signature is outdated, :exc:`SignatureExpired` is raised
which is a subclass of :exc:`BadSignature`. All arguments are
forwarded to the signer's :meth:`~TimestampSigner.unsign` method.
"""
base64d, timestamp = self.make_signer(salt) \
.unsign(s, max_age, return_timestamp=True)
payload = self.load_payload(base64d)
if return_timestamp:
return payload, timestamp
return payload
def loads_unsafe(self, s, max_age=None, salt=None):
load_kwargs = {'max_age': max_age}
load_payload_kwargs = {}
return self._loads_unsafe_impl(s, salt, load_kwargs, load_payload_kwargs)
class JSONWebSignatureSerializer(Serializer):
"""This serializer implements JSON Web Signature (JWS) support. Only
supports the JWS Compact Serialization.
"""
jws_algorithms = {
'HS256': HMACAlgorithm(hashlib.sha256),
'HS384': HMACAlgorithm(hashlib.sha384),
'HS512': HMACAlgorithm(hashlib.sha512),
'none': NoneAlgorithm(),
}
#: The default algorithm to use for signature generation
default_algorithm = 'HS256'
default_serializer = compact_json
def __init__(self, secret_key, salt=None, serializer=None,
signer=None, signer_kwargs=None, algorithm_name=None):
Serializer.__init__(self, secret_key, salt, serializer,
signer, signer_kwargs)
if algorithm_name is None:
algorithm_name = self.default_algorithm
self.algorithm_name = algorithm_name
self.algorithm = self.make_algorithm(algorithm_name)
def load_payload(self, payload, return_header=False):
payload = want_bytes(payload)
if b'.' not in payload:
raise BadPayload('No "." found in value')
base64d_header, base64d_payload = payload.split(b'.', 1)
try:
json_header = base64_decode(base64d_header)
except Exception as e:
raise BadHeader('Could not base64 decode the header because of '
'an exception', original_error=e)
try:
json_payload = base64_decode(base64d_payload)
except Exception as e:
raise BadPayload('Could not base64 decode the payload because of '
'an exception', original_error=e)
try:
header = Serializer.load_payload(self, json_header,
serializer=json)
except BadData as e:
raise BadHeader('Could not unserialize header because it was '
'malformed', original_error=e)
if not isinstance(header, dict):
raise BadHeader('Header payload is not a JSON object',
header=header)
payload = Serializer.load_payload(self, json_payload)
if return_header:
return payload, header
return payload
def dump_payload(self, header, obj):
base64d_header = base64_encode(self.serializer.dumps(header))
base64d_payload = base64_encode(self.serializer.dumps(obj))
return base64d_header + b'.' + base64d_payload
def make_algorithm(self, algorithm_name):
try:
return self.jws_algorithms[algorithm_name]
except KeyError:
raise NotImplementedError('Algorithm not supported')
def make_signer(self, salt=None, algorithm=None):
if salt is None:
salt = self.salt
key_derivation = 'none' if salt is None else None
if algorithm is None:
algorithm = self.algorithm
return self.signer(self.secret_key, salt=salt, sep='.',
key_derivation=key_derivation, algorithm=algorithm)
def make_header(self, header_fields):
header = header_fields.copy() if header_fields else {}
header['alg'] = self.algorithm_name
return header
def dumps(self, obj, salt=None, header_fields=None):
"""Like :meth:`~Serializer.dumps` but creates a JSON Web Signature. It
also allows for specifying additional fields to be included in the JWS
Header.
"""
header = self.make_header(header_fields)
signer = self.make_signer(salt, self.algorithm)
return signer.sign(self.dump_payload(header, obj))
def loads(self, s, salt=None, return_header=False):
"""Reverse of :meth:`dumps`. If requested via `return_header` it will
return a tuple of payload and header.
"""
payload, header = self.load_payload(
self.make_signer(salt, self.algorithm).unsign(want_bytes(s)),
return_header=True)
if header.get('alg') != self.algorithm_name:
raise BadHeader('Algorithm mismatch', header=header,
payload=payload)
if return_header:
return payload, header
return payload
def loads_unsafe(self, s, salt=None, return_header=False):
kwargs = {'return_header': return_header}
return self._loads_unsafe_impl(s, salt, kwargs, kwargs)
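# Illustrative usage sketch added for clarity; not part of the original
# module. The secret key is a made-up example value.
def _example_jws_usage():
    """Produce and verify a JWS compact token and inspect its header."""
    jws = JSONWebSignatureSerializer('example-secret-key', algorithm_name='HS256')
    token = jws.dumps({'scope': 'read'})
    payload, header = jws.loads(token, return_header=True)
    assert payload == {'scope': 'read'}
    assert header['alg'] == 'HS256'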
class TimedJSONWebSignatureSerializer(JSONWebSignatureSerializer):
"""Works like the regular :class:`JSONWebSignatureSerializer` but also
records the time of the signing and can be used to expire signatures.
    JWS currently does not specify this behavior but it mentions a possible
    extension like this in the spec. Expiry date is encoded into the header
    similarly as specified in `draft-ietf-oauth-json-web-token
    <http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#expDef>`_.
The unsign method can raise a :exc:`SignatureExpired` method if the
unsigning failed because the signature is expired. This exception is a
subclass of :exc:`BadSignature`.
"""
DEFAULT_EXPIRES_IN = 3600
def __init__(self, secret_key, expires_in=None, **kwargs):
JSONWebSignatureSerializer.__init__(self, secret_key, **kwargs)
if expires_in is None:
expires_in = self.DEFAULT_EXPIRES_IN
self.expires_in = expires_in
def make_header(self, header_fields):
header = JSONWebSignatureSerializer.make_header(self, header_fields)
iat = self.now()
exp = iat + self.expires_in
header['iat'] = iat
header['exp'] = exp
return header
def loads(self, s, salt=None, return_header=False):
payload, header = JSONWebSignatureSerializer.loads(
self, s, salt, return_header=True)
if 'exp' not in header:
raise BadSignature('Missing expiry date', payload=payload)
if not (isinstance(header['exp'], number_types)
and header['exp'] > 0):
raise BadSignature('expiry date is not an IntDate',
payload=payload)
if header['exp'] < self.now():
raise SignatureExpired('Signature expired', payload=payload,
date_signed=self.get_issue_date(header))
if return_header:
return payload, header
return payload
def get_issue_date(self, header):
rv = header.get('iat')
if isinstance(rv, number_types):
return datetime.utcfromtimestamp(int(rv))
def now(self):
return int(time.time())
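# Illustrative usage sketch added for clarity; not part of the original
# module. Key, payload and expiry are made-up example values.
def _example_timed_jws_usage():
    """Issue a JWS token that expires one hour after signing."""
    jws = TimedJSONWebSignatureSerializer('example-secret-key', expires_in=3600)
    token = jws.dumps({'confirm': '[email protected]'})
    payload, header = jws.loads(token, return_header=True)
    assert payload == {'confirm': '[email protected]'}
    assert header['exp'] - header['iat'] == 3600
    # Once time passes header['exp'], loads() raises SignatureExpired instead.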
class URLSafeSerializerMixin(object):
"""Mixed in with a regular serializer it will attempt to zlib compress
the string to make it shorter if necessary. It will also base64 encode
the string so that it can safely be placed in a URL.
"""
def load_payload(self, payload):
decompress = False
if payload.startswith(b'.'):
payload = payload[1:]
decompress = True
try:
json = base64_decode(payload)
except Exception as e:
raise BadPayload('Could not base64 decode the payload because of '
'an exception', original_error=e)
if decompress:
try:
json = zlib.decompress(json)
except Exception as e:
raise BadPayload('Could not zlib decompress the payload before '
'decoding the payload', original_error=e)
return super(URLSafeSerializerMixin, self).load_payload(json)
def dump_payload(self, obj):
json = super(URLSafeSerializerMixin, self).dump_payload(obj)
is_compressed = False
compressed = zlib.compress(json)
if len(compressed) < (len(json) - 1):
json = compressed
is_compressed = True
base64d = base64_encode(json)
if is_compressed:
base64d = b'.' + base64d
return base64d
class URLSafeSerializer(URLSafeSerializerMixin, Serializer):
"""Works like :class:`Serializer` but dumps and loads into a URL
safe string consisting of the upper and lowercase character of the
alphabet as well as ``'_'``, ``'-'`` and ``'.'``.
"""
default_serializer = compact_json
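# Illustrative usage sketch added for clarity; not part of the original
# module. Key and salt are made-up example values.
def _example_url_safe_usage():
    """Create a compact token that can be embedded directly in a URL."""
    s = URLSafeSerializer('example-secret-key', salt='email-confirm')
    token = s.dumps({'user_id': 7})
    assert s.loads(token) == {'user_id': 7}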
class URLSafeTimedSerializer(URLSafeSerializerMixin, TimedSerializer):
"""Works like :class:`TimedSerializer` but dumps and loads into a URL
safe string consisting of the upper and lowercase character of the
alphabet as well as ``'_'``, ``'-'`` and ``'.'``.
"""
default_serializer = compact_json
| apache-2.0 | -2,543,009,830,124,408,300 | 35.513761 | 81 | 0.632004 | false | 4.258392 | false | false | false |
felixbb/forseti-security | tests/common/gcp_api/bigquery_test.py | 1 | 5019 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the Bigquery client."""
import mock
import httplib2
from googleapiclient.errors import HttpError
from google.apputils import basetest
from google.cloud.security.common.gcp_api import bigquery as bq
from google.cloud.security.common.gcp_api import _base_client as _base_client
from google.cloud.security.common.gcp_api import errors as api_errors
from tests.common.gcp_api.test_data import fake_bigquery as fbq
class BigqueryTestCase(basetest.TestCase):
"""Test the Bigquery API Client."""
MAX_BIGQUERY_API_CALLS_PER_100_SECONDS = 88888
@mock.patch.object(bq, 'FLAGS')
@mock.patch.object(_base_client.BaseClient, '__init__', autospec=True)
def setUp(self, mock_base_client, mock_flags):
"""Set up."""
mock_flags.max_bigquery_api_calls_per_100_seconds = (
self.MAX_BIGQUERY_API_CALLS_PER_100_SECONDS)
self.bq_api_client = bq.BigQueryClient()
self.http_response = httplib2.Response(
{'status': '400', 'content-type': 'application/json'}
)
def test_api_client_is_initialized(self):
"""Test that the api client is initialized."""
self.assertEquals(
self.MAX_BIGQUERY_API_CALLS_PER_100_SECONDS,
self.bq_api_client.rate_limiter.max_calls)
self.assertEquals(
bq.BigQueryClient.DEFAULT_QUOTA_TIMESPAN_PER_SECONDS,
self.bq_api_client.rate_limiter.period)
def test_get_bigquery_projectids_raises(self):
mock_bq_stub = mock.MagicMock()
self.bq_api_client.service = mock.MagicMock()
self.bq_api_client.service.projects.return_value = mock_bq_stub
self.bq_api_client._execute = mock.MagicMock(
side_effect=HttpError(self.http_response, '{}')
)
with self.assertRaises(api_errors.ApiExecutionError):
self.bq_api_client.get_bigquery_projectids()
def test_get_bigquery_projectids(self):
mock_bq_stub = mock.MagicMock()
self.bq_api_client.service = mock.MagicMock()
self.bq_api_client.service.projects.return_value = mock_bq_stub
self.bq_api_client._build_paged_result = mock.MagicMock(
return_value=fbq.PROJECTS_LIST_REQUEST_RESPONSE
)
return_value = self.bq_api_client.get_bigquery_projectids()
self.assertListEqual(return_value, fbq.PROJECTS_LIST_EXPECTED)
def test_get_datasets_for_projectid_raises(self):
mock_bq_stub = mock.MagicMock()
self.bq_api_client.service = mock.MagicMock()
self.bq_api_client.service.datasets.return_value = mock_bq_stub
self.bq_api_client._execute = mock.MagicMock(
side_effect=HttpError(self.http_response, '{}')
)
with self.assertRaises(api_errors.ApiExecutionError):
self.bq_api_client.get_datasets_for_projectid(fbq.PROJECT_IDS[0])
    def test_get_datasets_for_projectid(self):
mock_bq_stub = mock.MagicMock()
self.bq_api_client.service = mock.MagicMock()
self.bq_api_client.service.datasets.return_value = mock_bq_stub
self.bq_api_client._build_paged_result = mock.MagicMock(
return_value=fbq.DATASETS_LIST_REQUEST_RESPONSE
)
return_value = self.bq_api_client.get_datasets_for_projectid('')
self.assertListEqual(return_value, fbq.DATASETS_LIST_EXPECTED)
def test_get_dataset_access_raises(self):
mock_bq_stub = mock.MagicMock()
self.bq_api_client.service = mock.MagicMock()
self.bq_api_client.service.datasets.return_value = mock_bq_stub
self.bq_api_client._execute = mock.MagicMock(
side_effect=HttpError(self.http_response, '{}')
)
with self.assertRaises(api_errors.ApiExecutionError):
self.bq_api_client.get_dataset_access(fbq.PROJECT_IDS[0],
fbq.DATASET_ID)
def test_get_dataset_access(self):
mock_bq_stub = mock.MagicMock()
self.bq_api_client.service = mock.MagicMock()
self.bq_api_client.service.datasets.return_value = mock_bq_stub
self.bq_api_client._build_paged_result = mock.MagicMock(
return_value=fbq.DATASETS_GET_REQUEST_RESPONSE
)
return_value = self.bq_api_client.get_dataset_access('','')
self.assertListEqual(return_value, fbq.DATASETS_GET_EXPECTED)
if __name__ == '__main__':
basetest.main()
| apache-2.0 | 1,886,433,124,228,149,000 | 37.906977 | 78 | 0.664873 | false | 3.615994 | true | false | false |
slohse/ansible | setup.py | 34 | 11028 |
from __future__ import print_function
import json
import os
import os.path
import re
import sys
import warnings
from collections import defaultdict
from distutils.command.build_scripts import build_scripts as BuildScripts
from distutils.command.sdist import sdist as SDist
try:
from setuptools import setup, find_packages
from setuptools.command.build_py import build_py as BuildPy
from setuptools.command.install_lib import install_lib as InstallLib
from setuptools.command.install_scripts import install_scripts as InstallScripts
except ImportError:
print("Ansible now needs setuptools in order to build. Install it using"
" your package manager (usually python-setuptools) or via pip (pip"
" install setuptools).", file=sys.stderr)
sys.exit(1)
sys.path.insert(0, os.path.abspath('lib'))
from ansible.release import __version__, __author__
SYMLINK_CACHE = 'SYMLINK_CACHE.json'
def _find_symlinks(topdir, extension=''):
"""Find symlinks that should be maintained
Maintained symlinks exist in the bin dir or are modules which have
aliases. Our heuristic is that they are a link in a certain path which
point to a file in the same directory.
"""
symlinks = defaultdict(list)
for base_path, dirs, files in os.walk(topdir):
for filename in files:
filepath = os.path.join(base_path, filename)
if os.path.islink(filepath) and filename.endswith(extension):
target = os.readlink(filepath)
if os.path.dirname(target) == '':
link = filepath[len(topdir):]
if link.startswith('/'):
link = link[1:]
symlinks[os.path.basename(target)].append(link)
return symlinks
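# Illustrative sketch; not part of the original setup.py. It shows the shape of
# the mapping returned above: link-target basename -> relative paths of the
# symlinks that point at it, e.g. {'ansible': ['ansible-playbook', ...]} for ./bin.
def _print_bin_symlinks():
    for target, links in sorted(_find_symlinks('bin').items()):
        print('%s <- %s' % (target, ', '.join(sorted(links))))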
def _cache_symlinks(symlink_data):
with open(SYMLINK_CACHE, 'w') as f:
json.dump(symlink_data, f)
def _maintain_symlinks(symlink_type, base_path):
"""Switch a real file into a symlink"""
try:
# Try the cache first because going from git checkout to sdist is the
# only time we know that we're going to cache correctly
with open(SYMLINK_CACHE, 'r') as f:
symlink_data = json.load(f)
except (IOError, OSError) as e:
# IOError on py2, OSError on py3. Both have errno
if e.errno == 2:
# SYMLINKS_CACHE doesn't exist. Fallback to trying to create the
# cache now. Will work if we're running directly from a git
# checkout or from an sdist created earlier.
symlink_data = {'script': _find_symlinks('bin'),
'library': _find_symlinks('lib', '.py'),
}
# Sanity check that something we know should be a symlink was
# found. We'll take that to mean that the current directory
# structure properly reflects symlinks in the git repo
if 'ansible-playbook' in symlink_data['script']['ansible']:
_cache_symlinks(symlink_data)
else:
raise RuntimeError(
"Pregenerated symlink list was not present and expected "
"symlinks in ./bin were missing or broken. "
"Perhaps this isn't a git checkout?"
)
else:
raise
symlinks = symlink_data[symlink_type]
for source in symlinks:
for dest in symlinks[source]:
dest_path = os.path.join(base_path, dest)
if not os.path.islink(dest_path):
try:
os.unlink(dest_path)
except OSError as e:
if e.errno == 2:
# File does not exist which is all we wanted
pass
os.symlink(source, dest_path)
class BuildPyCommand(BuildPy):
def run(self):
BuildPy.run(self)
_maintain_symlinks('library', self.build_lib)
class BuildScriptsCommand(BuildScripts):
def run(self):
BuildScripts.run(self)
_maintain_symlinks('script', self.build_dir)
class InstallLibCommand(InstallLib):
def run(self):
InstallLib.run(self)
_maintain_symlinks('library', self.install_dir)
class InstallScriptsCommand(InstallScripts):
def run(self):
InstallScripts.run(self)
_maintain_symlinks('script', self.install_dir)
class SDistCommand(SDist):
def run(self):
# have to generate the cache of symlinks for release as sdist is the
# only command that has access to symlinks from the git repo
symlinks = {'script': _find_symlinks('bin'),
'library': _find_symlinks('lib', '.py'),
}
_cache_symlinks(symlinks)
SDist.run(self)
def read_file(file_name):
"""Read file and return its contents."""
with open(file_name, 'r') as f:
return f.read()
def read_requirements(file_name):
"""Read requirements file as a list."""
reqs = read_file(file_name).splitlines()
if not reqs:
raise RuntimeError(
"Unable to read requirements from the %s file"
"That indicates this copy of the source code is incomplete."
% file_name
)
return reqs
PYCRYPTO_DIST = 'pycrypto'
def get_crypto_req():
"""Detect custom crypto from ANSIBLE_CRYPTO_BACKEND env var.
pycrypto or cryptography. We choose a default but allow the user to
override it. This translates into pip install of the sdist deciding what
package to install and also the runtime dependencies that pkg_resources
knows about.
"""
crypto_backend = os.environ.get('ANSIBLE_CRYPTO_BACKEND', '').strip()
if crypto_backend == PYCRYPTO_DIST:
# Attempt to set version requirements
return '%s >= 2.6' % PYCRYPTO_DIST
return crypto_backend or None
def substitute_crypto_to_req(req):
"""Replace crypto requirements if customized."""
crypto_backend = get_crypto_req()
if crypto_backend is None:
return req
def is_not_crypto(r):
CRYPTO_LIBS = PYCRYPTO_DIST, 'cryptography'
return not any(r.lower().startswith(c) for c in CRYPTO_LIBS)
return [r for r in req if is_not_crypto(r)] + [crypto_backend]
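# Illustrative sketch; not part of the original setup.py. With
# ANSIBLE_CRYPTO_BACKEND=pycrypto set in the environment, crypto entries in the
# requirements list are swapped for the chosen backend; unset, the list is
# returned unchanged. The requirement names below are example values.
def _demo_crypto_substitution():
    os.environ['ANSIBLE_CRYPTO_BACKEND'] = PYCRYPTO_DIST
    assert substitute_crypto_to_req(['cryptography', 'jinja2']) == ['jinja2', 'pycrypto >= 2.6']
    del os.environ['ANSIBLE_CRYPTO_BACKEND']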
def read_extras():
"""Specify any extra requirements for installation."""
extras = dict()
extra_requirements_dir = 'packaging/requirements'
for extra_requirements_filename in os.listdir(extra_requirements_dir):
filename_match = re.search(r'^requirements-(\w*).txt$', extra_requirements_filename)
if not filename_match:
continue
extra_req_file_path = os.path.join(extra_requirements_dir, extra_requirements_filename)
try:
extras[filename_match.group(1)] = read_file(extra_req_file_path).splitlines()
except RuntimeError:
pass
return extras
def get_dynamic_setup_params():
"""Add dynamically calculated setup params to static ones."""
return {
# Retrieve the long description from the README
'long_description': read_file('README.rst'),
'install_requires': substitute_crypto_to_req(
read_requirements('requirements.txt'),
),
'extras_require': read_extras(),
}
static_setup_params = dict(
# Use the distutils SDist so that symlinks are not expanded
# Use a custom Build for the same reason
cmdclass={
'build_py': BuildPyCommand,
'build_scripts': BuildScriptsCommand,
'install_lib': InstallLibCommand,
'install_scripts': InstallScriptsCommand,
'sdist': SDistCommand,
},
name='ansible',
version=__version__,
description='Radically simple IT automation',
author=__author__,
author_email='[email protected]',
url='https://ansible.com/',
project_urls={
'Bug Tracker': 'https://github.com/ansible/ansible/issues',
'CI: Shippable': 'https://app.shippable.com/github/ansible/ansible',
'Code of Conduct': 'https://docs.ansible.com/ansible/latest/community/code_of_conduct.html',
'Documentation': 'https://docs.ansible.com/ansible/',
'Mailing lists': 'https://docs.ansible.com/ansible/latest/community/communication.html#mailing-list-information',
'Source Code': 'https://github.com/ansible/ansible',
},
license='GPLv3+',
# Ansible will also make use of a system copy of python-six and
# python-selectors2 if installed but use a Bundled copy if it's not.
python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*',
package_dir={'': 'lib'},
packages=find_packages('lib'),
package_data={
'': [
'executor/powershell/*.ps1',
'module_utils/csharp/*.cs',
'module_utils/csharp/*/*.cs',
'module_utils/powershell/*.psm1',
'module_utils/powershell/*/*.psm1',
'modules/windows/*.ps1',
'modules/windows/*/*.ps1',
'galaxy/data/*/*.*',
'galaxy/data/*/*/.*',
'galaxy/data/*/*/*.*',
'galaxy/data/*/tests/inventory',
'config/base.yml',
'config/module_defaults.yml',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
'Natural Language :: English',
'Operating System :: POSIX',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: System :: Installation/Setup',
'Topic :: System :: Systems Administration',
'Topic :: Utilities',
],
scripts=[
'bin/ansible',
'bin/ansible-playbook',
'bin/ansible-pull',
'bin/ansible-doc',
'bin/ansible-galaxy',
'bin/ansible-console',
'bin/ansible-connection',
'bin/ansible-vault',
'bin/ansible-config',
'bin/ansible-inventory',
],
data_files=[],
# Installing as zip files would break due to references to __file__
zip_safe=False
)
def main():
"""Invoke installation process using setuptools."""
setup_params = dict(static_setup_params, **get_dynamic_setup_params())
ignore_warning_regex = (
r"Unknown distribution option: '(project_urls|python_requires)'"
)
warnings.filterwarnings(
'ignore',
message=ignore_warning_regex,
category=UserWarning,
module='distutils.dist',
)
setup(**setup_params)
warnings.resetwarnings()
if __name__ == '__main__':
main()
| gpl-3.0 | -3,264,189,986,342,779,400 | 33.35514 | 121 | 0.609993 | false | 4.13964 | false | false | false |
clumsy/intellij-community | python/lib/Lib/encodings/cp866.py | 593 | 34652 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP866.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp866',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x0410, # CYRILLIC CAPITAL LETTER A
0x0081: 0x0411, # CYRILLIC CAPITAL LETTER BE
0x0082: 0x0412, # CYRILLIC CAPITAL LETTER VE
0x0083: 0x0413, # CYRILLIC CAPITAL LETTER GHE
0x0084: 0x0414, # CYRILLIC CAPITAL LETTER DE
0x0085: 0x0415, # CYRILLIC CAPITAL LETTER IE
0x0086: 0x0416, # CYRILLIC CAPITAL LETTER ZHE
0x0087: 0x0417, # CYRILLIC CAPITAL LETTER ZE
0x0088: 0x0418, # CYRILLIC CAPITAL LETTER I
0x0089: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I
0x008a: 0x041a, # CYRILLIC CAPITAL LETTER KA
0x008b: 0x041b, # CYRILLIC CAPITAL LETTER EL
0x008c: 0x041c, # CYRILLIC CAPITAL LETTER EM
0x008d: 0x041d, # CYRILLIC CAPITAL LETTER EN
0x008e: 0x041e, # CYRILLIC CAPITAL LETTER O
0x008f: 0x041f, # CYRILLIC CAPITAL LETTER PE
0x0090: 0x0420, # CYRILLIC CAPITAL LETTER ER
0x0091: 0x0421, # CYRILLIC CAPITAL LETTER ES
0x0092: 0x0422, # CYRILLIC CAPITAL LETTER TE
0x0093: 0x0423, # CYRILLIC CAPITAL LETTER U
0x0094: 0x0424, # CYRILLIC CAPITAL LETTER EF
0x0095: 0x0425, # CYRILLIC CAPITAL LETTER HA
0x0096: 0x0426, # CYRILLIC CAPITAL LETTER TSE
0x0097: 0x0427, # CYRILLIC CAPITAL LETTER CHE
0x0098: 0x0428, # CYRILLIC CAPITAL LETTER SHA
0x0099: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA
0x009a: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN
0x009b: 0x042b, # CYRILLIC CAPITAL LETTER YERU
0x009c: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x009d: 0x042d, # CYRILLIC CAPITAL LETTER E
0x009e: 0x042e, # CYRILLIC CAPITAL LETTER YU
0x009f: 0x042f, # CYRILLIC CAPITAL LETTER YA
0x00a0: 0x0430, # CYRILLIC SMALL LETTER A
0x00a1: 0x0431, # CYRILLIC SMALL LETTER BE
0x00a2: 0x0432, # CYRILLIC SMALL LETTER VE
0x00a3: 0x0433, # CYRILLIC SMALL LETTER GHE
0x00a4: 0x0434, # CYRILLIC SMALL LETTER DE
0x00a5: 0x0435, # CYRILLIC SMALL LETTER IE
0x00a6: 0x0436, # CYRILLIC SMALL LETTER ZHE
0x00a7: 0x0437, # CYRILLIC SMALL LETTER ZE
0x00a8: 0x0438, # CYRILLIC SMALL LETTER I
0x00a9: 0x0439, # CYRILLIC SMALL LETTER SHORT I
0x00aa: 0x043a, # CYRILLIC SMALL LETTER KA
0x00ab: 0x043b, # CYRILLIC SMALL LETTER EL
0x00ac: 0x043c, # CYRILLIC SMALL LETTER EM
0x00ad: 0x043d, # CYRILLIC SMALL LETTER EN
0x00ae: 0x043e, # CYRILLIC SMALL LETTER O
0x00af: 0x043f, # CYRILLIC SMALL LETTER PE
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x0440, # CYRILLIC SMALL LETTER ER
0x00e1: 0x0441, # CYRILLIC SMALL LETTER ES
0x00e2: 0x0442, # CYRILLIC SMALL LETTER TE
0x00e3: 0x0443, # CYRILLIC SMALL LETTER U
0x00e4: 0x0444, # CYRILLIC SMALL LETTER EF
0x00e5: 0x0445, # CYRILLIC SMALL LETTER HA
0x00e6: 0x0446, # CYRILLIC SMALL LETTER TSE
0x00e7: 0x0447, # CYRILLIC SMALL LETTER CHE
0x00e8: 0x0448, # CYRILLIC SMALL LETTER SHA
0x00e9: 0x0449, # CYRILLIC SMALL LETTER SHCHA
0x00ea: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN
0x00eb: 0x044b, # CYRILLIC SMALL LETTER YERU
0x00ec: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN
0x00ed: 0x044d, # CYRILLIC SMALL LETTER E
0x00ee: 0x044e, # CYRILLIC SMALL LETTER YU
0x00ef: 0x044f, # CYRILLIC SMALL LETTER YA
0x00f0: 0x0401, # CYRILLIC CAPITAL LETTER IO
0x00f1: 0x0451, # CYRILLIC SMALL LETTER IO
0x00f2: 0x0404, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
0x00f3: 0x0454, # CYRILLIC SMALL LETTER UKRAINIAN IE
0x00f4: 0x0407, # CYRILLIC CAPITAL LETTER YI
0x00f5: 0x0457, # CYRILLIC SMALL LETTER YI
0x00f6: 0x040e, # CYRILLIC CAPITAL LETTER SHORT U
0x00f7: 0x045e, # CYRILLIC SMALL LETTER SHORT U
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x2116, # NUMERO SIGN
0x00fd: 0x00a4, # CURRENCY SIGN
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\u0410' # 0x0080 -> CYRILLIC CAPITAL LETTER A
u'\u0411' # 0x0081 -> CYRILLIC CAPITAL LETTER BE
u'\u0412' # 0x0082 -> CYRILLIC CAPITAL LETTER VE
u'\u0413' # 0x0083 -> CYRILLIC CAPITAL LETTER GHE
u'\u0414' # 0x0084 -> CYRILLIC CAPITAL LETTER DE
u'\u0415' # 0x0085 -> CYRILLIC CAPITAL LETTER IE
u'\u0416' # 0x0086 -> CYRILLIC CAPITAL LETTER ZHE
u'\u0417' # 0x0087 -> CYRILLIC CAPITAL LETTER ZE
u'\u0418' # 0x0088 -> CYRILLIC CAPITAL LETTER I
u'\u0419' # 0x0089 -> CYRILLIC CAPITAL LETTER SHORT I
u'\u041a' # 0x008a -> CYRILLIC CAPITAL LETTER KA
u'\u041b' # 0x008b -> CYRILLIC CAPITAL LETTER EL
u'\u041c' # 0x008c -> CYRILLIC CAPITAL LETTER EM
u'\u041d' # 0x008d -> CYRILLIC CAPITAL LETTER EN
u'\u041e' # 0x008e -> CYRILLIC CAPITAL LETTER O
u'\u041f' # 0x008f -> CYRILLIC CAPITAL LETTER PE
u'\u0420' # 0x0090 -> CYRILLIC CAPITAL LETTER ER
u'\u0421' # 0x0091 -> CYRILLIC CAPITAL LETTER ES
u'\u0422' # 0x0092 -> CYRILLIC CAPITAL LETTER TE
u'\u0423' # 0x0093 -> CYRILLIC CAPITAL LETTER U
u'\u0424' # 0x0094 -> CYRILLIC CAPITAL LETTER EF
u'\u0425' # 0x0095 -> CYRILLIC CAPITAL LETTER HA
u'\u0426' # 0x0096 -> CYRILLIC CAPITAL LETTER TSE
u'\u0427' # 0x0097 -> CYRILLIC CAPITAL LETTER CHE
u'\u0428' # 0x0098 -> CYRILLIC CAPITAL LETTER SHA
u'\u0429' # 0x0099 -> CYRILLIC CAPITAL LETTER SHCHA
u'\u042a' # 0x009a -> CYRILLIC CAPITAL LETTER HARD SIGN
u'\u042b' # 0x009b -> CYRILLIC CAPITAL LETTER YERU
u'\u042c' # 0x009c -> CYRILLIC CAPITAL LETTER SOFT SIGN
u'\u042d' # 0x009d -> CYRILLIC CAPITAL LETTER E
u'\u042e' # 0x009e -> CYRILLIC CAPITAL LETTER YU
u'\u042f' # 0x009f -> CYRILLIC CAPITAL LETTER YA
u'\u0430' # 0x00a0 -> CYRILLIC SMALL LETTER A
u'\u0431' # 0x00a1 -> CYRILLIC SMALL LETTER BE
u'\u0432' # 0x00a2 -> CYRILLIC SMALL LETTER VE
u'\u0433' # 0x00a3 -> CYRILLIC SMALL LETTER GHE
u'\u0434' # 0x00a4 -> CYRILLIC SMALL LETTER DE
u'\u0435' # 0x00a5 -> CYRILLIC SMALL LETTER IE
u'\u0436' # 0x00a6 -> CYRILLIC SMALL LETTER ZHE
u'\u0437' # 0x00a7 -> CYRILLIC SMALL LETTER ZE
u'\u0438' # 0x00a8 -> CYRILLIC SMALL LETTER I
u'\u0439' # 0x00a9 -> CYRILLIC SMALL LETTER SHORT I
u'\u043a' # 0x00aa -> CYRILLIC SMALL LETTER KA
u'\u043b' # 0x00ab -> CYRILLIC SMALL LETTER EL
u'\u043c' # 0x00ac -> CYRILLIC SMALL LETTER EM
u'\u043d' # 0x00ad -> CYRILLIC SMALL LETTER EN
u'\u043e' # 0x00ae -> CYRILLIC SMALL LETTER O
u'\u043f' # 0x00af -> CYRILLIC SMALL LETTER PE
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
u'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
u'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
u'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
u'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
u'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
u'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
u'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
u'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
u'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
u'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
u'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
u'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
u'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
u'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\u258c' # 0x00dd -> LEFT HALF BLOCK
u'\u2590' # 0x00de -> RIGHT HALF BLOCK
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\u0440' # 0x00e0 -> CYRILLIC SMALL LETTER ER
u'\u0441' # 0x00e1 -> CYRILLIC SMALL LETTER ES
u'\u0442' # 0x00e2 -> CYRILLIC SMALL LETTER TE
u'\u0443' # 0x00e3 -> CYRILLIC SMALL LETTER U
u'\u0444' # 0x00e4 -> CYRILLIC SMALL LETTER EF
u'\u0445' # 0x00e5 -> CYRILLIC SMALL LETTER HA
u'\u0446' # 0x00e6 -> CYRILLIC SMALL LETTER TSE
u'\u0447' # 0x00e7 -> CYRILLIC SMALL LETTER CHE
u'\u0448' # 0x00e8 -> CYRILLIC SMALL LETTER SHA
u'\u0449' # 0x00e9 -> CYRILLIC SMALL LETTER SHCHA
u'\u044a' # 0x00ea -> CYRILLIC SMALL LETTER HARD SIGN
u'\u044b' # 0x00eb -> CYRILLIC SMALL LETTER YERU
u'\u044c' # 0x00ec -> CYRILLIC SMALL LETTER SOFT SIGN
u'\u044d' # 0x00ed -> CYRILLIC SMALL LETTER E
u'\u044e' # 0x00ee -> CYRILLIC SMALL LETTER YU
u'\u044f' # 0x00ef -> CYRILLIC SMALL LETTER YA
u'\u0401' # 0x00f0 -> CYRILLIC CAPITAL LETTER IO
u'\u0451' # 0x00f1 -> CYRILLIC SMALL LETTER IO
u'\u0404' # 0x00f2 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
u'\u0454' # 0x00f3 -> CYRILLIC SMALL LETTER UKRAINIAN IE
u'\u0407' # 0x00f4 -> CYRILLIC CAPITAL LETTER YI
u'\u0457' # 0x00f5 -> CYRILLIC SMALL LETTER YI
u'\u040e' # 0x00f6 -> CYRILLIC CAPITAL LETTER SHORT U
u'\u045e' # 0x00f7 -> CYRILLIC SMALL LETTER SHORT U
u'\xb0' # 0x00f8 -> DEGREE SIGN
u'\u2219' # 0x00f9 -> BULLET OPERATOR
u'\xb7' # 0x00fa -> MIDDLE DOT
u'\u221a' # 0x00fb -> SQUARE ROOT
u'\u2116' # 0x00fc -> NUMERO SIGN
u'\xa4' # 0x00fd -> CURRENCY SIGN
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a4: 0x00fd, # CURRENCY SIGN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x0401: 0x00f0, # CYRILLIC CAPITAL LETTER IO
0x0404: 0x00f2, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
0x0407: 0x00f4, # CYRILLIC CAPITAL LETTER YI
0x040e: 0x00f6, # CYRILLIC CAPITAL LETTER SHORT U
0x0410: 0x0080, # CYRILLIC CAPITAL LETTER A
0x0411: 0x0081, # CYRILLIC CAPITAL LETTER BE
0x0412: 0x0082, # CYRILLIC CAPITAL LETTER VE
0x0413: 0x0083, # CYRILLIC CAPITAL LETTER GHE
0x0414: 0x0084, # CYRILLIC CAPITAL LETTER DE
0x0415: 0x0085, # CYRILLIC CAPITAL LETTER IE
0x0416: 0x0086, # CYRILLIC CAPITAL LETTER ZHE
0x0417: 0x0087, # CYRILLIC CAPITAL LETTER ZE
0x0418: 0x0088, # CYRILLIC CAPITAL LETTER I
0x0419: 0x0089, # CYRILLIC CAPITAL LETTER SHORT I
0x041a: 0x008a, # CYRILLIC CAPITAL LETTER KA
0x041b: 0x008b, # CYRILLIC CAPITAL LETTER EL
0x041c: 0x008c, # CYRILLIC CAPITAL LETTER EM
0x041d: 0x008d, # CYRILLIC CAPITAL LETTER EN
0x041e: 0x008e, # CYRILLIC CAPITAL LETTER O
0x041f: 0x008f, # CYRILLIC CAPITAL LETTER PE
0x0420: 0x0090, # CYRILLIC CAPITAL LETTER ER
0x0421: 0x0091, # CYRILLIC CAPITAL LETTER ES
0x0422: 0x0092, # CYRILLIC CAPITAL LETTER TE
0x0423: 0x0093, # CYRILLIC CAPITAL LETTER U
0x0424: 0x0094, # CYRILLIC CAPITAL LETTER EF
0x0425: 0x0095, # CYRILLIC CAPITAL LETTER HA
0x0426: 0x0096, # CYRILLIC CAPITAL LETTER TSE
0x0427: 0x0097, # CYRILLIC CAPITAL LETTER CHE
0x0428: 0x0098, # CYRILLIC CAPITAL LETTER SHA
0x0429: 0x0099, # CYRILLIC CAPITAL LETTER SHCHA
0x042a: 0x009a, # CYRILLIC CAPITAL LETTER HARD SIGN
0x042b: 0x009b, # CYRILLIC CAPITAL LETTER YERU
0x042c: 0x009c, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x042d: 0x009d, # CYRILLIC CAPITAL LETTER E
0x042e: 0x009e, # CYRILLIC CAPITAL LETTER YU
0x042f: 0x009f, # CYRILLIC CAPITAL LETTER YA
0x0430: 0x00a0, # CYRILLIC SMALL LETTER A
0x0431: 0x00a1, # CYRILLIC SMALL LETTER BE
0x0432: 0x00a2, # CYRILLIC SMALL LETTER VE
0x0433: 0x00a3, # CYRILLIC SMALL LETTER GHE
0x0434: 0x00a4, # CYRILLIC SMALL LETTER DE
0x0435: 0x00a5, # CYRILLIC SMALL LETTER IE
0x0436: 0x00a6, # CYRILLIC SMALL LETTER ZHE
0x0437: 0x00a7, # CYRILLIC SMALL LETTER ZE
0x0438: 0x00a8, # CYRILLIC SMALL LETTER I
0x0439: 0x00a9, # CYRILLIC SMALL LETTER SHORT I
0x043a: 0x00aa, # CYRILLIC SMALL LETTER KA
0x043b: 0x00ab, # CYRILLIC SMALL LETTER EL
0x043c: 0x00ac, # CYRILLIC SMALL LETTER EM
0x043d: 0x00ad, # CYRILLIC SMALL LETTER EN
0x043e: 0x00ae, # CYRILLIC SMALL LETTER O
0x043f: 0x00af, # CYRILLIC SMALL LETTER PE
0x0440: 0x00e0, # CYRILLIC SMALL LETTER ER
0x0441: 0x00e1, # CYRILLIC SMALL LETTER ES
0x0442: 0x00e2, # CYRILLIC SMALL LETTER TE
0x0443: 0x00e3, # CYRILLIC SMALL LETTER U
0x0444: 0x00e4, # CYRILLIC SMALL LETTER EF
0x0445: 0x00e5, # CYRILLIC SMALL LETTER HA
0x0446: 0x00e6, # CYRILLIC SMALL LETTER TSE
0x0447: 0x00e7, # CYRILLIC SMALL LETTER CHE
0x0448: 0x00e8, # CYRILLIC SMALL LETTER SHA
0x0449: 0x00e9, # CYRILLIC SMALL LETTER SHCHA
0x044a: 0x00ea, # CYRILLIC SMALL LETTER HARD SIGN
0x044b: 0x00eb, # CYRILLIC SMALL LETTER YERU
0x044c: 0x00ec, # CYRILLIC SMALL LETTER SOFT SIGN
0x044d: 0x00ed, # CYRILLIC SMALL LETTER E
0x044e: 0x00ee, # CYRILLIC SMALL LETTER YU
0x044f: 0x00ef, # CYRILLIC SMALL LETTER YA
0x0451: 0x00f1, # CYRILLIC SMALL LETTER IO
0x0454: 0x00f3, # CYRILLIC SMALL LETTER UKRAINIAN IE
0x0457: 0x00f5, # CYRILLIC SMALL LETTER YI
0x045e: 0x00f7, # CYRILLIC SMALL LETTER SHORT U
0x2116: 0x00fc, # NUMERO SIGN
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
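# --- Added illustrative sketch (not part of the original codec module) ---
# encoding_map above is the inverse of the decoding table: it maps Unicode
# code points back to the single-byte values of this code page. A minimal,
# uncalled self-check using only names defined in this module:
def _selfcheck_encoding_map():
    # CYRILLIC CAPITAL LETTER A (U+0410) encodes to byte 0x80 in this code page.
    assert encoding_map[0x0410] == 0x0080
    # BLACK SQUARE (U+25A0) encodes to byte 0xfe.
    assert encoding_map[0x25a0] == 0x00fe
    # Code points absent from encoding_map cannot be represented in this
    # single-byte encoding and would normally raise UnicodeEncodeError.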
| apache-2.0 | 4,318,136,999,309,368,000 | 48.644699 | 97 | 0.611278 | false | 2.941596 | false | false | false |
Simran-B/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/numbers.py | 59 | 10271 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) for numbers, according to PEP 3141.
TODO: Fill out more detailed documentation on the operators."""
from __future__ import division
from abc import ABCMeta, abstractmethod, abstractproperty
__all__ = ["Number", "Complex", "Real", "Rational", "Integral"]
class Number(object):
"""All numbers inherit from this class.
If you just want to check if an argument x is a number, without
caring what kind, use isinstance(x, Number).
"""
__metaclass__ = ABCMeta
__slots__ = ()
# Concrete numeric types must provide their own hash implementation
__hash__ = None
## Notes on Decimal
## ----------------
## Decimal has all of the methods specified by the Real abc, but it should
## not be registered as a Real because decimals do not interoperate with
## binary floats (i.e. Decimal('3.14') + 2.71828 is undefined). But,
## abstract reals are expected to interoperate (i.e. R1 + R2 should be
## expected to work if R1 and R2 are both Reals).
class Complex(Number):
"""Complex defines the operations that work on the builtin complex type.
In short, those are: a conversion to complex, .real, .imag, +, -,
*, /, abs(), .conjugate, ==, and !=.
If it is given heterogeneous arguments, and doesn't have special
knowledge about them, it should fall back to the builtin complex
type as described below.
"""
__slots__ = ()
@abstractmethod
def __complex__(self):
"""Return a builtin complex instance. Called for complex(self)."""
# Will be __bool__ in 3.0.
def __nonzero__(self):
"""True if self != 0. Called for bool(self)."""
return self != 0
@abstractproperty
def real(self):
"""Retrieve the real component of this number.
This should subclass Real.
"""
raise NotImplementedError
@abstractproperty
def imag(self):
"""Retrieve the imaginary component of this number.
This should subclass Real.
"""
raise NotImplementedError
@abstractmethod
def __add__(self, other):
"""self + other"""
raise NotImplementedError
@abstractmethod
def __radd__(self, other):
"""other + self"""
raise NotImplementedError
@abstractmethod
def __neg__(self):
"""-self"""
raise NotImplementedError
@abstractmethod
def __pos__(self):
"""+self"""
raise NotImplementedError
def __sub__(self, other):
"""self - other"""
return self + -other
def __rsub__(self, other):
"""other - self"""
return -self + other
@abstractmethod
def __mul__(self, other):
"""self * other"""
raise NotImplementedError
@abstractmethod
def __rmul__(self, other):
"""other * self"""
raise NotImplementedError
@abstractmethod
def __div__(self, other):
"""self / other without __future__ division
May promote to float.
"""
raise NotImplementedError
@abstractmethod
def __rdiv__(self, other):
"""other / self without __future__ division"""
raise NotImplementedError
@abstractmethod
def __truediv__(self, other):
"""self / other with __future__ division.
Should promote to float when necessary.
"""
raise NotImplementedError
@abstractmethod
def __rtruediv__(self, other):
"""other / self with __future__ division"""
raise NotImplementedError
@abstractmethod
def __pow__(self, exponent):
"""self**exponent; should promote to float or complex when necessary."""
raise NotImplementedError
@abstractmethod
def __rpow__(self, base):
"""base ** self"""
raise NotImplementedError
@abstractmethod
def __abs__(self):
"""Returns the Real distance from 0. Called for abs(self)."""
raise NotImplementedError
@abstractmethod
def conjugate(self):
"""(x+y*i).conjugate() returns (x-y*i)."""
raise NotImplementedError
@abstractmethod
def __eq__(self, other):
"""self == other"""
raise NotImplementedError
def __ne__(self, other):
"""self != other"""
# The default __ne__ doesn't negate __eq__ until 3.0.
return not (self == other)
Complex.register(complex)
class Real(Complex):
"""To Complex, Real adds the operations that work on real numbers.
In short, those are: a conversion to float, trunc(), divmod,
%, <, <=, >, and >=.
Real also provides defaults for the derived operations.
"""
__slots__ = ()
@abstractmethod
def __float__(self):
"""Any Real can be converted to a native float object.
Called for float(self)."""
raise NotImplementedError
@abstractmethod
def __trunc__(self):
"""trunc(self): Truncates self to an Integral.
Returns an Integral i such that:
* i>0 iff self>0;
* abs(i) <= abs(self);
* for any Integral j satisfying the first two conditions,
abs(i) >= abs(j) [i.e. i has "maximal" abs among those].
i.e. "truncate towards 0".
"""
raise NotImplementedError
def __divmod__(self, other):
"""divmod(self, other): The pair (self // other, self % other).
Sometimes this can be computed faster than the pair of
operations.
"""
return (self // other, self % other)
def __rdivmod__(self, other):
"""divmod(other, self): The pair (other // self, other % self).
Sometimes this can be computed faster than the pair of
operations.
"""
return (other // self, other % self)
@abstractmethod
def __floordiv__(self, other):
"""self // other: The floor() of self/other."""
raise NotImplementedError
@abstractmethod
def __rfloordiv__(self, other):
"""other // self: The floor() of other/self."""
raise NotImplementedError
@abstractmethod
def __mod__(self, other):
"""self % other"""
raise NotImplementedError
@abstractmethod
def __rmod__(self, other):
"""other % self"""
raise NotImplementedError
@abstractmethod
def __lt__(self, other):
"""self < other
< on Reals defines a total ordering, except perhaps for NaN."""
raise NotImplementedError
@abstractmethod
def __le__(self, other):
"""self <= other"""
raise NotImplementedError
# Concrete implementations of Complex abstract methods.
def __complex__(self):
"""complex(self) == complex(float(self), 0)"""
return complex(float(self))
@property
def real(self):
"""Real numbers are their real component."""
return +self
@property
def imag(self):
"""Real numbers have no imaginary component."""
return 0
def conjugate(self):
"""Conjugate is a no-op for Reals."""
return +self
Real.register(float)
class Rational(Real):
""".numerator and .denominator should be in lowest terms."""
__slots__ = ()
@abstractproperty
def numerator(self):
raise NotImplementedError
@abstractproperty
def denominator(self):
raise NotImplementedError
# Concrete implementation of Real's conversion to float.
def __float__(self):
"""float(self) = self.numerator / self.denominator
It's important that this conversion use the integer's "true"
division rather than casting one side to float before dividing
so that ratios of huge integers convert without overflowing.
"""
return self.numerator / self.denominator
class Integral(Rational):
"""Integral adds a conversion to long and the bit-string operations."""
__slots__ = ()
@abstractmethod
def __long__(self):
"""long(self)"""
raise NotImplementedError
def __index__(self):
"""index(self)"""
return long(self)
@abstractmethod
def __pow__(self, exponent, modulus=None):
"""self ** exponent % modulus, but maybe faster.
Accept the modulus argument if you want to support the
3-argument version of pow(). Raise a TypeError if exponent < 0
or any argument isn't Integral. Otherwise, just implement the
2-argument version described in Complex.
"""
raise NotImplementedError
@abstractmethod
def __lshift__(self, other):
"""self << other"""
raise NotImplementedError
@abstractmethod
def __rlshift__(self, other):
"""other << self"""
raise NotImplementedError
@abstractmethod
def __rshift__(self, other):
"""self >> other"""
raise NotImplementedError
@abstractmethod
def __rrshift__(self, other):
"""other >> self"""
raise NotImplementedError
@abstractmethod
def __and__(self, other):
"""self & other"""
raise NotImplementedError
@abstractmethod
def __rand__(self, other):
"""other & self"""
raise NotImplementedError
@abstractmethod
def __xor__(self, other):
"""self ^ other"""
raise NotImplementedError
@abstractmethod
def __rxor__(self, other):
"""other ^ self"""
raise NotImplementedError
@abstractmethod
def __or__(self, other):
"""self | other"""
raise NotImplementedError
@abstractmethod
def __ror__(self, other):
"""other | self"""
raise NotImplementedError
@abstractmethod
def __invert__(self):
"""~self"""
raise NotImplementedError
# Concrete implementations of Rational and Real abstract methods.
def __float__(self):
"""float(self) == float(long(self))"""
return float(long(self))
@property
def numerator(self):
"""Integers are their own numerators."""
return +self
@property
def denominator(self):
"""Integers have a denominator of 1."""
return 1
Integral.register(int)
Integral.register(long)
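# --- Added illustrative sketch (not part of the stdlib module) ---
# The ABCs above form the numeric tower Number > Complex > Real > Rational >
# Integral, and the register() calls make the builtin types virtual
# subclasses, so isinstance() checks work without real inheritance. Uncalled example:
def _numeric_tower_examples():
    assert isinstance(3, Integral) and isinstance(3, Real)
    assert isinstance(2.5, Real) and not isinstance(2.5, Integral)
    assert isinstance(1j, Number) and not isinstance(1j, Real)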
| apache-2.0 | 3,724,814,421,603,377,700 | 25.268542 | 80 | 0.595852 | false | 4.700686 | false | false | false |
mgracer48/panda3d | direct/src/distributed/MsgTypes.py | 5 | 5594 | """MsgTypes module: contains distributed object message types"""
from direct.showbase.PythonUtil import invertDictLossless
MsgName2Id = {
# 2 new params: passwd, char bool 0/1 (1 = new account)
# 2 new return values: 129 = not found, 12 = bad passwd,
'CLIENT_LOGIN': 1,
'CLIENT_LOGIN_RESP': 2,
'CLIENT_GET_AVATARS': 3,
# Sent by the server when it is dropping the connection deliberately.
'CLIENT_GO_GET_LOST': 4,
'CLIENT_GET_AVATARS_RESP': 5,
'CLIENT_CREATE_AVATAR': 6,
'CLIENT_CREATE_AVATAR_RESP': 7,
'CLIENT_GET_FRIEND_LIST': 10,
'CLIENT_GET_FRIEND_LIST_RESP': 11,
'CLIENT_GET_AVATAR_DETAILS': 14,
'CLIENT_GET_AVATAR_DETAILS_RESP': 15,
'CLIENT_LOGIN_2': 16,
'CLIENT_LOGIN_2_RESP': 17,
'CLIENT_OBJECT_UPDATE_FIELD': 24,
'CLIENT_OBJECT_UPDATE_FIELD_RESP': 24,
'CLIENT_OBJECT_DISABLE': 25,
'CLIENT_OBJECT_DISABLE_RESP': 25,
'CLIENT_OBJECT_DISABLE_OWNER': 26,
'CLIENT_OBJECT_DISABLE_OWNER_RESP': 26,
'CLIENT_OBJECT_DELETE': 27,
'CLIENT_OBJECT_DELETE_RESP': 27,
'CLIENT_SET_ZONE_CMU': 29,
'CLIENT_REMOVE_ZONE': 30,
'CLIENT_SET_AVATAR': 32,
'CLIENT_CREATE_OBJECT_REQUIRED': 34,
'CLIENT_CREATE_OBJECT_REQUIRED_RESP': 34,
'CLIENT_CREATE_OBJECT_REQUIRED_OTHER': 35,
'CLIENT_CREATE_OBJECT_REQUIRED_OTHER_RESP': 35,
'CLIENT_CREATE_OBJECT_REQUIRED_OTHER_OWNER': 36,
'CLIENT_CREATE_OBJECT_REQUIRED_OTHER_OWNER_RESP':36,
'CLIENT_REQUEST_GENERATES': 36,
'CLIENT_DISCONNECT': 37,
'CLIENT_GET_STATE_RESP': 47,
'CLIENT_DONE_INTEREST_RESP': 48,
'CLIENT_DELETE_AVATAR': 49,
'CLIENT_DELETE_AVATAR_RESP': 5,
'CLIENT_HEARTBEAT': 52,
'CLIENT_FRIEND_ONLINE': 53,
'CLIENT_FRIEND_OFFLINE': 54,
'CLIENT_REMOVE_FRIEND': 56,
'CLIENT_CHANGE_PASSWORD': 65,
'CLIENT_SET_NAME_PATTERN': 67,
'CLIENT_SET_NAME_PATTERN_ANSWER': 68,
'CLIENT_SET_WISHNAME': 70,
'CLIENT_SET_WISHNAME_RESP': 71,
'CLIENT_SET_WISHNAME_CLEAR': 72,
'CLIENT_SET_SECURITY': 73,
'CLIENT_SET_DOID_RANGE': 74,
'CLIENT_GET_AVATARS_RESP2': 75,
'CLIENT_CREATE_AVATAR2': 76,
'CLIENT_SYSTEM_MESSAGE': 78,
'CLIENT_SET_AVTYPE': 80,
'CLIENT_GET_PET_DETAILS': 81,
'CLIENT_GET_PET_DETAILS_RESP': 82,
'CLIENT_ADD_INTEREST': 97,
'CLIENT_REMOVE_INTEREST': 99,
'CLIENT_OBJECT_LOCATION': 102,
'CLIENT_LOGIN_3': 111,
'CLIENT_LOGIN_3_RESP': 110,
'CLIENT_GET_FRIEND_LIST_EXTENDED': 115,
'CLIENT_GET_FRIEND_LIST_EXTENDED_RESP': 116,
'CLIENT_SET_FIELD_SENDABLE': 120,
'CLIENT_SYSTEMMESSAGE_AKNOWLEDGE': 123,
'CLIENT_CHANGE_GENERATE_ORDER': 124,
# new toontown specific login message, adds last logged in, and if child account has parent account
'CLIENT_LOGIN_TOONTOWN': 125,
'CLIENT_LOGIN_TOONTOWN_RESP': 126,
'STATESERVER_OBJECT_GENERATE_WITH_REQUIRED': 2001,
'STATESERVER_OBJECT_GENERATE_WITH_REQUIRED_OTHER': 2003,
'STATESERVER_OBJECT_UPDATE_FIELD': 2004,
'STATESERVER_OBJECT_CREATE_WITH_REQUIRED_CONTEXT': 2050,
'STATESERVER_OBJECT_CREATE_WITH_REQUIR_OTHER_CONTEXT': 2051,
'STATESERVER_BOUNCE_MESSAGE': 2086,
}
# create id->name table for debugging
MsgId2Names = invertDictLossless(MsgName2Id)
# put msg names in module scope, assigned to msg value
globals().update(MsgName2Id)
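# Added illustrative note: invertDictLossless presumably maps each id to the
# list of names sharing it ("lossless" because several ids above are reused,
# e.g. 24, 25, 26, 27 and 36). A debugging helper might look like this
# (hypothetical, not called anywhere):
def describeMsgId(msgId):
    names = MsgId2Names.get(msgId, ['<unknown>'])
    return '%s (%s)' % (msgId, ', '.join(names))
# e.g. describeMsgId(24) -> '24 (CLIENT_OBJECT_UPDATE_FIELD, CLIENT_OBJECT_UPDATE_FIELD_RESP)'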
# These messages are ignored when the client is headed to the quiet zone
QUIET_ZONE_IGNORED_LIST = [
# We mustn't ignore updates, because some updates for localToon
# are always important.
#CLIENT_OBJECT_UPDATE_FIELD,
# These are now handled. If it is a create for a class that is in the
# uber zone, we should create it.
#CLIENT_CREATE_OBJECT_REQUIRED,
#CLIENT_CREATE_OBJECT_REQUIRED_OTHER,
]
# The following is a different set of numbers from above.
# These are the sub-message types for CLIENT_LOGIN_2.
CLIENT_LOGIN_2_GREEN = 1 # Disney's GoReg subscription token, not used.
CLIENT_LOGIN_2_PLAY_TOKEN = 2 # VR Studio PlayToken.
CLIENT_LOGIN_2_BLUE = 3 # The international GoReg token.
CLIENT_LOGIN_3_DISL_TOKEN = 4 # SSL encoded blob from DISL system.
| bsd-3-clause | -4,714,705,701,539,997,000 | 41.70229 | 102 | 0.499821 | false | 3.958953 | false | false | false |
Bulochkin/tensorflow_pack | tensorflow/python/kernel_tests/distributions/multinomial_test.py | 48 | 13141 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import multinomial
from tensorflow.python.platform import test
class MultinomialTest(test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(42)
def testSimpleShapes(self):
with self.test_session():
p = [.1, .3, .6]
dist = multinomial.Multinomial(total_count=1., probs=p)
self.assertEqual(3, dist.event_shape_tensor().eval())
self.assertAllEqual([], dist.batch_shape_tensor().eval())
self.assertEqual(tensor_shape.TensorShape([3]), dist.event_shape)
self.assertEqual(tensor_shape.TensorShape([]), dist.batch_shape)
def testComplexShapes(self):
with self.test_session():
p = 0.5 * np.ones([3, 2, 2], dtype=np.float32)
n = [[3., 2], [4, 5], [6, 7]]
dist = multinomial.Multinomial(total_count=n, probs=p)
self.assertEqual(2, dist.event_shape_tensor().eval())
self.assertAllEqual([3, 2], dist.batch_shape_tensor().eval())
self.assertEqual(tensor_shape.TensorShape([2]), dist.event_shape)
self.assertEqual(tensor_shape.TensorShape([3, 2]), dist.batch_shape)
def testN(self):
p = [[0.1, 0.2, 0.7], [0.2, 0.3, 0.5]]
n = [[3.], [4]]
with self.test_session():
dist = multinomial.Multinomial(total_count=n, probs=p)
self.assertEqual((2, 1), dist.total_count.get_shape())
self.assertAllClose(n, dist.total_count.eval())
def testP(self):
p = [[0.1, 0.2, 0.7]]
with self.test_session():
dist = multinomial.Multinomial(total_count=3., probs=p)
self.assertEqual((1, 3), dist.probs.get_shape())
self.assertEqual((1, 3), dist.logits.get_shape())
self.assertAllClose(p, dist.probs.eval())
def testLogits(self):
p = np.array([[0.1, 0.2, 0.7]], dtype=np.float32)
logits = np.log(p) - 50.
with self.test_session():
multinom = multinomial.Multinomial(total_count=3., logits=logits)
self.assertEqual((1, 3), multinom.probs.get_shape())
self.assertEqual((1, 3), multinom.logits.get_shape())
self.assertAllClose(p, multinom.probs.eval())
self.assertAllClose(logits, multinom.logits.eval())
def testPmfandCountsAgree(self):
p = [[0.1, 0.2, 0.7]]
n = [[5.]]
with self.test_session():
dist = multinomial.Multinomial(total_count=n, probs=p, validate_args=True)
dist.prob([2., 3, 0]).eval()
dist.prob([3., 0, 2]).eval()
with self.assertRaisesOpError("must be non-negative"):
dist.prob([-1., 4, 2]).eval()
with self.assertRaisesOpError("counts must sum to `self.total_count`"):
dist.prob([3., 3, 0]).eval()
def testPmfNonIntegerCounts(self):
p = [[0.1, 0.2, 0.7]]
n = [[5.]]
with self.test_session():
# No errors with integer n.
multinom = multinomial.Multinomial(
total_count=n, probs=p, validate_args=True)
multinom.prob([2., 1, 2]).eval()
multinom.prob([3., 0, 2]).eval()
# Counts don't sum to n.
with self.assertRaisesOpError("counts must sum to `self.total_count`"):
multinom.prob([2., 3, 2]).eval()
# Counts are non-integers.
x = array_ops.placeholder(dtypes.float32)
with self.assertRaisesOpError(
"cannot contain fractional components."):
multinom.prob(x).eval(feed_dict={x: [1.0, 2.5, 1.5]})
multinom = multinomial.Multinomial(
total_count=n, probs=p, validate_args=False)
multinom.prob([1., 2., 2.]).eval()
# Non-integer arguments work.
multinom.prob([1.0, 2.5, 1.5]).eval()
def testPmfBothZeroBatches(self):
with self.test_session():
# Both zero-batches. No broadcast
p = [0.5, 0.5]
counts = [1., 0]
pmf = multinomial.Multinomial(total_count=1., probs=p).prob(counts)
self.assertAllClose(0.5, pmf.eval())
self.assertEqual((), pmf.get_shape())
def testPmfBothZeroBatchesNontrivialN(self):
with self.test_session():
# Both zero-batches. No broadcast
p = [0.1, 0.9]
counts = [3., 2]
dist = multinomial.Multinomial(total_count=5., probs=p)
pmf = dist.prob(counts)
# 5 choose 3 = 5 choose 2 = 10. 10 * (.9)^2 * (.1)^3 = 81/10000.
self.assertAllClose(81. / 10000, pmf.eval())
self.assertEqual((), pmf.get_shape())
def testPmfPStretchedInBroadcastWhenSameRank(self):
with self.test_session():
p = [[0.1, 0.9]]
counts = [[1., 0], [0, 1]]
pmf = multinomial.Multinomial(total_count=1., probs=p).prob(counts)
self.assertAllClose([0.1, 0.9], pmf.eval())
self.assertEqual((2), pmf.get_shape())
def testPmfPStretchedInBroadcastWhenLowerRank(self):
with self.test_session():
p = [0.1, 0.9]
counts = [[1., 0], [0, 1]]
pmf = multinomial.Multinomial(total_count=1., probs=p).prob(counts)
self.assertAllClose([0.1, 0.9], pmf.eval())
self.assertEqual((2), pmf.get_shape())
def testPmfCountsStretchedInBroadcastWhenSameRank(self):
with self.test_session():
p = [[0.1, 0.9], [0.7, 0.3]]
counts = [[1., 0]]
pmf = multinomial.Multinomial(total_count=1., probs=p).prob(counts)
self.assertAllClose(pmf.eval(), [0.1, 0.7])
self.assertEqual((2), pmf.get_shape())
def testPmfCountsStretchedInBroadcastWhenLowerRank(self):
with self.test_session():
p = [[0.1, 0.9], [0.7, 0.3]]
counts = [1., 0]
pmf = multinomial.Multinomial(total_count=1., probs=p).prob(counts)
self.assertAllClose(pmf.eval(), [0.1, 0.7])
self.assertEqual(pmf.get_shape(), (2))
def testPmfShapeCountsStretchedN(self):
with self.test_session():
# [2, 2, 2]
p = [[[0.1, 0.9], [0.1, 0.9]], [[0.7, 0.3], [0.7, 0.3]]]
# [2, 2]
n = [[3., 3], [3, 3]]
# [2]
counts = [2., 1]
pmf = multinomial.Multinomial(total_count=n, probs=p).prob(counts)
pmf.eval()
self.assertEqual(pmf.get_shape(), (2, 2))
def testPmfShapeCountsPStretchedN(self):
with self.test_session():
p = [0.1, 0.9]
counts = [3., 2]
n = np.full([4, 3], 5., dtype=np.float32)
pmf = multinomial.Multinomial(total_count=n, probs=p).prob(counts)
pmf.eval()
self.assertEqual((4, 3), pmf.get_shape())
def testMultinomialMean(self):
with self.test_session():
n = 5.
p = [0.1, 0.2, 0.7]
dist = multinomial.Multinomial(total_count=n, probs=p)
expected_means = 5 * np.array(p, dtype=np.float32)
self.assertEqual((3,), dist.mean().get_shape())
self.assertAllClose(expected_means, dist.mean().eval())
def testMultinomialCovariance(self):
with self.test_session():
n = 5.
p = [0.1, 0.2, 0.7]
dist = multinomial.Multinomial(total_count=n, probs=p)
expected_covariances = [[9. / 20, -1 / 10, -7 / 20],
[-1 / 10, 4 / 5, -7 / 10],
[-7 / 20, -7 / 10, 21 / 20]]
self.assertEqual((3, 3), dist.covariance().get_shape())
self.assertAllClose(expected_covariances, dist.covariance().eval())
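# Added note: these expected entries follow the closed form
# Var[X_i] = n * p_i * (1 - p_i) on the diagonal and
# Cov[X_i, X_j] = -n * p_i * p_j off the diagonal,
# e.g. 5*0.1*0.9 = 9/20 and -5*0.1*0.2 = -1/10.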
def testMultinomialCovarianceBatch(self):
with self.test_session():
# Shape [2]
n = [5.] * 2
# Shape [4, 1, 2]
p = [[[0.1, 0.9]], [[0.1, 0.9]]] * 2
dist = multinomial.Multinomial(total_count=n, probs=p)
# Shape [2, 2]
inner_var = [[9. / 20, -9 / 20], [-9 / 20, 9 / 20]]
# Shape [4, 2, 2, 2]
expected_covariances = [[inner_var, inner_var]] * 4
self.assertEqual((4, 2, 2, 2), dist.covariance().get_shape())
self.assertAllClose(expected_covariances, dist.covariance().eval())
def testCovarianceMultidimensional(self):
# Shape [3, 5, 4]
p = np.random.dirichlet([.25, .25, .25, .25], [3, 5]).astype(np.float32)
# Shape [6, 3, 3]
p2 = np.random.dirichlet([.3, .3, .4], [6, 3]).astype(np.float32)
ns = np.random.randint(low=1, high=11, size=[3, 5]).astype(np.float32)
ns2 = np.random.randint(low=1, high=11, size=[6, 1]).astype(np.float32)
with self.test_session():
dist = multinomial.Multinomial(ns, p)
dist2 = multinomial.Multinomial(ns2, p2)
covariance = dist.covariance()
covariance2 = dist2.covariance()
self.assertEqual((3, 5, 4, 4), covariance.get_shape())
self.assertEqual((6, 3, 3, 3), covariance2.get_shape())
def testCovarianceFromSampling(self):
# We will test mean, cov, var, stddev on a Multinomial constructed
# via broadcast between theta (the probs) and n.
theta = np.array([[1., 2, 3],
[2.5, 4, 0.01]], dtype=np.float32)
theta /= np.sum(theta, 1)[..., array_ops.newaxis]
# Ideally we'd be able to test broadcasting, but the multinomial sampler
# doesn't support different total counts.
n = np.float32(5)
with self.test_session() as sess:
# batch_shape=[2], event_shape=[3]
dist = multinomial.Multinomial(n, theta)
x = dist.sample(int(250e3), seed=1)
sample_mean = math_ops.reduce_mean(x, 0)
x_centered = x - sample_mean[array_ops.newaxis, ...]
sample_cov = math_ops.reduce_mean(math_ops.matmul(
x_centered[..., array_ops.newaxis],
x_centered[..., array_ops.newaxis, :]), 0)
sample_var = array_ops.matrix_diag_part(sample_cov)
sample_stddev = math_ops.sqrt(sample_var)
[
sample_mean_,
sample_cov_,
sample_var_,
sample_stddev_,
analytic_mean,
analytic_cov,
analytic_var,
analytic_stddev,
] = sess.run([
sample_mean,
sample_cov,
sample_var,
sample_stddev,
dist.mean(),
dist.covariance(),
dist.variance(),
dist.stddev(),
])
self.assertAllClose(sample_mean_, analytic_mean, atol=0., rtol=0.01)
self.assertAllClose(sample_cov_, analytic_cov, atol=0., rtol=0.01)
self.assertAllClose(sample_var_, analytic_var, atol=0., rtol=0.01)
self.assertAllClose(sample_stddev_, analytic_stddev, atol=0., rtol=0.01)
def testSampleUnbiasedNonScalarBatch(self):
with self.test_session() as sess:
dist = multinomial.Multinomial(
total_count=5.,
logits=math_ops.log(2. * self._rng.rand(4, 3, 2).astype(np.float32)))
n = int(3e3)
x = dist.sample(n, seed=0)
sample_mean = math_ops.reduce_mean(x, 0)
# Cyclically rotate event dims left.
x_centered = array_ops.transpose(x - sample_mean, [1, 2, 3, 0])
sample_covariance = math_ops.matmul(
x_centered, x_centered, adjoint_b=True) / n
[
sample_mean_,
sample_covariance_,
actual_mean_,
actual_covariance_,
] = sess.run([
sample_mean,
sample_covariance,
dist.mean(),
dist.covariance(),
])
self.assertAllEqual([4, 3, 2], sample_mean.get_shape())
self.assertAllClose(actual_mean_, sample_mean_, atol=0., rtol=0.07)
self.assertAllEqual([4, 3, 2, 2], sample_covariance.get_shape())
self.assertAllClose(
actual_covariance_, sample_covariance_, atol=0., rtol=0.10)
def testSampleUnbiasedScalarBatch(self):
with self.test_session() as sess:
dist = multinomial.Multinomial(
total_count=5.,
logits=math_ops.log(2. * self._rng.rand(4).astype(np.float32)))
n = int(5e3)
x = dist.sample(n, seed=0)
sample_mean = math_ops.reduce_mean(x, 0)
x_centered = x - sample_mean # Already transposed to [n, 2].
sample_covariance = math_ops.matmul(
x_centered, x_centered, adjoint_a=True) / n
[
sample_mean_,
sample_covariance_,
actual_mean_,
actual_covariance_,
] = sess.run([
sample_mean,
sample_covariance,
dist.mean(),
dist.covariance(),
])
self.assertAllEqual([4], sample_mean.get_shape())
self.assertAllClose(actual_mean_, sample_mean_, atol=0., rtol=0.07)
self.assertAllEqual([4, 4], sample_covariance.get_shape())
self.assertAllClose(
actual_covariance_, sample_covariance_, atol=0., rtol=0.10)
if __name__ == "__main__":
test.main()
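# --- Added illustrative sketch (not part of the TensorFlow test suite) ---
# The closed-form value checked in testPmfBothZeroBatchesNontrivialN can be
# reproduced with plain Python/numpy; this uncalled helper assumes
# pmf(counts; n, p) = n! / prod(counts_i!) * prod(p_i ** counts_i):
def _multinomial_pmf_by_hand(counts, probs):
    import math
    counts = [int(c) for c in counts]
    coeff = float(math.factorial(sum(counts)))
    for c in counts:
        coeff /= math.factorial(c)
    return coeff * float(np.prod(np.asarray(probs, dtype=np.float64) ** counts))
# _multinomial_pmf_by_hand([3., 2.], [0.1, 0.9]) == 10 * 0.1**3 * 0.9**2 == 0.0081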
| apache-2.0 | -627,194,293,295,146,100 | 37.311953 | 80 | 0.602542 | false | 3.263223 | true | false | false |
sebmarchand/syzygy | third_party/numpy/files/numpy/distutils/fcompiler/gnu.py | 85 | 14383 | import re
import os
import sys
import warnings
import platform
import tempfile
from subprocess import Popen, PIPE, STDOUT
from numpy.distutils.cpuinfo import cpu
from numpy.distutils.fcompiler import FCompiler
from numpy.distutils.exec_command import exec_command
from numpy.distutils.misc_util import msvc_runtime_library
from numpy.distutils.compat import get_exception
compilers = ['GnuFCompiler', 'Gnu95FCompiler']
TARGET_R = re.compile("Target: ([a-zA-Z0-9_\-]*)")
# XXX: handle cross compilation
def is_win64():
return sys.platform == "win32" and platform.architecture()[0] == "64bit"
if is_win64():
#_EXTRAFLAGS = ["-fno-leading-underscore"]
_EXTRAFLAGS = []
else:
_EXTRAFLAGS = []
class GnuFCompiler(FCompiler):
compiler_type = 'gnu'
compiler_aliases = ('g77',)
description = 'GNU Fortran 77 compiler'
def gnu_version_match(self, version_string):
"""Handle the different versions of GNU fortran compilers"""
m = re.match(r'GNU Fortran', version_string)
if not m:
return None
m = re.match(r'GNU Fortran\s+95.*?([0-9-.]+)', version_string)
if m:
return ('gfortran', m.group(1))
m = re.match(r'GNU Fortran.*?([0-9-.]+)', version_string)
if m:
v = m.group(1)
if v.startswith('0') or v.startswith('2') or v.startswith('3'):
# the '0' is for early g77's
return ('g77', v)
else:
# at some point in the 4.x series, the ' 95' was dropped
# from the version string
return ('gfortran', v)
def version_match(self, version_string):
v = self.gnu_version_match(version_string)
if not v or v[0] != 'g77':
return None
return v[1]
# 'g77 --version' results
# SunOS: GNU Fortran (GCC 3.2) 3.2 20020814 (release)
# Debian: GNU Fortran (GCC) 3.3.3 20040110 (prerelease) (Debian)
# GNU Fortran (GCC) 3.3.3 (Debian 20040401)
# GNU Fortran 0.5.25 20010319 (prerelease)
# Redhat: GNU Fortran (GCC 3.2.2 20030222 (Red Hat Linux 3.2.2-5)) 3.2.2 20030222 (Red Hat Linux 3.2.2-5)
# GNU Fortran (GCC) 3.4.2 (mingw-special)
possible_executables = ['g77', 'f77']
executables = {
'version_cmd' : [None, "--version"],
'compiler_f77' : [None, "-g", "-Wall", "-fno-second-underscore"],
'compiler_f90' : None, # Use --fcompiler=gnu95 for f90 codes
'compiler_fix' : None,
'linker_so' : [None, "-g", "-Wall"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"],
'linker_exe' : [None, "-g", "-Wall"]
}
module_dir_switch = None
module_include_switch = None
# Cygwin: f771: warning: -fPIC ignored for target (all code is
# position independent)
if os.name != 'nt' and sys.platform != 'cygwin':
pic_flags = ['-fPIC']
# use -mno-cygwin for g77 when Python is not Cygwin-Python
if sys.platform == 'win32':
for key in ['version_cmd', 'compiler_f77', 'linker_so', 'linker_exe']:
executables[key].append('-mno-cygwin')
g2c = 'g2c'
suggested_f90_compiler = 'gnu95'
#def get_linker_so(self):
# # win32 linking should be handled by standard linker
# # Darwin g77 cannot be used as a linker.
# #if re.match(r'(darwin)', sys.platform):
# # return
# return FCompiler.get_linker_so(self)
def get_flags_linker_so(self):
opt = self.linker_so[1:]
if sys.platform=='darwin':
target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', None)
# If MACOSX_DEPLOYMENT_TARGET is set, we simply trust the value
# and leave it alone. But, distutils will complain if the
# environment's value is different from the one in the Python
# Makefile used to build Python. We let distutils handle this
# error checking.
if not target:
# If MACOSX_DEPLOYMENT_TARGET is not set in the environment,
# we try to get it first from the Python Makefile and then we
# fall back to setting it to 10.3 to maximize the set of
# versions we can work with. This is a reasonable default
# even when using the official Python dist and those derived
# from it.
import distutils.sysconfig as sc
g = {}
filename = sc.get_makefile_filename()
sc.parse_makefile(filename, g)
target = g.get('MACOSX_DEPLOYMENT_TARGET', '10.3')
os.environ['MACOSX_DEPLOYMENT_TARGET'] = target
if target == '10.3':
s = 'Env. variable MACOSX_DEPLOYMENT_TARGET set to 10.3'
warnings.warn(s)
opt.extend(['-undefined', 'dynamic_lookup', '-bundle'])
else:
opt.append("-shared")
if sys.platform.startswith('sunos'):
# SunOS often has dynamically loaded symbols defined in the
# static library libg2c.a The linker doesn't like this. To
# ignore the problem, use the -mimpure-text flag. It isn't
# the safest thing, but seems to work. 'man gcc' says:
# ".. Instead of using -mimpure-text, you should compile all
# source code with -fpic or -fPIC."
opt.append('-mimpure-text')
return opt
def get_libgcc_dir(self):
status, output = exec_command(self.compiler_f77 +
['-print-libgcc-file-name'],
use_tee=0)
if not status:
return os.path.dirname(output)
return None
def get_library_dirs(self):
opt = []
if sys.platform[:5] != 'linux':
d = self.get_libgcc_dir()
if d:
# if windows and not cygwin, libg2c lies in a different folder
if sys.platform == 'win32' and not d.startswith('/usr/lib'):
d = os.path.normpath(d)
if not os.path.exists(os.path.join(d, "lib%s.a" % self.g2c)):
d2 = os.path.abspath(os.path.join(d,
'../../../../lib'))
if os.path.exists(os.path.join(d2, "lib%s.a" % self.g2c)):
opt.append(d2)
opt.append(d)
return opt
def get_libraries(self):
opt = []
d = self.get_libgcc_dir()
if d is not None:
g2c = self.g2c + '-pic'
f = self.static_lib_format % (g2c, self.static_lib_extension)
if not os.path.isfile(os.path.join(d,f)):
g2c = self.g2c
else:
g2c = self.g2c
if g2c is not None:
opt.append(g2c)
c_compiler = self.c_compiler
if sys.platform == 'win32' and c_compiler and \
c_compiler.compiler_type=='msvc':
# the following code is not needed (read: breaks) when using MinGW
# in case you want to link F77 compiled code with MSVC
opt.append('gcc')
runtime_lib = msvc_runtime_library()
if runtime_lib:
opt.append(runtime_lib)
if sys.platform == 'darwin':
opt.append('cc_dynamic')
return opt
def get_flags_debug(self):
return ['-g']
def get_flags_opt(self):
v = self.get_version()
if v and v<='3.3.3':
# With this compiler version building Fortran BLAS/LAPACK
# with -O3 caused failures in lib.lapack heevr,syevr tests.
opt = ['-O2']
else:
opt = ['-O3']
opt.append('-funroll-loops')
return opt
def _c_arch_flags(self):
""" Return detected arch flags from CFLAGS """
from distutils import sysconfig
try:
cflags = sysconfig.get_config_vars()['CFLAGS']
except KeyError:
return []
arch_re = re.compile(r"-arch\s+(\w+)")
arch_flags = []
for arch in arch_re.findall(cflags):
arch_flags += ['-arch', arch]
return arch_flags
def get_flags_arch(self):
return []
class Gnu95FCompiler(GnuFCompiler):
compiler_type = 'gnu95'
compiler_aliases = ('gfortran',)
description = 'GNU Fortran 95 compiler'
def version_match(self, version_string):
v = self.gnu_version_match(version_string)
if not v or v[0] != 'gfortran':
return None
v = v[1]
if v>='4.':
# gcc-4 series releases do not support -mno-cygwin option
pass
else:
# use -mno-cygwin flag for gfortran when Python is not Cygwin-Python
if sys.platform == 'win32':
for key in ['version_cmd', 'compiler_f77', 'compiler_f90',
'compiler_fix', 'linker_so', 'linker_exe']:
self.executables[key].append('-mno-cygwin')
return v
# 'gfortran --version' results:
# XXX is the below right?
# Debian: GNU Fortran 95 (GCC 4.0.3 20051023 (prerelease) (Debian 4.0.2-3))
# GNU Fortran 95 (GCC) 4.1.2 20061115 (prerelease) (Debian 4.1.1-21)
# OS X: GNU Fortran 95 (GCC) 4.1.0
# GNU Fortran 95 (GCC) 4.2.0 20060218 (experimental)
# GNU Fortran (GCC) 4.3.0 20070316 (experimental)
possible_executables = ['gfortran', 'f95']
executables = {
'version_cmd' : ["<F90>", "--version"],
'compiler_f77' : [None, "-Wall", "-ffixed-form",
"-fno-second-underscore"] + _EXTRAFLAGS,
'compiler_f90' : [None, "-Wall", "-fno-second-underscore"] + _EXTRAFLAGS,
'compiler_fix' : [None, "-Wall", "-ffixed-form",
"-fno-second-underscore"] + _EXTRAFLAGS,
'linker_so' : ["<F90>", "-Wall"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"],
'linker_exe' : [None, "-Wall"]
}
module_dir_switch = '-J'
module_include_switch = '-I'
g2c = 'gfortran'
def _universal_flags(self, cmd):
"""Return a list of -arch flags for every supported architecture."""
if not sys.platform == 'darwin':
return []
arch_flags = []
# get arches the C compiler gets.
c_archs = self._c_arch_flags()
if "i386" in c_archs:
c_archs[c_archs.index("i386")] = "i686"
# check the arches the Fortran compiler supports, and compare with
# arch flags from C compiler
for arch in ["ppc", "i686", "x86_64", "ppc64"]:
if _can_target(cmd, arch) and arch in c_archs:
arch_flags.extend(["-arch", arch])
return arch_flags
def get_flags(self):
flags = GnuFCompiler.get_flags(self)
arch_flags = self._universal_flags(self.compiler_f90)
if arch_flags:
flags[:0] = arch_flags
return flags
def get_flags_linker_so(self):
flags = GnuFCompiler.get_flags_linker_so(self)
arch_flags = self._universal_flags(self.linker_so)
if arch_flags:
flags[:0] = arch_flags
return flags
def get_library_dirs(self):
opt = GnuFCompiler.get_library_dirs(self)
if sys.platform == 'win32':
c_compiler = self.c_compiler
if c_compiler and c_compiler.compiler_type == "msvc":
target = self.get_target()
if target:
d = os.path.normpath(self.get_libgcc_dir())
root = os.path.join(d, os.pardir, os.pardir, os.pardir, os.pardir)
mingwdir = os.path.normpath(os.path.join(root, target, "lib"))
full = os.path.join(mingwdir, "libmingwex.a")
if os.path.exists(full):
opt.append(mingwdir)
return opt
def get_libraries(self):
opt = GnuFCompiler.get_libraries(self)
if sys.platform == 'darwin':
opt.remove('cc_dynamic')
if sys.platform == 'win32':
c_compiler = self.c_compiler
if c_compiler and c_compiler.compiler_type == "msvc":
if "gcc" in opt:
i = opt.index("gcc")
opt.insert(i+1, "mingwex")
opt.insert(i+1, "mingw32")
# XXX: fix this mess, does not work for mingw
if is_win64():
c_compiler = self.c_compiler
if c_compiler and c_compiler.compiler_type == "msvc":
return []
else:
raise NotImplementedError("Only MS compiler supported with gfortran on win64")
return opt
def get_target(self):
status, output = exec_command(self.compiler_f77 +
['-v'],
use_tee=0)
if not status:
m = TARGET_R.search(output)
if m:
return m.group(1)
return ""
def get_flags_opt(self):
if is_win64():
return ['-O0']
else:
return GnuFCompiler.get_flags_opt(self)
def _can_target(cmd, arch):
"""Return true if the command supports the -arch flag for the given
architecture."""
newcmd = cmd[:]
fid, filename = tempfile.mkstemp(suffix=".f")
try:
d = os.path.dirname(filename)
output = os.path.splitext(filename)[0] + ".o"
try:
newcmd.extend(["-arch", arch, "-c", filename])
p = Popen(newcmd, stderr=STDOUT, stdout=PIPE, cwd=d)
p.communicate()
return p.returncode == 0
finally:
if os.path.exists(output):
os.remove(output)
finally:
os.remove(filename)
return False
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
compiler = GnuFCompiler()
compiler.customize()
print(compiler.get_version())
raw_input('Press ENTER to continue...')
try:
compiler = Gnu95FCompiler()
compiler.customize()
print(compiler.get_version())
except Exception:
msg = get_exception()
print(msg)
raw_input('Press ENTER to continue...')
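# --- Added illustrative sketch (not part of the numpy module) ---
# gnu_version_match() distinguishes g77 from gfortran by the banner that
# `<compiler> --version` prints: 0.x/2.x/3.x banners are treated as g77,
# anything newer as gfortran. Uncalled demo over the sample banners above:
def _demo_gnu_version_match():
    fc = GnuFCompiler()
    return [fc.gnu_version_match(s) for s in (
        "GNU Fortran (GCC) 3.3.3 20040110 (prerelease) (Debian)",  # ('g77', '3.3.3')
        "GNU Fortran 95 (GCC) 4.1.0",                               # ('gfortran', '4.1.0')
        "GNU Fortran (GCC) 4.3.0 20070316 (experimental)",          # ('gfortran', '4.3.0')
    )]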
| apache-2.0 | -6,499,297,468,068,734,000 | 36.455729 | 109 | 0.538761 | false | 3.750456 | false | false | false |
KirtoXX/Security_Camera | ssd_mobilenet/object_detection/models/faster_rcnn_inception_resnet_v2_feature_extractor.py | 13 | 9042 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Inception Resnet v2 Faster R-CNN implementation.
See "Inception-v4, Inception-ResNet and the Impact of Residual Connections on
Learning" by Szegedy et al. (https://arxiv.org/abs/1602.07261)
as well as
"Speed/accuracy trade-offs for modern convolutional object detectors" by
Huang et al. (https://arxiv.org/abs/1611.10012)
"""
import tensorflow as tf
from object_detection.meta_architectures import faster_rcnn_meta_arch
from nets import inception_resnet_v2
slim = tf.contrib.slim
class FasterRCNNInceptionResnetV2FeatureExtractor(
faster_rcnn_meta_arch.FasterRCNNFeatureExtractor):
"""Faster R-CNN with Inception Resnet v2 feature extractor implementation."""
def __init__(self,
is_training,
first_stage_features_stride,
reuse_weights=None,
weight_decay=0.0):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
reuse_weights: See base class.
weight_decay: See base class.
Raises:
ValueError: If `first_stage_features_stride` is not 8 or 16.
"""
if first_stage_features_stride != 8 and first_stage_features_stride != 16:
raise ValueError('`first_stage_features_stride` must be 8 or 16.')
super(FasterRCNNInceptionResnetV2FeatureExtractor, self).__init__(
is_training, first_stage_features_stride, reuse_weights, weight_decay)
def preprocess(self, resized_inputs):
"""Faster R-CNN with Inception Resnet v2 preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: A [batch, height_in, width_in, channels] float32 tensor
representing a batch of images with values between 0 and 255.0.
Returns:
preprocessed_inputs: A [batch, height_out, width_out, channels] float32
tensor representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
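# Added note: this linear map sends 0.0 -> -1.0, 127.5 -> 0.0 and
# 255.0 -> 1.0, i.e. preprocessed = resized_inputs / 127.5 - 1.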
def _extract_proposal_features(self, preprocessed_inputs, scope):
"""Extracts first stage RPN features.
Extracts features using the first half of the Inception Resnet v2 network.
We construct the network in `align_feature_maps=True` mode, which means
that all VALID paddings in the network are changed to SAME padding so that
the feature maps are aligned.
Args:
preprocessed_inputs: A [batch, height, width, channels] float32 tensor
representing a batch of images.
scope: A scope name.
Returns:
rpn_feature_map: A tensor with shape [batch, height, width, depth]
Raises:
InvalidArgumentError: If the spatial size of `preprocessed_inputs`
(height or width) is less than 33.
ValueError: If the created network is missing the required activation.
"""
if len(preprocessed_inputs.get_shape().as_list()) != 4:
raise ValueError('`preprocessed_inputs` must be 4 dimensional, got a '
'tensor of shape %s' % preprocessed_inputs.get_shape())
with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope(
weight_decay=self._weight_decay)):
# Forces is_training to False to disable batch norm update.
with slim.arg_scope([slim.batch_norm], is_training=False):
with tf.variable_scope('InceptionResnetV2',
reuse=self._reuse_weights) as scope:
rpn_feature_map, _ = (
inception_resnet_v2.inception_resnet_v2_base(
preprocessed_inputs, final_endpoint='PreAuxLogits',
scope=scope, output_stride=self._first_stage_features_stride,
align_feature_maps=True))
return rpn_feature_map
def _extract_box_classifier_features(self, proposal_feature_maps, scope):
"""Extracts second stage box classifier features.
This function reconstructs the "second half" of the Inception ResNet v2
network after the part defined in `_extract_proposal_features`.
Args:
proposal_feature_maps: A 4-D float tensor with shape
[batch_size * self.max_num_proposals, crop_height, crop_width, depth]
representing the feature map cropped to each proposal.
scope: A scope name.
Returns:
proposal_classifier_features: A 4-D float tensor with shape
[batch_size * self.max_num_proposals, height, width, depth]
representing box classifier features for each proposal.
"""
with tf.variable_scope('InceptionResnetV2', reuse=self._reuse_weights):
with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope(
weight_decay=self._weight_decay)):
# Forces is_training to False to disable batch norm update.
with slim.arg_scope([slim.batch_norm], is_training=False):
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1, padding='SAME'):
with tf.variable_scope('Mixed_7a'):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(proposal_feature_maps,
256, 1, scope='Conv2d_0a_1x1')
tower_conv_1 = slim.conv2d(
tower_conv, 384, 3, stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
tower_conv1 = slim.conv2d(
proposal_feature_maps, 256, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(
tower_conv1, 288, 3, stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
tower_conv2 = slim.conv2d(
proposal_feature_maps, 256, 1, scope='Conv2d_0a_1x1')
tower_conv2_1 = slim.conv2d(tower_conv2, 288, 3,
scope='Conv2d_0b_3x3')
tower_conv2_2 = slim.conv2d(
tower_conv2_1, 320, 3, stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_3'):
tower_pool = slim.max_pool2d(
proposal_feature_maps, 3, stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
net = tf.concat(
[tower_conv_1, tower_conv1_1, tower_conv2_2, tower_pool], 3)
net = slim.repeat(net, 9, inception_resnet_v2.block8, scale=0.20)
net = inception_resnet_v2.block8(net, activation_fn=None)
proposal_classifier_features = slim.conv2d(
net, 1536, 1, scope='Conv2d_7b_1x1')
return proposal_classifier_features
def restore_from_classification_checkpoint_fn(
self,
first_stage_feature_extractor_scope,
second_stage_feature_extractor_scope):
"""Returns a map of variables to load from a foreign checkpoint.
Note that this overrides the default implementation in
faster_rcnn_meta_arch.FasterRCNNFeatureExtractor which does not work for
InceptionResnetV2 checkpoints.
TODO: revisit whether it's possible to force the
`Repeat` namescope as created in `_extract_box_classifier_features` to
start counting at 2 (e.g. `Repeat_2`) so that the default restore_fn can
be used.
Args:
first_stage_feature_extractor_scope: A scope name for the first stage
feature extractor.
second_stage_feature_extractor_scope: A scope name for the second stage
feature extractor.
Returns:
A dict mapping variable names (to load from a checkpoint) to variables in
the model graph.
"""
variables_to_restore = {}
for variable in tf.global_variables():
if variable.op.name.startswith(
first_stage_feature_extractor_scope):
var_name = variable.op.name.replace(
first_stage_feature_extractor_scope + '/', '')
variables_to_restore[var_name] = variable
if variable.op.name.startswith(
second_stage_feature_extractor_scope):
var_name = variable.op.name.replace(
second_stage_feature_extractor_scope
+ '/InceptionResnetV2/Repeat', 'InceptionResnetV2/Repeat_2')
var_name = var_name.replace(
second_stage_feature_extractor_scope + '/', '')
variables_to_restore[var_name] = variable
return variables_to_restore
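# --- Illustrative usage sketch (not part of the original file) ---
# In the real pipeline this extractor is built from a protobuf config by the
# model builder and driven by FasterRCNNMetaArch; the shapes, stride and scope
# name below are assumptions chosen only to show the call sequence.
if __name__ == '__main__':
  feature_extractor = FasterRCNNInceptionResnetV2FeatureExtractor(
      is_training=False, first_stage_features_stride=16)
  images = tf.placeholder(tf.float32, shape=[1, 299, 299, 3])
  preprocessed = feature_extractor.preprocess(images)
  # Normally invoked through the meta-architecture rather than directly.
  rpn_feature_map = feature_extractor._extract_proposal_features(
      preprocessed, scope='FirstStageFeatureExtractor')
  print(rpn_feature_map.get_shape())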
| apache-2.0 | -2,878,342,389,594,409,500 | 42.263158 | 80 | 0.641009 | false | 3.844388 | false | false | false |
jkyeung/XlsxWriter | xlsxwriter/test/comparison/test_fit_to_pages01.py | 1 | 1354 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2016, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'fit_to_pages01.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = ['xl/printerSettings/printerSettings1.bin',
'xl/worksheets/_rels/sheet1.xml.rels']
self.ignore_elements = {'[Content_Types].xml': ['<Default Extension="bin"'],
'xl/worksheets/sheet1.xml': ['<pageMargins', '<pageSetup']}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with fit to print."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.fit_to_pages(1, 1)
worksheet.set_paper(9)
worksheet.write('A1', 'Foo')
workbook.close()
self.assertExcelEqual()
| bsd-2-clause | 7,276,973,820,359,425,000 | 28.434783 | 91 | 0.581241 | false | 3.835694 | true | false | false |
est31/godot | tools/translations/extract.py | 4 | 3162 | #!/bin/python
import fnmatch
import os
import shutil
import subprocess
import sys
line_nb = False
for arg in sys.argv[1:]:
if (arg == "--with-line-nb"):
print("Enabling line numbers in the context locations.")
line_nb = True
else:
os.sys.exit("Non supported argument '" + arg + "'. Aborting.")
if (not os.path.exists("tools")):
os.sys.exit("ERROR: This script should be started from the root of the git repo.")
matches = []
for root, dirnames, filenames in os.walk('.'):
for filename in fnmatch.filter(filenames, '*.cpp'):
if (filename.find("collada") != -1):
continue
matches.append(os.path.join(root, filename))
for filename in fnmatch.filter(filenames, '*.h'):
if (filename.find("collada") != -1):
continue
matches.append(os.path.join(root, filename))
matches.sort()
unique_str = []
unique_loc = {}
main_po = """
# LANGUAGE translation of the Godot Engine editor
# Copyright (C) 2016 Juan Linietsky, Ariel Manzur and the Godot community
# This file is distributed under the same license as the Godot source code.
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
#
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: Godot Engine editor\\n"
"Content-Type: text/plain; charset=UTF-8\\n"
"Content-Transfer-Encoding: 8-bit\\n"
"""
print("Updating the tools.pot template...")
for fname in matches:
f = open(fname, "rb")
l = f.readline()
lc = 1
while (l):
patterns = ['RTR(\"', 'TTR(\"']
idx = 0
pos = 0
while (pos >= 0):
pos = l.find(patterns[idx], pos)
if (pos == -1):
if (idx < len(patterns) - 1):
idx += 1
pos = 0
continue
pos += 5
msg = ""
while (pos < len(l) and (l[pos] != '"' or l[pos - 1] == '\\')):
msg += l[pos]
pos += 1
location = os.path.relpath(fname).replace('\\','/')
if (line_nb):
location += ":" + str(lc)
if (not msg in unique_str):
main_po += "\n#: " + location + "\n"
main_po += 'msgid "' + msg + '"\n'
main_po += 'msgstr ""\n'
unique_str.append(msg)
unique_loc[msg] = [location]
elif (not location in unique_loc[msg]):
# Add additional location to previous occurence too
msg_pos = main_po.find('\nmsgid "' + msg + '"')
if (msg_pos == -1):
print("Someone apparently thought writing Python was as easy as GDScript. Ping Akien.")
main_po = main_po[:msg_pos] + ' ' + location + main_po[msg_pos:]
unique_loc[msg].append(location)
l = f.readline()
lc += 1
f.close()
f = open("tools.pot", "wb")
f.write(main_po)
f.close()
if (os.name == "posix"):
os.system("msgmerge -w80 tools.pot tools.pot > tools.pot.wrap")
shutil.move("tools.pot.wrap", "tools.pot")
shutil.move("tools.pot", "tools/translations/tools.pot")
# TODO: Make that in a portable way, if we care; if not, kudos to Unix users
if (os.name == "posix"):
added = subprocess.check_output("git diff tools/translations/tools.pot | grep \+msgid | wc -l", shell = True)
removed = subprocess.check_output("git diff tools/translations/tools.pot | grep \\\-msgid | wc -l", shell = True)
print("\n# Template changes compared to the staged status:")
print("# Additions: %s msgids.\n# Deletions: %s msgids." % (int(added), int(removed)))
| mit | -9,170,374,170,648,537,000 | 25.571429 | 114 | 0.629981 | false | 2.927778 | false | false | false |
feroda/django | django/contrib/postgres/fields/hstore.py | 45 | 2945 | import json
from django.contrib.postgres import forms, lookups
from django.contrib.postgres.fields.array import ArrayField
from django.core import exceptions
from django.db.models import Field, TextField, Transform
from django.utils import six
from django.utils.translation import ugettext_lazy as _
__all__ = ['HStoreField']
class HStoreField(Field):
empty_strings_allowed = False
description = _('Map of strings to strings')
default_error_messages = {
'not_a_string': _('The value of "%(key)s" is not a string.'),
}
def db_type(self, connection):
return 'hstore'
def get_transform(self, name):
transform = super(HStoreField, self).get_transform(name)
if transform:
return transform
return KeyTransformFactory(name)
def validate(self, value, model_instance):
super(HStoreField, self).validate(value, model_instance)
for key, val in value.items():
if not isinstance(val, six.string_types):
raise exceptions.ValidationError(
self.error_messages['not_a_string'],
code='not_a_string',
params={'key': key},
)
def to_python(self, value):
if isinstance(value, six.string_types):
value = json.loads(value)
return value
def value_to_string(self, obj):
value = self._get_val_from_obj(obj)
return json.dumps(value)
def formfield(self, **kwargs):
defaults = {
'form_class': forms.HStoreField,
}
defaults.update(kwargs)
return super(HStoreField, self).formfield(**defaults)
HStoreField.register_lookup(lookups.DataContains)
HStoreField.register_lookup(lookups.ContainedBy)
@HStoreField.register_lookup
class HasKeyLookup(lookups.PostgresSimpleLookup):
lookup_name = 'has_key'
operator = '?'
@HStoreField.register_lookup
class HasKeysLookup(lookups.PostgresSimpleLookup):
lookup_name = 'has_keys'
operator = '?&'
class KeyTransform(Transform):
output_field = TextField()
def __init__(self, key_name, *args, **kwargs):
super(KeyTransform, self).__init__(*args, **kwargs)
self.key_name = key_name
def as_sql(self, compiler, connection):
lhs, params = compiler.compile(self.lhs)
return "%s -> '%s'" % (lhs, self.key_name), params
class KeyTransformFactory(object):
def __init__(self, key_name):
self.key_name = key_name
def __call__(self, *args, **kwargs):
return KeyTransform(self.key_name, *args, **kwargs)
@HStoreField.register_lookup
class KeysTransform(lookups.FunctionTransform):
lookup_name = 'keys'
function = 'akeys'
output_field = ArrayField(TextField())
@HStoreField.register_lookup
class ValuesTransform(lookups.FunctionTransform):
lookup_name = 'values'
function = 'avals'
output_field = ArrayField(TextField())
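# Illustrative usage sketch (not part of Django itself): the model below is
# hypothetical, but each query maps onto a lookup or transform registered above.
#
#     from django.contrib.postgres.fields import HStoreField
#
#     class Dog(models.Model):
#         name = models.CharField(max_length=200)
#         data = HStoreField()
#
#     Dog.objects.filter(data__breed='labrador')                # KeyTransform + exact
#     Dog.objects.filter(data__has_key='breed')                 # HasKeyLookup
#     Dog.objects.filter(data__has_keys=['breed', 'owner'])     # HasKeysLookup
#     Dog.objects.filter(data__keys__contains=['breed'])        # KeysTransform
#     Dog.objects.filter(data__contains={'breed': 'labrador'})  # DataContains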
| bsd-3-clause | 6,559,330,261,788,073,000 | 27.317308 | 69 | 0.647878 | false | 3.864829 | false | false | false |
pratikmallya/hue | desktop/core/ext-py/Django-1.6.10/django/views/decorators/cache.py | 129 | 2286 | from functools import wraps
from django.utils.decorators import decorator_from_middleware_with_args, available_attrs
from django.utils.cache import patch_cache_control, add_never_cache_headers
from django.middleware.cache import CacheMiddleware
def cache_page(*args, **kwargs):
"""
Decorator for views that tries getting the page from the cache and
populates the cache if the page isn't in the cache yet.
The cache is keyed by the URL and some data from the headers.
Additionally there is the key prefix that is used to distinguish different
cache areas in a multi-site setup. You could use the
sites.get_current_site().domain, for example, as that is unique across a Django
project.
Additionally, all headers from the response's Vary header will be taken
into account on caching -- just like the middleware does.
"""
# We also add some asserts to give better error messages in case people are
# using other ways to call cache_page that no longer work.
if len(args) != 1 or callable(args[0]):
raise TypeError("cache_page has a single mandatory positional argument: timeout")
cache_timeout = args[0]
cache_alias = kwargs.pop('cache', None)
key_prefix = kwargs.pop('key_prefix', None)
if kwargs:
raise TypeError("cache_page has two optional keyword arguments: cache and key_prefix")
return decorator_from_middleware_with_args(CacheMiddleware)(cache_timeout=cache_timeout, cache_alias=cache_alias, key_prefix=key_prefix)
def cache_control(**kwargs):
def _cache_controller(viewfunc):
@wraps(viewfunc, assigned=available_attrs(viewfunc))
def _cache_controlled(request, *args, **kw):
response = viewfunc(request, *args, **kw)
patch_cache_control(response, **kwargs)
return response
return _cache_controlled
return _cache_controller
def never_cache(view_func):
"""
Decorator that adds headers to a response so that it will
never be cached.
"""
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view_func(request, *args, **kwargs):
response = view_func(request, *args, **kwargs)
add_never_cache_headers(response)
return response
return _wrapped_view_func
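# Illustrative usage sketch (not part of this module); the view functions are
# hypothetical placeholders.
#
#     @cache_page(60 * 15, key_prefix='site1')
#     def article_list(request):
#         ...
#
#     @cache_control(max_age=3600, public=True)
#     def article_detail(request, pk):
#         ...
#
#     @never_cache
#     def dashboard(request):
#         ...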
| apache-2.0 | -3,524,898,767,110,126,000 | 40.563636 | 140 | 0.704724 | false | 4.126354 | false | false | false |
tody411/ImageViewerFramework | ivf/cmds/save_depth.py | 1 | 1284 |
# -*- coding: utf-8 -*-
## @package ivf.cmds.save_depth
#
# ivf.cmds.save_depth utility package.
# @author tody
# @date 2016/02/02
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import os
from ivf.cmds.base_cmds import BaseCommand
from ivf.scene.gl3d.image_plane import ImagePlane
from ivf.io_util.obj_model import saveOBJ
class SaveDepthCommand(BaseCommand):
def __init__(self, scene, file_path="", parent=None):
super(SaveDepthCommand, self).__init__(scene, "Save Depth Mesh", parent)
self._file_path = file_path
        self._show_ui = file_path == ""
self._root_dir = os.path.expanduser('~')
def _runImp(self):
if self._show_ui:
self._file_path = str(QFileDialog.getSaveFileName(None, "Save Depth Mesh", self._root_dir, "Obj File (*.obj)" ))
            if self._file_path == "":
                return
RGBA_8U = self._scene.image()
D_32F = self._scene.depth()
if D_32F is None:
return
model = ImagePlane(RGBA_8U)
model.setDepth(D_32F)
vertices = model.mesh().positions()
index_array = model.mesh().indexArray()
vertex_colors = model.mesh().vertexColors()
saveOBJ(self._file_path, vertices, index_array, vertex_colors) | mit | -5,482,225,773,421,896,000 | 26.934783 | 124 | 0.614486 | false | 3.34375 | false | false | false |
kisel/trex-core | scripts/astf/param_mss_err.py | 2 | 1056 | from trex_astf_lib.api import *
# TCP MSS tunable example
#
# tcp.mss
#
# Note: the profile below sets tcp.mss to 1; judging by the file name
# (param_mss_err), this is meant to exercise the parameter-error path.
#
class Prof1():
def __init__(self):
pass
def get_profile(self, **kwargs):
# ip generator
ip_gen_c = ASTFIPGenDist(ip_range=["16.0.0.0", "16.0.0.255"], distribution="seq")
ip_gen_s = ASTFIPGenDist(ip_range=["48.0.0.0", "48.0.255.255"], distribution="seq")
ip_gen = ASTFIPGen(glob=ASTFIPGenGlobal(ip_offset="1.0.0.0"),
dist_client=ip_gen_c,
dist_server=ip_gen_s)
c_glob_info = ASTFGlobalInfo()
c_glob_info.tcp.mss = 1
return ASTFProfile(default_ip_gen=ip_gen,
# Defaults affects all files
default_c_glob_info=c_glob_info,
cap_list=[
ASTFCapInfo(file="../avl/delay_10_http_browsing_0.pcap", cps=1)
]
)
def register():
return Prof1()
| apache-2.0 | 7,100,825,590,404,855,000 | 28.333333 | 100 | 0.484848 | false | 3.259259 | false | false | false |
vschs007/buck | third-party/py/pex/pex/interpreter.py | 52 | 12996 | # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
"""pex support for interacting with interpreters."""
from __future__ import absolute_import
import os
import re
import subprocess
import sys
from collections import defaultdict
from pkg_resources import Distribution, Requirement, find_distributions
from .base import maybe_requirement
from .compatibility import string
from .tracer import TRACER
try:
from numbers import Integral
except ImportError:
Integral = (int, long)
# Determine in the most platform-compatible way possible the identity of the interpreter
# and its known packages.
ID_PY = b"""
import sys
if hasattr(sys, 'pypy_version_info'):
subversion = 'PyPy'
elif sys.platform.startswith('java'):
subversion = 'Jython'
else:
subversion = 'CPython'
print("%s %s %s %s" % (
subversion,
sys.version_info[0],
sys.version_info[1],
sys.version_info[2]))
setuptools_path = None
try:
import pkg_resources
except ImportError:
sys.exit(0)
requirements = {}
for item in sys.path:
for dist in pkg_resources.find_distributions(item):
requirements[str(dist.as_requirement())] = dist.location
for requirement_str, location in requirements.items():
rs = requirement_str.split('==', 2)
if len(rs) == 2:
print('%s %s %s' % (rs[0], rs[1], location))
"""
class PythonIdentity(object):
class Error(Exception): pass
class InvalidError(Error): pass
class UnknownRequirement(Error): pass
# TODO(wickman) Support interpreter-specific versions, e.g. PyPy-2.2.1
HASHBANGS = {
'CPython': 'python%(major)d.%(minor)d',
'Jython': 'jython',
'PyPy': 'pypy',
}
@classmethod
def get_subversion(cls):
if hasattr(sys, 'pypy_version_info'):
subversion = 'PyPy'
elif sys.platform.startswith('java'):
subversion = 'Jython'
else:
subversion = 'CPython'
return subversion
@classmethod
def get(cls):
return cls(cls.get_subversion(), sys.version_info[0], sys.version_info[1], sys.version_info[2])
@classmethod
def from_id_string(cls, id_string):
values = id_string.split()
if len(values) != 4:
raise cls.InvalidError("Invalid id string: %s" % id_string)
return cls(str(values[0]), int(values[1]), int(values[2]), int(values[3]))
@classmethod
def from_path(cls, dirname):
interp, version = dirname.split('-')
major, minor, patch = version.split('.')
return cls(str(interp), int(major), int(minor), int(patch))
def __init__(self, interpreter, major, minor, patch):
for var in (major, minor, patch):
assert isinstance(var, Integral)
self._interpreter = interpreter
self._version = (major, minor, patch)
@property
def interpreter(self):
return self._interpreter
@property
def version(self):
return self._version
@property
def requirement(self):
return self.distribution.as_requirement()
@property
def distribution(self):
return Distribution(project_name=self._interpreter, version='.'.join(map(str, self._version)))
@classmethod
def parse_requirement(cls, requirement, default_interpreter='CPython'):
if isinstance(requirement, Requirement):
return requirement
elif isinstance(requirement, string):
try:
requirement = Requirement.parse(requirement)
except ValueError:
try:
requirement = Requirement.parse('%s%s' % (default_interpreter, requirement))
except ValueError:
raise ValueError('Unknown requirement string: %s' % requirement)
return requirement
else:
raise ValueError('Unknown requirement type: %r' % (requirement,))
def matches(self, requirement):
"""Given a Requirement, check if this interpreter matches."""
try:
requirement = self.parse_requirement(requirement, self._interpreter)
except ValueError as e:
raise self.UnknownRequirement(str(e))
return self.distribution in requirement
def hashbang(self):
hashbang_string = self.HASHBANGS.get(self.interpreter, 'CPython') % {
'major': self._version[0],
'minor': self._version[1],
'patch': self._version[2],
}
return '#!/usr/bin/env %s' % hashbang_string
@property
def python(self):
# return the python version in the format of the 'python' key for distributions
# specifically, '2.6', '2.7', '3.2', etc.
return '%d.%d' % (self.version[0:2])
def __str__(self):
return '%s-%s.%s.%s' % (self._interpreter,
self._version[0], self._version[1], self._version[2])
def __repr__(self):
return 'PythonIdentity(%r, %s, %s, %s)' % (
self._interpreter, self._version[0], self._version[1], self._version[2])
def __eq__(self, other):
return all([isinstance(other, PythonIdentity),
self.interpreter == other.interpreter,
self.version == other.version])
def __hash__(self):
return hash((self._interpreter, self._version))
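# Illustrative behaviour sketch (not in the original module): how an identity
# string produced by ID_PY is parsed and matched against requirement strings.
#
#   ident = PythonIdentity.from_id_string('CPython 2 7 10')
#   ident.matches('>=2.7')         # True  -- parsed as 'CPython>=2.7'
#   ident.matches('CPython>=3')    # False
#   ident.hashbang()               # '#!/usr/bin/env python2.7'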
class PythonInterpreter(object):
REGEXEN = (
re.compile(r'jython$'),
# NB: OSX ships python binaries named Python so we allow for capital-P.
re.compile(r'[Pp]ython$'),
re.compile(r'python[23].[0-9]$'),
re.compile(r'pypy$'),
re.compile(r'pypy-1.[0-9]$'),
)
CACHE = {} # memoize executable => PythonInterpreter
try:
# Versions of distribute prior to the setuptools merge would automatically replace
# 'setuptools' requirements with 'distribute'. It provided the 'replacement' kwarg
# to toggle this, but it was removed post-merge.
COMPATIBLE_SETUPTOOLS = Requirement.parse('setuptools>=1.0', replacement=False)
except TypeError:
COMPATIBLE_SETUPTOOLS = Requirement.parse('setuptools>=1.0')
class Error(Exception): pass
class IdentificationError(Error): pass
class InterpreterNotFound(Error): pass
@classmethod
def get(cls):
return cls.from_binary(sys.executable)
@classmethod
def all(cls, paths=None):
if paths is None:
paths = os.getenv('PATH', '').split(':')
return cls.filter(cls.find(paths))
@classmethod
def _parse_extras(cls, output_lines):
def iter_lines():
for line in output_lines:
try:
dist_name, dist_version, location = line.split()
except ValueError:
raise cls.IdentificationError('Could not identify requirement: %s' % line)
yield ((dist_name, dist_version), location)
return dict(iter_lines())
@classmethod
def _from_binary_internal(cls, path_extras):
def iter_extras():
for item in sys.path + list(path_extras):
for dist in find_distributions(item):
if dist.version:
yield ((dist.key, dist.version), dist.location)
return cls(sys.executable, PythonIdentity.get(), dict(iter_extras()))
@classmethod
def _from_binary_external(cls, binary, path_extras):
environ = cls.sanitized_environment()
environ['PYTHONPATH'] = ':'.join(path_extras)
po = subprocess.Popen(
[binary],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
env=environ)
so, _ = po.communicate(ID_PY)
output = so.decode('utf8').splitlines()
if len(output) == 0:
raise cls.IdentificationError('Could not establish identity of %s' % binary)
identity, extras = output[0], output[1:]
return cls(
binary,
PythonIdentity.from_id_string(identity),
extras=cls._parse_extras(extras))
@classmethod
def expand_path(cls, path):
if os.path.isfile(path):
return [path]
elif os.path.isdir(path):
return [os.path.join(path, fn) for fn in os.listdir(path)]
return []
@classmethod
def from_env(cls, hashbang):
"""Resolve a PythonInterpreter as /usr/bin/env would.
:param hashbang: A string, e.g. "python3.3" representing some binary on the $PATH.
"""
paths = os.getenv('PATH', '').split(':')
for path in paths:
for fn in cls.expand_path(path):
basefile = os.path.basename(fn)
if hashbang == basefile:
try:
return cls.from_binary(fn)
except Exception as e:
TRACER.log('Could not identify %s: %s' % (fn, e))
@classmethod
def from_binary(cls, binary, path_extras=None):
path_extras = path_extras or ()
if binary not in cls.CACHE:
if binary == sys.executable:
cls.CACHE[binary] = cls._from_binary_internal(path_extras)
else:
cls.CACHE[binary] = cls._from_binary_external(binary, path_extras)
return cls.CACHE[binary]
@classmethod
def find(cls, paths):
"""
Given a list of files or directories, try to detect python interpreters amongst them.
Returns a list of PythonInterpreter objects.
"""
pythons = []
for path in paths:
for fn in cls.expand_path(path):
basefile = os.path.basename(fn)
if any(matcher.match(basefile) is not None for matcher in cls.REGEXEN):
try:
pythons.append(cls.from_binary(fn))
except Exception as e:
TRACER.log('Could not identify %s: %s' % (fn, e))
continue
return pythons
@classmethod
def filter(cls, pythons):
"""
Given a map of python interpreters in the format provided by PythonInterpreter.find(),
filter out duplicate versions and versions we would prefer not to use.
Returns a map in the same format as find.
"""
good = []
MAJOR, MINOR, SUBMINOR = range(3)
def version_filter(version):
return (version[MAJOR] == 2 and version[MINOR] >= 6 or
version[MAJOR] == 3 and version[MINOR] >= 2)
all_versions = set(interpreter.identity.version for interpreter in pythons)
good_versions = filter(version_filter, all_versions)
for version in good_versions:
# For each candidate, use the latest version we find on the filesystem.
candidates = defaultdict(list)
for interp in pythons:
if interp.identity.version == version:
candidates[interp.identity.interpreter].append(interp)
for interp_class in candidates:
candidates[interp_class].sort(
key=lambda interp: os.path.getmtime(interp.binary), reverse=True)
good.append(candidates[interp_class].pop(0))
return good
@classmethod
def sanitized_environment(cls):
# N.B. This is merely a hack because sysconfig.py on the default OS X
# installation of 2.6/2.7 breaks.
env_copy = os.environ.copy()
env_copy.pop('MACOSX_DEPLOYMENT_TARGET', None)
return env_copy
@classmethod
def replace(cls, requirement):
self = cls.get()
if self.identity.matches(requirement):
return False
for pi in cls.all():
if pi.identity.matches(requirement):
break
else:
raise cls.InterpreterNotFound('Could not find interpreter matching filter!')
os.execve(pi.binary, [pi.binary] + sys.argv, cls.sanitized_environment())
def __init__(self, binary, identity, extras=None):
"""Construct a PythonInterpreter.
You should probably PythonInterpreter.from_binary instead.
:param binary: The full path of the python binary.
:param identity: The :class:`PythonIdentity` of the PythonInterpreter.
:param extras: A mapping from (dist.key, dist.version) to dist.location
of the extras associated with this interpreter.
"""
self._binary = os.path.realpath(binary)
self._extras = extras or {}
self._identity = identity
def with_extra(self, key, version, location):
extras = self._extras.copy()
extras[(key, version)] = location
return self.__class__(self._binary, self._identity, extras)
@property
def extras(self):
return self._extras.copy()
@property
def binary(self):
return self._binary
@property
def identity(self):
return self._identity
@property
def python(self):
return self._identity.python
@property
def version(self):
return self._identity.version
@property
def version_string(self):
return str(self._identity)
def satisfies(self, capability):
if not isinstance(capability, list):
raise TypeError('Capability must be a list, got %s' % type(capability))
return not any(self.get_location(req) is None for req in capability)
def get_location(self, req):
req = maybe_requirement(req)
for dist, location in self.extras.items():
dist_name, dist_version = dist
if req.key == dist_name and dist_version in req:
return location
def __hash__(self):
return hash((self._binary, self._identity))
def __eq__(self, other):
if not isinstance(other, PythonInterpreter):
return False
return (self._binary, self._identity) == (other._binary, other._identity)
def __lt__(self, other):
if not isinstance(other, PythonInterpreter):
return False
return self.version < other.version
def __repr__(self):
return '%s(%r, %r, %r)' % (self.__class__.__name__, self._binary, self._identity, self._extras)
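# Minimal discovery sketch (assumption: run as a script; output depends on the
# local $PATH); prints each interpreter pex could identify.
if __name__ == '__main__':
  for interp in PythonInterpreter.all():
    print('%s -> %s' % (interp.binary, interp.version_string))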
| apache-2.0 | -4,550,507,036,375,673,300 | 29.578824 | 99 | 0.657433 | false | 3.805564 | false | false | false |
AOKP/external_chromium_org | tools/python/google/gethash_timer.py | 182 | 4366 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Issue a series of GetHash requests to the SafeBrowsing servers and measure
the response times.
Usage:
$ ./gethash_timer.py --period=600 --samples=20 --output=resp.csv
--period (or -p): The amount of time (in seconds) to wait between GetHash
requests. Using a value of more than 300 (5 minutes) to
include the effect of DNS.
--samples (or -s): The number of requests to issue. If this parameter is not
specified, the test will run indefinitely.
--output (or -o): The path to a file where the output will be written in
CSV format: sample_number,response_code,elapsed_time_ms
"""
import getopt
import httplib
import sys
import time
_GETHASH_HOST = 'safebrowsing.clients.google.com'
_GETHASH_REQUEST = (
'/safebrowsing/gethash?client=googleclient&appver=1.0&pver=2.1')
# Global logging file handle.
g_file_handle = None
def IssueGetHash(prefix):
'''Issue one GetHash request to the safebrowsing servers.
Args:
prefix: A 4 byte value to look up on the server.
Returns:
The HTTP response code for the GetHash request.
'''
body = '4:4\n' + prefix
h = httplib.HTTPConnection(_GETHASH_HOST)
h.putrequest('POST', _GETHASH_REQUEST)
h.putheader('content-length', str(len(body)))
h.endheaders()
h.send(body)
response_code = h.getresponse().status
h.close()
return response_code
def TimedGetHash(prefix):
'''Measure the amount of time it takes to receive a GetHash response.
Args:
    prefix: A 4 byte value to look up on the server.
Returns:
    A tuple of HTTP response code and the response time (in milliseconds).
'''
start = time.time()
response_code = IssueGetHash(prefix)
return response_code, (time.time() - start) * 1000
def RunTimedGetHash(period, samples=None):
'''Runs an experiment to measure the amount of time it takes to receive
multiple responses from the GetHash servers.
Args:
period: A floating point value that indicates (in seconds) the delay
between requests.
samples: An integer value indicating the number of requests to make.
If 'None', the test continues indefinitely.
Returns:
None.
'''
global g_file_handle
prefix = '\x50\x61\x75\x6c'
sample_count = 1
while True:
response_code, elapsed_time = TimedGetHash(prefix)
LogResponse(sample_count, response_code, elapsed_time)
sample_count += 1
    if samples is not None and sample_count > samples:
break
time.sleep(period)
def LogResponse(sample_count, response_code, elapsed_time):
'''Output the response for one GetHash query.
Args:
sample_count: The current sample number.
response_code: The HTTP response code for the GetHash request.
elapsed_time: The round-trip time (in milliseconds) for the
GetHash request.
Returns:
None.
'''
global g_file_handle
output_list = (sample_count, response_code, elapsed_time)
print 'Request: %d, status: %d, elapsed time: %f ms' % output_list
if g_file_handle is not None:
g_file_handle.write(('%d,%d,%f' % output_list) + '\n')
g_file_handle.flush()
def SetupOutputFile(file_name):
'''Open a file for logging results.
Args:
file_name: A path to a file to store the output.
Returns:
None.
'''
global g_file_handle
g_file_handle = open(file_name, 'w')
def main():
  period = 10
  samples = None
  file_name = None
options, args = getopt.getopt(sys.argv[1:],
's:p:o:',
['samples=', 'period=', 'output='])
for option, value in options:
if option == '-s' or option == '--samples':
samples = int(value)
elif option == '-p' or option == '--period':
period = float(value)
elif option == '-o' or option == '--output':
file_name = value
else:
print 'Bad option: %s' % option
return 1
try:
print 'Starting Timed GetHash ----------'
    if file_name is not None:
      SetupOutputFile(file_name)
RunTimedGetHash(period, samples)
except KeyboardInterrupt:
pass
print 'Timed GetHash complete ----------'
  if g_file_handle is not None:
    g_file_handle.close()
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | -2,610,213,376,318,876,000 | 28.302013 | 78 | 0.653 | false | 3.703138 | false | false | false |
srinathv/vispy | vispy/visuals/isocurve.py | 18 | 7809 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division
import numpy as np
from .line import LineVisual
from ..color import ColorArray
from ..color.colormap import _normalize, get_colormap
from ..geometry.isocurve import isocurve
from ..testing import has_matplotlib
# checking for matplotlib
_HAS_MPL = has_matplotlib()
if _HAS_MPL:
from matplotlib import _cntr as cntr
class IsocurveVisual(LineVisual):
"""Displays an isocurve of a 2D scalar array.
Parameters
----------
data : ndarray | None
2D scalar array.
levels : ndarray, shape (Nlev,) | None
The levels at which the isocurve is constructed from "*data*".
color_lev : Color, colormap name, tuple, list or array
The color to use when drawing the line. If a list is given, it
must be of shape (Nlev), if an array is given, it must be of
shape (Nlev, ...). and provide one color per level (rgba, colorname).
clim : tuple
(min, max) limits to apply when mapping level values through a
colormap.
**kwargs : dict
Keyword arguments to pass to `LineVisual`.
Notes
-----
"""
def __init__(self, data=None, levels=None, color_lev=None, clim=None,
**kwargs):
self._data = None
self._levels = levels
self._color_lev = color_lev
self._clim = clim
self._need_color_update = True
self._need_level_update = True
self._need_recompute = True
self._X = None
self._Y = None
self._iso = None
self._level_min = None
self._data_is_uniform = False
self._lc = None
self._cl = None
self._li = None
self._connect = None
self._verts = None
kwargs['method'] = 'gl'
kwargs['antialias'] = False
LineVisual.__init__(self, **kwargs)
if data is not None:
self.set_data(data)
@property
def levels(self):
""" The threshold at which the isocurve is constructed from the
2D data.
"""
return self._levels
@levels.setter
def levels(self, levels):
self._levels = levels
self._need_level_update = True
self._need_recompute = True
self.update()
@property
def color(self):
return self._color_lev
@color.setter
def color(self, color):
self._color_lev = color
self._need_level_update = True
self._need_color_update = True
self.update()
def set_data(self, data):
""" Set the scalar array data
Parameters
----------
data : ndarray
A 2D array of scalar values. The isocurve is constructed to show
all locations in the scalar field equal to ``self.levels``.
"""
self._data = data
# if using matplotlib isoline algorithm we have to check for meshgrid
# and we can setup the tracer object here
if _HAS_MPL:
if self._X is None or self._X.T.shape != data.shape:
self._X, self._Y = np.meshgrid(np.arange(data.shape[0]),
np.arange(data.shape[1]))
self._iso = cntr.Cntr(self._X, self._Y, self._data.astype(float))
if self._clim is None:
self._clim = (data.min(), data.max())
# sanity check,
# should we raise an error here, since no isolines can be drawn?
# for now, _prepare_draw returns False if no isoline can be drawn
if self._data.min() != self._data.max():
self._data_is_uniform = False
else:
self._data_is_uniform = True
self._need_recompute = True
self.update()
def _get_verts_and_connect(self, paths):
""" retrieve vertices and connects from given paths-list
"""
verts = np.vstack(paths)
gaps = np.add.accumulate(np.array([len(x) for x in paths])) - 1
connect = np.ones(gaps[-1], dtype=bool)
connect[gaps[:-1]] = False
return verts, connect
def _compute_iso_line(self):
""" compute LineVisual vertices, connects and color-index
"""
level_index = []
connects = []
verts = []
# calculate which level are within data range
# this works for now and the existing examples, but should be tested
# thoroughly also with the data-sanity check in set_data-function
choice = np.nonzero((self.levels > self._data.min()) &
(self._levels < self._data.max()))
levels_to_calc = np.array(self.levels)[choice]
# save minimum level index
self._level_min = choice[0][0]
for level in levels_to_calc:
# if we use matplotlib isoline algorithm we need to add half a
# pixel in both (x,y) dimensions because isolines are aligned to
# pixel centers
if _HAS_MPL:
nlist = self._iso.trace(level, level, 0)
paths = nlist[:len(nlist)//2]
v, c = self._get_verts_and_connect(paths)
v += np.array([0.5, 0.5])
else:
paths = isocurve(self._data.astype(float).T, level,
extend_to_edge=True, connected=True)
v, c = self._get_verts_and_connect(paths)
level_index.append(v.shape[0])
connects.append(np.hstack((c, [False])))
verts.append(v)
self._li = np.hstack(level_index)
self._connect = np.hstack(connects)
self._verts = np.vstack(verts)
def _compute_iso_color(self):
""" compute LineVisual color from level index and corresponding color
"""
level_color = []
colors = self._lc
for i, index in enumerate(self._li):
level_color.append(np.zeros((index, 4)) +
colors[i+self._level_min])
self._cl = np.vstack(level_color)
def _levels_to_colors(self):
# computes ColorArrays for given levels
# try _color_lev as colormap, except as everything else
try:
f_color_levs = get_colormap(self._color_lev)
except:
colors = ColorArray(self._color_lev).rgba
else:
lev = _normalize(self._levels, self._clim[0], self._clim[1])
# map function expects (Nlev,1)!
colors = f_color_levs.map(lev[:, np.newaxis])
# broadcast to (nlev, 4) array
if len(colors) == 1:
colors = colors * np.ones((len(self._levels), 1))
# detect color_lev/levels mismatch and raise error
if (len(colors) != len(self._levels)):
raise TypeError("Color/level mismatch. Color must be of shape "
"(Nlev, ...) and provide one color per level")
self._lc = colors
def _prepare_draw(self, view):
if (self._data is None or self._levels is None or
self._color_lev is None or self._data_is_uniform):
return False
if self._need_level_update:
self._levels_to_colors()
self._need_level_update = False
if self._need_recompute:
self._compute_iso_line()
self._compute_iso_color()
LineVisual.set_data(self, pos=self._verts, connect=self._connect,
color=self._cl)
self._need_recompute = False
if self._need_color_update:
self._compute_iso_color()
LineVisual.set_data(self, color=self._cl)
self._need_color_update = False
return LineVisual._prepare_draw(self, view)
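# Minimal usage sketch (not part of vispy): shows the visual through its scene
# wrapper `scene.visuals.Isocurve`; the data, levels and colormap name are
# illustrative assumptions.
if __name__ == '__main__':
    from vispy import app, scene
    canvas = scene.SceneCanvas(keys='interactive', show=True)
    view = canvas.central_widget.add_view()
    view.camera = scene.PanZoomCamera(aspect=1)
    # a smooth 2D scalar field with two bumps
    x, y = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
    data = np.exp(-(x ** 2 + y ** 2)) + 0.5 * np.exp(-((x - 1) ** 2 + y ** 2))
    scene.visuals.Isocurve(data, levels=np.linspace(0.1, 1.1, 8),
                           color_lev='autumn', parent=view.scene)
    view.camera.set_range()
    app.run()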
| bsd-3-clause | 4,437,686,698,291,279,000 | 33.25 | 77 | 0.560891 | false | 3.963959 | false | false | false |
cbare/Etudes | python/strings.py | 1 | 1543 | """
String algorithms
"""
def balanced_parens(s: str) -> bool:
open = 0
for c in s:
if c=='(': open += 1
if c==')':
if open > 0:
open -= 1
else:
return False
return open==0
assert balanced_parens('')
assert balanced_parens('()')
assert balanced_parens('((()))')
assert balanced_parens('((()()()))')
assert balanced_parens('((()()()))()(())(()())')
assert not balanced_parens('(()')
assert not balanced_parens('((())))')
assert not balanced_parens('((()())')
assert not balanced_parens('())(()')
def longest_valid_parens(s: str) -> int:
"""
return the length of the longest run of valid nested parens.
Given a string containing just the characters '(' and ')', find the length
of the longest well-formed substring.
"""
    seeds = [(i,i+1) for i in range(len(s)-1) if s[i:i+2]=='()']
    if not seeds:
        return 0
    grew = True
    merged = False
    while grew or merged:
grew = 0
merged = 0
# grow
for i in range(len(seeds)):
a,b = seeds[i]
if a>0 and b+1<len(s) and s[a-1]=='(' and s[b+1]==')':
grew += 1
seeds[i] = (a-1, b+1)
# merge
new_seeds = []
s0 = seeds[0]
for s1 in seeds[1:]:
if s0[1]+1==s1[0]:
merged += 1
s0 = (s0[0], s1[1])
else:
new_seeds.append(s0)
s0 = s1
new_seeds.append(s0)
seeds = new_seeds
return max(b-a+1 for a,b in seeds)
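# a few illustrative checks for longest_valid_parens (added for clarity)
assert longest_valid_parens('(()') == 2
assert longest_valid_parens(')()())') == 4
assert longest_valid_parens('()(())') == 6
assert longest_valid_parens('(()())') == 6
assert longest_valid_parens('((((') == 0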
| apache-2.0 | 6,885,814,736,535,310,000 | 23.492063 | 78 | 0.473104 | false | 3.339827 | false | false | false |
lorenzo-desantis/mne-python | mne/preprocessing/eog.py | 6 | 7587 | # Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
from .peak_finder import peak_finder
from .. import pick_types, pick_channels
from ..utils import logger, verbose
from ..filter import band_pass_filter
from ..epochs import Epochs
@verbose
def find_eog_events(raw, event_id=998, l_freq=1, h_freq=10,
filter_length='10s', ch_name=None, tstart=0,
verbose=None):
"""Locate EOG artifacts
Parameters
----------
raw : instance of Raw
The raw data.
event_id : int
The index to assign to found events.
l_freq : float
Low cut-off frequency in Hz.
h_freq : float
High cut-off frequency in Hz.
filter_length : str | int | None
Number of taps to use for filtering.
    ch_name : str | None
If not None, use specified channel(s) for EOG
tstart : float
Start detection after tstart seconds.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
eog_events : array
Events.
"""
# Getting EOG Channel
eog_inds = _get_eog_channel_index(ch_name, raw)
logger.info('EOG channel index for this subject is: %s' % eog_inds)
eog, _ = raw[eog_inds, :]
eog_events = _find_eog_events(eog, event_id=event_id, l_freq=l_freq,
h_freq=h_freq,
sampling_rate=raw.info['sfreq'],
first_samp=raw.first_samp,
filter_length=filter_length,
tstart=tstart)
return eog_events
def _find_eog_events(eog, event_id, l_freq, h_freq, sampling_rate, first_samp,
filter_length='10s', tstart=0.):
"""Helper function"""
logger.info('Filtering the data to remove DC offset to help '
'distinguish blinks from saccades')
# filtering to remove dc offset so that we know which is blink and saccades
fmax = np.minimum(45, sampling_rate / 2.0 - 0.75) # protect Nyquist
filteog = np.array([band_pass_filter(x, sampling_rate, 2, fmax,
filter_length=filter_length)
for x in eog])
temp = np.sqrt(np.sum(filteog ** 2, axis=1))
indexmax = np.argmax(temp)
# easier to detect peaks with filtering.
filteog = band_pass_filter(eog[indexmax], sampling_rate, l_freq, h_freq,
filter_length=filter_length)
# detecting eog blinks and generating event file
logger.info('Now detecting blinks and generating corresponding events')
temp = filteog - np.mean(filteog)
n_samples_start = int(sampling_rate * tstart)
if np.abs(np.max(temp)) > np.abs(np.min(temp)):
eog_events, _ = peak_finder(filteog[n_samples_start:], extrema=1)
else:
eog_events, _ = peak_finder(filteog[n_samples_start:], extrema=-1)
eog_events += n_samples_start
n_events = len(eog_events)
logger.info("Number of EOG events detected : %d" % n_events)
eog_events = np.array([eog_events + first_samp,
np.zeros(n_events, int),
event_id * np.ones(n_events, int)]).T
return eog_events
def _get_eog_channel_index(ch_name, inst):
if isinstance(ch_name, str):
# Check if multiple EOG Channels
if ',' in ch_name:
ch_name = ch_name.split(',')
else:
ch_name = [ch_name]
eog_inds = pick_channels(inst.ch_names, include=ch_name)
if len(eog_inds) == 0:
raise ValueError('%s not in channel list' % ch_name)
else:
logger.info('Using channel %s as EOG channel%s' % (
" and ".join(ch_name),
'' if len(eog_inds) < 2 else 's'))
elif ch_name is None:
eog_inds = pick_types(inst.info, meg=False, eeg=False, stim=False,
eog=True, ecg=False, emg=False, ref_meg=False,
exclude='bads')
if len(eog_inds) == 0:
logger.info('No EOG channels found')
logger.info('Trying with EEG 061 and EEG 062')
eog_inds = pick_channels(inst.ch_names,
include=['EEG 061', 'EEG 062'])
if len(eog_inds) != 2:
raise RuntimeError('EEG 61 or EEG 62 channel not found !!')
else:
raise ValueError('Could not find EOG channel.')
return eog_inds
@verbose
def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None,
tmin=-0.5, tmax=0.5, l_freq=1, h_freq=10,
reject=None, flat=None, baseline=None,
preload=True, verbose=None):
"""Conveniently generate epochs around EOG artifact events
Parameters
----------
raw : instance of Raw
The raw data
ch_name : str
The name of the channel to use for EOG peak detection.
The argument is mandatory if the dataset contains no EOG channels.
event_id : int
The index to assign to found events
picks : array-like of int | None (default)
Indices of channels to include (if None, all channels
are used).
tmin : float
Start time before event.
tmax : float
End time after event.
l_freq : float
Low pass frequency.
h_freq : float
High pass frequency.
reject : dict | None
Rejection parameters based on peak-to-peak amplitude.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
If reject is None then no rejection is done. Example::
reject = dict(grad=4000e-13, # T / m (gradiometers)
mag=4e-12, # T (magnetometers)
eeg=40e-6, # uV (EEG channels)
eog=250e-6 # uV (EOG channels)
)
flat : dict | None
Rejection parameters based on flatness of signal.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
are floats that set the minimum acceptable peak-to-peak amplitude.
If flat is None then no rejection is done.
baseline : tuple or list of length 2, or None
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
        If baseline is equal to (None, None) all the time
interval is used. If None, no correction is applied.
preload : bool
Preload epochs or not.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
eog_epochs : instance of Epochs
Data epoched around EOG events.
"""
events = find_eog_events(raw, ch_name=ch_name, event_id=event_id,
l_freq=l_freq, h_freq=h_freq)
# create epochs around EOG events
eog_epochs = Epochs(raw, events=events, event_id=event_id,
tmin=tmin, tmax=tmax, proj=False, reject=reject,
flat=flat, picks=picks, baseline=baseline,
preload=preload)
return eog_epochs
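# Illustrative usage sketch (not part of this module); the file name is an
# assumption -- any Raw recording with an EOG channel will do.
if __name__ == '__main__':
    import mne
    raw = mne.io.read_raw_fif('sample_audvis_raw.fif', preload=True)
    eog_events = find_eog_events(raw)
    eog_epochs = create_eog_epochs(raw, tmin=-0.5, tmax=0.5)
    print('Detected %d EOG events' % len(eog_events))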
| bsd-3-clause | -1,182,136,550,195,087,600 | 35.475962 | 79 | 0.568604 | false | 3.670537 | false | false | false |
ttrifonov/EventBrain | src/eventbrain/bin/launcher.py | 1 | 9958 | #!/usr/bin/env python
# file: launcher.py
import os
import sys
import logging
from optparse import OptionParser, OptionGroup
from eventbrain.util.daemon import Daemon
FORMAT = '%(asctime)-15s:%(name)s:%(process)d:%(levelname)s === %(message)s'
logging.basicConfig(format=FORMAT, level=logging.INFO, stream=sys.stdout)
usage = "Usage: %prog [options] start|stop|restart"
def set_daemonize(option, opt_str, value, parser):
parser.values.daemonize = True
parser = OptionParser(usage=usage)
parser.add_option("-t", "--type", dest="type",
help="type of object to process('actor', 'a' "
"or 'decision', 'd')")
parser.add_option("-i", "--id", dest="Id",
help="Id of the object to process")
parser.add_option("-p", "--pid-dir", dest="pid_dir",
default="/var/run/eventbrain/",
help="Directory to store pid files for daemonized objects. "
"Default path is %default")
parser.add_option("-l", "--log-file", dest="logfile", default='/dev/null',
help="File to write logs. Default is %default")
parser.add_option("-d", "--daemonize", dest="daemonize", action="callback",
callback=set_daemonize, default=False,
help="Start in daemon mode")
parser.add_option("-o", "--options", dest="opts",
default=None,
help="Additional options to send to the class constructor")
parser.add_option("-c", "--config", dest="config",
default=None,
help="Config file with initial settings. "
"If a config file is provided, "
"other parameters are ignored.")
server_opts = OptionGroup(parser, "RabbitMQ options")
server_opts.add_option("-s", "--server", dest="host", default='localhost',
help="RabbitMQ server. Default is %default")
server_opts.add_option("-u", "--user", dest="user",
help="RabbitMQ credentials: username")
server_opts.add_option("-v", "--vhost", dest="vhost",
help="RabbitMQ credentials: virtual host")
server_opts.add_option("-w", "--password", dest="password",
help="RabbitMQ credentials: password")
parser.add_option_group(server_opts)
(options, args) = parser.parse_args()
commands = ('start', 'stop', 'restart')
types = ('actor', 'a', 'decision', 'd')
command = args[0]
class DaemonRunner(Daemon):
def run(self):
print "Run"
if hasattr(options, "kwargs"):
kwargs = options.kwargs
else:
kwargs = {}
if options.opts:
for opt in options.opts.split(";"):
(k, v) = opt.split("=")
kwargs[k] = v
print "kwargs", kwargs
if options.user and options.password:
kwargs['user'] = options.user
kwargs['vhost'] = options.vhost
kwargs['password'] = options.password
if options.host:
kwargs['host'] = options.host
inst = self.klass(**kwargs)
try:
inst.connect()
except KeyboardInterrupt:
inst.disconnect(reason="keyboard interruption")
def run_actor(obj_id):
print "Starting actor %s" % obj_id
klass = _import('actors', obj_id)
print "Found actor with exchange %s" % klass.id
if options.daemonize:
daemon = DaemonRunner(pid_file('a', klass),
stdout=options.logfile,
stderr=options.logfile)
daemon.klass = klass
daemon.start()
else:
kwargs = {}
if options.user and options.password:
kwargs['user'] = options.user
kwargs['vhost'] = options.vhost
kwargs['password'] = options.password
if options.host:
kwargs['host'] = options.host
if options.opts:
for opt in options.opts.split(";"):
(k, v) = opt.split("=")
kwargs[k] = v
print "kwargs", kwargs
inst = klass(**kwargs)
try:
inst.connect()
except KeyboardInterrupt:
inst.disconnect(reason="keyboard interruption")
print "Done"
def stop_actor(obj_id):
print "Stopping actor %s" % obj_id
klass = _import('actors', obj_id)
daemon = DaemonRunner(pid_file('a', klass))
daemon.stop()
print "Done"
def run_decision(obj_id):
print "Starting decision %s" % obj_id
klass = _import('decisions', obj_id)
print "Found decision with exchange %s" % klass.id
if options.daemonize:
daemon = DaemonRunner(pid_file('d', klass),
stdout=options.logfile,
stderr=options.logfile)
daemon.klass = klass
daemon.start()
else:
kwargs = {}
if options.user and options.password:
kwargs['user'] = options.user
kwargs['vhost'] = options.vhost
kwargs['password'] = options.password
if options.host:
kwargs['host'] = options.host
if options.opts:
for opt in options.opts.split(";"):
(k, v) = opt.split("=")
kwargs[k] = v
print "kwargs", kwargs
inst = klass(**kwargs)
try:
inst.connect()
except KeyboardInterrupt:
inst.disconnect(reason="keyboard interruption")
print "Done"
def stop_decision(obj_id):
print "Stopping decision %s" % obj_id
klass = _import('decisions', obj_id)
daemon = DaemonRunner(pid_file('d', klass))
daemon.stop()
print "Done"
def pid_file(prefix, klass):
pidfile = os.path.join(options.pid_dir, "".join([prefix,
'-',
klass.id,
".pid"]))
pidfile = os.path.abspath(pidfile)
print "PID file: %s" % pidfile
return pidfile
def _import(scope, obj_id):
try:
(_mod, _klass) = obj_id.split('.')
module = __import__('eventbrain.contrib.%s.%s' % (scope,
_mod),
fromlist=[_klass])
klass = getattr(module, _klass)
except Exception, ex:
print "Cannot import class %s\n%r" % (obj_id, ex)
exit(1)
return klass
def from_config():
import ConfigParser
config = ConfigParser.RawConfigParser()
config.readfp(open(options.config))
sections = config.sections()
if config.has_section("Main"):
if config.has_option("Main", "host"):
parser.values.host = config.get("Main", "host")
if config.has_option("Main", "user"):
parser.values.user = config.get("Main", "user")
if config.has_option("Main", "password"):
parser.values.password = config.get("Main", "password")
if config.has_option("Main", "vhost"):
parser.values.vhost = config.get("Main", "vhost")
for section in sections:
print ">>> Found section ", section
if section == "Main":
continue
else:
# Fork to avoid exiting from main thread after daemonizing
fpid = os.fork()
if fpid != 0:
process_section(config, section)
exit(0)
else:
continue
return True
def process_section(config, section):
if config.has_option(section, "type"):
_type = config.get(section, "type")
if _type not in types:
print "Unrecognized type: %s" % _type
return False
kwargs = {}
for item in config.items(section):
if item[0] == "daemonize":
parser.values.daemonize = config.getboolean(section,
"daemonize")
elif item[0] == "pid_dir":
parser.values.pid_dir = item[1]
elif item[0] == "log_file":
parser.values.logfile = item[1]
else:
kwargs[item[0]] = item[1]
print "kwargs", kwargs
parser.values.kwargs = kwargs
if _type in ('actor', 'a'):
if command == "start":
run_actor(section)
elif command == "stop":
stop_actor(section)
elif command == "restart":
stop_actor(section)
run_actor(section)
elif _type in ('decision', 'd'):
if command == "start":
run_decision(section)
elif command == "stop":
stop_decision(section)
elif command == "restart":
stop_decision(section)
run_decision(section)
if __name__ == "__main__":
if options.config:
if from_config():
exit(0)
else:
exit(1)
if not options.type:
print "Type not specified"
exit(1)
if options.type not in types:
print "Unrecognized type: %s" % options.type
exit(1)
if not options.Id:
print "Id not specified"
exit(1)
if not args or args[0] not in commands:
print "Unknown command %s" % ",".join(args)
exit(1)
if options.type in ('actor', 'a'):
# Actor
if command == "start":
run_actor(options.Id)
elif command == "stop":
stop_actor(options.Id)
elif command == "restart":
stop_actor(options.Id)
run_actor(options.Id)
if options.type in ('decision', 'd'):
# Decision
if command == "start":
run_decision(options.Id)
elif command == "stop":
stop_decision(options.Id)
elif command == "restart":
stop_decision(options.Id)
run_decision(options.Id)
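# Example configuration consumed by from_config()/process_section()
# (illustrative only; a section name must be "<module>.<ClassName>" under
# eventbrain.contrib.actors or .decisions, and "mymodule.MyActor" below is
# hypothetical):
#
#   [Main]
#   host = localhost
#   user = guest
#   password = guest
#   vhost = /
#
#   [mymodule.MyActor]
#   type = actor
#   daemonize = true
#   pid_dir = /var/run/eventbrain/
#   log_file = /var/log/eventbrain/myactor.log
#   threshold = 0.5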
| apache-2.0 | -816,246,304,667,039,600 | 31.864686 | 78 | 0.527214 | false | 4.177013 | true | false | false |
BraveSirRobbo/steam-scraper | test.py | 1 | 4225 | import unittest
from bs4 import BeautifulSoup
import re
def justOne(ls):
assert(len(ls) == 1)
return ls[0]
def scrapePage(html_doc):
soup = BeautifulSoup(html_doc, 'html.parser')
ratings = [s.get_text() for s in soup.find_all("span",attrs={ "class": re.compile(r"game_review_summary .*")})]
assert(len(ratings) != 1)
reviewCounts = [x.attrs["content"] for x in soup.find_all("meta",attrs={"itemprop":"reviewCount"})]
aList = [t.get_text() for t in soup.find_all("div",class_="game_area_details_specs")]
def tagChecker(*things):
for thing in things:
if thing in aList:
return True
return False
return { "title":
justOne(soup.find_all("div",class_="apphub_AppName")).get_text()
, "overall_rating" :
ratings[1] if len(ratings) > 0 else None
, "num_reviews" :
reviewCounts[0] if len(reviewCounts) > 0 else None
, "release_year" :
justOne(soup.find_all("span",class_="date")).get_text()[-4:]
, "user_tags" :
[x.get_text().strip() for x in justOne(soup.find_all("div",class_="glance_tags popular_tags")).find_all("a")]
, "multiplayer" :
tagChecker("Multi-player")
, "co-op" :
tagChecker("Co-op")
, "local_multiplayer" :
tagChecker("Shared/Split Screen")
, "steam_cloud" :
tagChecker("Steam Cloud")
, "controller_supported" :
tagChecker("Full controller support", "Partial Controller Support")
}
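# Illustrative input/output sketch (hypothetical markup, not one of the saved
# example pages under examples/):
#
#   scrapePage('<div class="apphub_AppName">Example Game</div>'
#              '<span class="date">12 Jan, 2016</span>'
#              '<div class="glance_tags popular_tags"><a>Indie</a></div>'
#              '<div class="game_area_details_specs">Multi-player</div>')
#   => {'title': 'Example Game', 'overall_rating': None, 'num_reviews': None,
#       'release_year': '2016', 'user_tags': ['Indie'], 'multiplayer': True,
#       'co-op': False, 'local_multiplayer': False, 'steam_cloud': False,
#       'controller_supported': False}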
class ScraperTests(unittest.TestCase):
def assertKeyValue(self, d, key, value):
self.assertIn(key, d)
self.assertEqual(d[key], value)
def test_example_page(self):
with open("examples/Age of Wonders III on Steam.html", "r") as f:
page_text = "".join(f.readlines())
res = scrapePage(page_text)
self.assertKeyValue(res, "title", "Age of Wonders III")
self.assertKeyValue(res, "overall_rating", "Very Positive")
self.assertKeyValue(res, "num_reviews", "3504")
self.assertKeyValue(res, "release_year","2014") #from class "release_date"
self.assertKeyValue(res, "user_tags", ['Strategy', 'Turn-Based Strategy', 'Fantasy', 'RPG', '4X', 'Turn-Based', 'Multiplayer', 'Singleplayer', 'Tactical', 'Co-op', 'Adventure', 'Hex Grid', 'Great Soundtrack', 'Grand Strategy', 'Classic', 'Atmospheric', 'Moddable', 'Action', 'Female Protagonist', 'Indie']) #from class "glance_tags popular_tags"
self.assertKeyValue(res, "multiplayer", True) #"Multi-Player" from class "game_area_details_specs"
self.assertKeyValue(res, "co-op", True) #"Co-op" from class "game_area_details_specs"
self.assertKeyValue(res, "local_multiplayer", True) #"Shared/Split Screen" from class "game_area_details_specs"
self.assertKeyValue(res, "steam_cloud", True) #Cross-Platform Multiplayer from class "game_area_details_specs"
self.assertKeyValue(res, "controller_supported", False) #Full OR Partial Controller Support from class "game_area_details_specs"
def test_no_recent_reviews(self):
with open("examples/No Recent Reviews.html") as f:
page_text = "".join(f.readlines())
res = scrapePage(page_text)
self.assertKeyValue(res,"overall_rating", "Very Positive")
def test_no_reviews(self):
with open("examples/No Reviews.html") as f:
page_text = "".join(f.readlines())
res = scrapePage(page_text)
self.assertKeyValue(res,"overall_rating", None)
self.assertKeyValue(res,"num_reviews", None)
# TODO: Real implementation
def filterGames(ls,q):
return [ls[0]]
# TODO: This is just a silly example
class FilterTests(unittest.TestCase):
def test_basic_filter(self):
examples = [ {"title" : "blah", "overall_rating" : "Very Positive"}
, {"title" : "bad", "overall_rating" : "Very Negative"} ]
# TODO: Will - Do you have ideas about the form of the query input to the filter
q = "overall_rating > Ok"
self.assertEqual(filterGames(examples, q),
[ {"title" : "blah", "overall_rating" : "Very Positive"} ])
if __name__ =="__main__":
unittest.main()
| apache-2.0 | 8,001,148,401,895,838,000 | 35.73913 | 351 | 0.624615 | false | 3.38 | true | false | false |
eljost/pysisyphus | deprecated/tests/test_dynamics/test_dynamics.py | 1 | 2531 | from matplotlib.patches import Circle
import matplotlib.pyplot as plt
import numpy as np
import pytest
from pysisyphus.calculators.AnaPot import AnaPot
from pysisyphus.dynamics.velocity_verlet import md
def test_velocity_verlet():
geom = AnaPot.get_geom((0.52, 1.80, 0))
x0 = geom.coords.copy()
v0 = .1 * np.random.rand(*geom.coords.shape)
t = 3
dts = (.005, .01, .02, .04, .08)
all_xs = list()
for dt in dts:
geom.coords = x0.copy()
md_kwargs = {
"v0": v0.copy(),
"t": t,
"dt": dt,
}
md_result = md(geom, **md_kwargs)
all_xs.append(md_result.coords)
calc = geom.calculator
calc.plot()
ax = calc.ax
for dt, xs in zip(dts, all_xs):
ax.plot(*xs.T[:2], "o-", label=f"dt={dt:.3f}")
# ax.plot(*xs.T[:2], "-", label=f"dt={dt:.3f}")
ax.legend()
plt.show()
def ase_md_playground():
geom = AnaPot.get_geom((0.52, 1.80, 0), atoms=("H", ))
atoms = geom.as_ase_atoms()
# ase_calc = FakeASE(geom.calculator)
# from ase.optimize import BFGS
# dyn = BFGS(atoms)
# dyn.run(fmax=0.05)
import ase
from ase import units
from ase.io.trajectory import Trajectory
from ase.md.velocitydistribution import MaxwellBoltzmannDistribution
from ase.md.verlet import VelocityVerlet
MaxwellBoltzmannDistribution(atoms, 300 * units.kB)
momenta = atoms.get_momenta()
momenta[0, 2] = 0.
# Zero 3rd dimension
atoms.set_momenta(momenta)
dyn = VelocityVerlet(atoms, .005 * units.fs) # 5 fs time step.
def printenergy(a):
"""Function to print the potential, kinetic and total energy"""
epot = a.get_potential_energy() / len(a)
ekin = a.get_kinetic_energy() / len(a)
print('Energy per atom: Epot = %.3feV Ekin = %.3feV (T=%3.0fK) '
'Etot = %.3feV' % (epot, ekin, ekin / (1.5 * units.kB), epot + ekin))
# Now run the dynamics
printenergy(atoms)
traj_fn = 'asemd.traj'
traj = Trajectory(traj_fn, 'w', atoms)
dyn.attach(traj.write, interval=5)
# dyn.attach(bumms().bimms, interval=1)
dyn.run(10000)
printenergy(atoms)
traj.close()
traj = ase.io.read(traj_fn+"@:")#, "r")
pos = [a.get_positions() for a in traj]
from pysisyphus.constants import BOHR2ANG
pos = np.array(pos) / BOHR2ANG
calc = geom.calculator
calc.plot()
ax = calc.ax
ax.plot(*pos[:,0,:2].T)
plt.show()
if __name__ == "__main__":
ase_md_playground()
| gpl-3.0 | -5,884,419,591,824,381,000 | 26.51087 | 83 | 0.591861 | false | 2.831096 | false | false | false |
yashsharan/sympy | sympy/calculus/euler.py | 16 | 3263 | """
This module implements a method to find
Euler-Lagrange Equations for given Lagrangian.
"""
from itertools import combinations_with_replacement
from sympy import Function, sympify, diff, Eq, S, Symbol, Derivative
from sympy.core.compatibility import (iterable, range)
def euler_equations(L, funcs=(), vars=()):
r"""
Find the Euler-Lagrange equations [1]_ for a given Lagrangian.
Parameters
==========
L : Expr
The Lagrangian that should be a function of the functions listed
in the second argument and their derivatives.
For example, in the case of two functions `f(x,y)`, `g(x,y)` and
two independent variables `x`, `y` the Lagrangian would have the form:
.. math:: L\left(f(x,y),g(x,y),\frac{\partial f(x,y)}{\partial x},
\frac{\partial f(x,y)}{\partial y},
\frac{\partial g(x,y)}{\partial x},
\frac{\partial g(x,y)}{\partial y},x,y\right)
    In many cases it is not necessary to provide anything except the
    Lagrangian; it will be auto-detected (and an error raised if this
    cannot be done).
funcs : Function or an iterable of Functions
The functions that the Lagrangian depends on. The Euler equations
are differential equations for each of these functions.
vars : Symbol or an iterable of Symbols
The Symbols that are the independent variables of the functions.
Returns
=======
eqns : list of Eq
The list of differential equations, one for each function.
Examples
========
>>> from sympy import Symbol, Function
>>> from sympy.calculus.euler import euler_equations
>>> x = Function('x')
>>> t = Symbol('t')
>>> L = (x(t).diff(t))**2/2 - x(t)**2/2
>>> euler_equations(L, x(t), t)
[Eq(-x(t) - Derivative(x(t), t, t), 0)]
>>> u = Function('u')
>>> x = Symbol('x')
>>> L = (u(t, x).diff(t))**2/2 - (u(t, x).diff(x))**2/2
>>> euler_equations(L, u(t, x), [t, x])
[Eq(-Derivative(u(t, x), t, t) + Derivative(u(t, x), x, x), 0)]
References
==========
.. [1] http://en.wikipedia.org/wiki/Euler%E2%80%93Lagrange_equation
"""
funcs = tuple(funcs) if iterable(funcs) else (funcs,)
if not funcs:
funcs = tuple(L.atoms(Function))
else:
for f in funcs:
if not isinstance(f, Function):
raise TypeError('Function expected, got: %s' % f)
vars = tuple(vars) if iterable(vars) else (vars,)
if not vars:
vars = funcs[0].args
else:
vars = tuple(sympify(var) for var in vars)
if not all(isinstance(v, Symbol) for v in vars):
raise TypeError('Variables are not symbols, got %s' % vars)
for f in funcs:
if not vars == f.args:
raise ValueError("Variables %s don't match args: %s" % (vars, f))
order = max(len(d.variables) for d in L.atoms(Derivative)
if d.expr in funcs)
eqns = []
for f in funcs:
eq = diff(L, f)
for i in range(1, order + 1):
for p in combinations_with_replacement(vars, i):
eq = eq + S.NegativeOne**i*diff(L, diff(f, *p), *p)
eqns.append(Eq(eq))
return eqns
| bsd-3-clause | 1,317,070,981,513,880,300 | 30.679612 | 78 | 0.579835 | false | 3.478678 | false | false | false |
adit-chandra/tensorflow | tensorflow/python/keras/applications/imagenet_utils.py | 5 | 1439 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for ImageNet data preprocessing & prediction decoding.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras_applications import imagenet_utils
from tensorflow.python.keras.applications import keras_modules_injection
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.applications.imagenet_utils.decode_predictions')
@keras_modules_injection
def decode_predictions(*args, **kwargs):
return imagenet_utils.decode_predictions(*args, **kwargs)
@keras_export('keras.applications.imagenet_utils.preprocess_input')
@keras_modules_injection
def preprocess_input(*args, **kwargs):
return imagenet_utils.preprocess_input(*args, **kwargs)
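# Illustrative usage (not part of the original module; ``model`` and
# ``img_batch`` are placeholders for a Keras model and a float32 image batch
# shaped (N, H, W, 3)):
#
#   x = preprocess_input(img_batch)
#   preds = model.predict(x)
#   decode_predictions(preds, top=5)  # [[(class_id, class_name, score), ...], ...]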
| apache-2.0 | 5,496,285,182,079,640,000 | 38.972222 | 80 | 0.737318 | false | 4.195335 | false | false | false |
fibbo/DIRAC | Interfaces/scripts/dirac-wms-job-attributes.py | 8 | 1197 | #!/usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-wms-job-attributes
# Author : Stuart Paterson
########################################################################
"""
Retrieve attributes associated with the given DIRAC job
"""
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s [option|cfgfile] ... JobID ...' % Script.scriptName,
'Arguments:',
' JobID: DIRAC Job ID' ] ) )
Script.parseCommandLine( ignoreErrors = True )
args = Script.getPositionalArgs()
if len( args ) < 1:
Script.showHelp()
from DIRAC.Interfaces.API.Dirac import Dirac
dirac = Dirac()
exitCode = 0
errorList = []
for job in args:
result = dirac.attributes( int(job), printOutput = True )
if not result['OK']:
errorList.append( ( job, result['Message'] ) )
exitCode = 2
for error in errorList:
print "ERROR %s: %s" % error
DIRAC.exit( exitCode )
| gpl-3.0 | 6,073,621,364,408,975,000 | 28.925 | 95 | 0.482038 | false | 4.043919 | false | false | false |
nikhil93uf/Qemu | scripts/tracetool/backend/simple.py | 97 | 2669 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Simple built-in backend.
"""
__author__ = "Lluís Vilanova <[email protected]>"
__copyright__ = "Copyright 2012-2014, Lluís Vilanova <[email protected]>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "[email protected]"
from tracetool import out
PUBLIC = True
def is_string(arg):
strtype = ('const char*', 'char*', 'const char *', 'char *')
if arg.lstrip().startswith(strtype):
return True
else:
return False
def generate_h_begin(events):
for event in events:
out('void _simple_%(api)s(%(args)s);',
api=event.api(),
args=event.args)
out('')
def generate_h(event):
out(' _simple_%(api)s(%(args)s);',
api=event.api(),
args=", ".join(event.args.names()))
def generate_c_begin(events):
out('#include "trace.h"',
'#include "trace/control.h"',
'#include "trace/simple.h"',
'')
def generate_c(event):
out('void _simple_%(api)s(%(args)s)',
'{',
' TraceBufferRecord rec;',
api=event.api(),
args=event.args)
sizes = []
for type_, name in event.args:
if is_string(type_):
out(' size_t arg%(name)s_len = %(name)s ? MIN(strlen(%(name)s), MAX_TRACE_STRLEN) : 0;',
name=name)
strsizeinfo = "4 + arg%s_len" % name
sizes.append(strsizeinfo)
else:
sizes.append("8")
sizestr = " + ".join(sizes)
if len(event.args) == 0:
sizestr = '0'
out('',
' if (!trace_event_get_state(%(event_id)s)) {',
' return;',
' }',
'',
' if (trace_record_start(&rec, %(event_id)s, %(size_str)s)) {',
' return; /* Trace Buffer Full, Event Dropped ! */',
' }',
event_id='TRACE_' + event.name.upper(),
size_str=sizestr)
if len(event.args) > 0:
for type_, name in event.args:
# string
if is_string(type_):
out(' trace_record_write_str(&rec, %(name)s, arg%(name)s_len);',
name=name)
# pointer var (not string)
elif type_.endswith('*'):
out(' trace_record_write_u64(&rec, (uintptr_t)(uint64_t *)%(name)s);',
name=name)
# primitive data type
else:
out(' trace_record_write_u64(&rec, (uint64_t)%(name)s);',
name=name)
out(' trace_record_finish(&rec);',
'}',
'')
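# For illustration only: a hypothetical one-argument trace event ``foo(uint64_t x)``
# (api name ``trace_foo``) would make generate_c() above emit roughly the following
# C, per the out() template; the real events come from the trace-events file:
#
#   void _simple_trace_foo(uint64_t x)
#   {
#       TraceBufferRecord rec;
#
#       if (!trace_event_get_state(TRACE_FOO)) {
#           return;
#       }
#
#       if (trace_record_start(&rec, TRACE_FOO, 8)) {
#           return; /* Trace Buffer Full, Event Dropped ! */
#       }
#       trace_record_write_u64(&rec, (uint64_t)x);
#       trace_record_finish(&rec);
#   }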
| gpl-2.0 | -3,565,729,492,435,361,000 | 25.939394 | 103 | 0.493063 | false | 3.350503 | false | false | false |
tod31/pyload | module/plugins/hoster/RPNetBiz.py | 5 | 3545 | # -*- coding: utf-8 -*-
import re
from module.plugins.internal.MultiHoster import MultiHoster
from module.plugins.internal.misc import json
class RPNetBiz(MultiHoster):
__name__ = "RPNetBiz"
__type__ = "hoster"
__version__ = "0.20"
__status__ = "testing"
__pattern__ = r'https?://.+rpnet\.biz'
__config__ = [("activated" , "bool", "Activated" , True ),
("use_premium" , "bool", "Use premium account if available" , True ),
("fallback" , "bool", "Fallback to free download if premium fails" , False),
("chk_filesize", "bool", "Check file size" , True ),
("max_wait" , "int" , "Reconnect if waiting time is greater than minutes", 10 ),
("revertfailed", "bool", "Revert to standard download if fails" , True )]
__description__ = """RPNet.biz multi-hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("Dman", "[email protected]")]
def setup(self):
self.chunk_limit = -1
def handle_premium(self, pyfile):
user, info = self.account.select()
res = self.load("https://premium.rpnet.biz/client_api.php",
get={'username': user,
'password': info['login']['password'],
'action' : "generate",
'links' : pyfile.url})
self.log_debug("JSON data: %s" % res)
link_status = json.loads(res)['links'][0] #: Get the first link... since we only queried one
#: Check if we only have an id as a HDD link
if 'id' in link_status:
self.log_debug("Need to wait at least 30 seconds before requery")
self.wait(30) #: Wait for 30 seconds
#: Lets query the server again asking for the status on the link,
#: We need to keep doing this until we reach 100
attemps = 30
my_try = 0
while (my_try <= attemps):
self.log_debug("Try: %d ; Max Tries: %d" % (my_try, attemps))
res = self.load("https://premium.rpnet.biz/client_api.php",
get={'username': user,
'password': info['login']['password'],
'action' : "downloadInformation",
'id' : link_status['id']})
self.log_debug("JSON data hdd query: %s" % res)
download_status = json.loads(res)['download']
if download_status['status'] == "100":
link_status['generated'] = download_status['rpnet_link']
self.log_debug("Successfully downloaded to rpnet HDD: %s" % link_status['generated'])
break
else:
self.log_debug("At %s%% for the file download" % download_status['status'])
self.wait(30)
my_try += 1
if my_try > attemps: #: We went over the limit!
self.fail(_("Waited for about 15 minutes for download to finish but failed"))
if 'generated' in link_status:
self.link = link_status['generated']
return
elif 'error' in link_status:
self.fail(link_status['error'])
else:
self.fail(_("Something went wrong, not supposed to enter here"))
| gpl-3.0 | 5,885,631,327,325,925,000 | 42.765432 | 105 | 0.490268 | false | 4.151054 | false | false | false |
Hellowlol/HTPC-Manager | libs/mako/_ast_util.py | 60 | 25652 | # mako/_ast_util.py
# Copyright (C) 2006-2015 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
ast
~~~
The `ast` module helps Python applications to process trees of the Python
abstract syntax grammar. The abstract syntax itself might change with
each Python release; this module helps to find out programmatically what
the current grammar looks like and allows modifications of it.
An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as
a flag to the `compile()` builtin function or by using the `parse()`
function from this module. The result will be a tree of objects whose
classes all inherit from `ast.AST`.
A modified abstract syntax tree can be compiled into a Python code object
using the built-in `compile()` function.
Additionally various helper functions are provided that make working with
the trees simpler. The main intention of the helper functions and this
module in general is to provide an easy to use interface for libraries
that work tightly with the python syntax (template engines for example).
:copyright: Copyright 2008 by Armin Ronacher.
:license: Python License.
"""
from _ast import *
from mako.compat import arg_stringname
BOOLOP_SYMBOLS = {
And: 'and',
Or: 'or'
}
BINOP_SYMBOLS = {
Add: '+',
Sub: '-',
Mult: '*',
Div: '/',
FloorDiv: '//',
Mod: '%',
LShift: '<<',
RShift: '>>',
BitOr: '|',
BitAnd: '&',
BitXor: '^'
}
CMPOP_SYMBOLS = {
Eq: '==',
Gt: '>',
GtE: '>=',
In: 'in',
Is: 'is',
IsNot: 'is not',
Lt: '<',
LtE: '<=',
NotEq: '!=',
NotIn: 'not in'
}
UNARYOP_SYMBOLS = {
Invert: '~',
Not: 'not',
UAdd: '+',
USub: '-'
}
ALL_SYMBOLS = {}
ALL_SYMBOLS.update(BOOLOP_SYMBOLS)
ALL_SYMBOLS.update(BINOP_SYMBOLS)
ALL_SYMBOLS.update(CMPOP_SYMBOLS)
ALL_SYMBOLS.update(UNARYOP_SYMBOLS)
def parse(expr, filename='<unknown>', mode='exec'):
"""Parse an expression into an AST node."""
return compile(expr, filename, mode, PyCF_ONLY_AST)
def to_source(node, indent_with=' ' * 4):
"""
    This function can convert a node tree back into Python source code. This
    is useful for debugging purposes, especially if you're dealing with custom
    ASTs not generated by Python itself.
    It could be that the source code is evaluable when the AST itself is not
    compilable / evaluable. The reason for this is that the AST contains some
    more data than regular source code does, which is dropped during
    conversion.
Each level of indentation is replaced with `indent_with`. Per default this
parameter is equal to four spaces as suggested by PEP 8, but it might be
adjusted to match the application's styleguide.
"""
generator = SourceGenerator(indent_with)
generator.visit(node)
return ''.join(generator.result)
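# Illustrative round-trip using parse()/to_source() from this module (the exact
# formatting of the output is approximate):
#
#   node = parse("x = a + b")
#   to_source(node)   # -> something like "x = (a + b)"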
def dump(node):
"""
A very verbose representation of the node passed. This is useful for
debugging purposes.
"""
def _format(node):
if isinstance(node, AST):
return '%s(%s)' % (node.__class__.__name__,
', '.join('%s=%s' % (a, _format(b))
for a, b in iter_fields(node)))
elif isinstance(node, list):
return '[%s]' % ', '.join(_format(x) for x in node)
return repr(node)
if not isinstance(node, AST):
raise TypeError('expected AST, got %r' % node.__class__.__name__)
return _format(node)
def copy_location(new_node, old_node):
"""
Copy the source location hint (`lineno` and `col_offset`) from the
old to the new node if possible and return the new one.
"""
for attr in 'lineno', 'col_offset':
if attr in old_node._attributes and attr in new_node._attributes \
and hasattr(old_node, attr):
setattr(new_node, attr, getattr(old_node, attr))
return new_node
def fix_missing_locations(node):
"""
Some nodes require a line number and the column offset. Without that
information the compiler will abort the compilation. Because it can be
a dull task to add appropriate line numbers and column offsets when
adding new nodes this function can help. It copies the line number and
column offset of the parent node to the child nodes without this
information.
    Unlike `copy_location` this works recursively and won't touch nodes that
    already have location information.
"""
def _fix(node, lineno, col_offset):
if 'lineno' in node._attributes:
if not hasattr(node, 'lineno'):
node.lineno = lineno
else:
lineno = node.lineno
if 'col_offset' in node._attributes:
if not hasattr(node, 'col_offset'):
node.col_offset = col_offset
else:
col_offset = node.col_offset
for child in iter_child_nodes(node):
_fix(child, lineno, col_offset)
_fix(node, 1, 0)
return node
def increment_lineno(node, n=1):
"""
Increment the line numbers of all nodes by `n` if they have line number
attributes. This is useful to "move code" to a different location in a
file.
"""
    for node in walk(node):
if 'lineno' in node._attributes:
node.lineno = getattr(node, 'lineno', 0) + n
def iter_fields(node):
"""Iterate over all fields of a node, only yielding existing fields."""
# CPython 2.5 compat
if not hasattr(node, '_fields') or not node._fields:
return
for field in node._fields:
try:
yield field, getattr(node, field)
except AttributeError:
pass
def get_fields(node):
"""Like `iter_fiels` but returns a dict."""
return dict(iter_fields(node))
def iter_child_nodes(node):
"""Iterate over all child nodes or a node."""
for name, field in iter_fields(node):
if isinstance(field, AST):
yield field
elif isinstance(field, list):
for item in field:
if isinstance(item, AST):
yield item
def get_child_nodes(node):
"""Like `iter_child_nodes` but returns a list."""
return list(iter_child_nodes(node))
def get_compile_mode(node):
"""
Get the mode for `compile` of a given node. If the node is not a `mod`
node (`Expression`, `Module` etc.) a `TypeError` is thrown.
"""
if not isinstance(node, mod):
raise TypeError('expected mod node, got %r' % node.__class__.__name__)
return {
Expression: 'eval',
Interactive: 'single'
    }.get(node.__class__, 'exec')
def get_docstring(node):
"""
Return the docstring for the given node or `None` if no docstring can be
found. If the node provided does not accept docstrings a `TypeError`
will be raised.
"""
if not isinstance(node, (FunctionDef, ClassDef, Module)):
raise TypeError("%r can't have docstrings" % node.__class__.__name__)
if node.body and isinstance(node.body[0], Str):
return node.body[0].s
def walk(node):
"""
Iterate over all nodes. This is useful if you only want to modify nodes in
place and don't care about the context or the order the nodes are returned.
"""
from collections import deque
todo = deque([node])
while todo:
node = todo.popleft()
todo.extend(iter_child_nodes(node))
yield node
class NodeVisitor(object):
"""
Walks the abstract syntax tree and call visitor functions for every node
found. The visitor functions may return values which will be forwarded
by the `visit` method.
Per default the visitor functions for the nodes are ``'visit_'`` +
class name of the node. So a `TryFinally` node visit function would
be `visit_TryFinally`. This behavior can be changed by overriding
the `get_visitor` function. If no visitor function exists for a node
(return value `None`) the `generic_visit` visitor is used instead.
Don't use the `NodeVisitor` if you want to apply changes to nodes during
traversing. For this a special visitor exists (`NodeTransformer`) that
allows modifications.
"""
def get_visitor(self, node):
"""
Return the visitor function for this node or `None` if no visitor
exists for this node. In that case the generic visit function is
used instead.
"""
method = 'visit_' + node.__class__.__name__
return getattr(self, method, None)
def visit(self, node):
"""Visit a node."""
f = self.get_visitor(node)
if f is not None:
return f(node)
return self.generic_visit(node)
def generic_visit(self, node):
"""Called if no explicit visitor function exists for a node."""
for field, value in iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, AST):
self.visit(item)
elif isinstance(value, AST):
self.visit(value)
class NodeTransformer(NodeVisitor):
"""
Walks the abstract syntax tree and allows modifications of nodes.
The `NodeTransformer` will walk the AST and use the return value of the
visitor functions to replace or remove the old node. If the return
value of the visitor function is `None` the node will be removed
from the previous location otherwise it's replaced with the return
value. The return value may be the original node in which case no
replacement takes place.
Here an example transformer that rewrites all `foo` to `data['foo']`::
class RewriteName(NodeTransformer):
def visit_Name(self, node):
return copy_location(Subscript(
value=Name(id='data', ctx=Load()),
slice=Index(value=Str(s=node.id)),
ctx=node.ctx
), node)
Keep in mind that if the node you're operating on has child nodes
you must either transform the child nodes yourself or call the generic
visit function for the node first.
Nodes that were part of a collection of statements (that applies to
all statement nodes) may also return a list of nodes rather than just
a single node.
Usually you use the transformer like this::
node = YourTransformer().visit(node)
"""
def generic_visit(self, node):
for field, old_value in iter_fields(node):
old_value = getattr(node, field, None)
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, AST):
value = self.visit(value)
if value is None:
continue
elif not isinstance(value, AST):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, AST):
new_node = self.visit(old_value)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
class SourceGenerator(NodeVisitor):
"""
This visitor is able to transform a well formed syntax tree into python
sourcecode. For more details have a look at the docstring of the
`node_to_source` function.
"""
def __init__(self, indent_with):
self.result = []
self.indent_with = indent_with
self.indentation = 0
self.new_lines = 0
def write(self, x):
if self.new_lines:
if self.result:
self.result.append('\n' * self.new_lines)
self.result.append(self.indent_with * self.indentation)
self.new_lines = 0
self.result.append(x)
def newline(self, n=1):
self.new_lines = max(self.new_lines, n)
def body(self, statements):
self.new_line = True
self.indentation += 1
for stmt in statements:
self.visit(stmt)
self.indentation -= 1
def body_or_else(self, node):
self.body(node.body)
if node.orelse:
self.newline()
self.write('else:')
self.body(node.orelse)
def signature(self, node):
want_comma = []
def write_comma():
if want_comma:
self.write(', ')
else:
want_comma.append(True)
padding = [None] * (len(node.args) - len(node.defaults))
for arg, default in zip(node.args, padding + node.defaults):
write_comma()
self.visit(arg)
if default is not None:
self.write('=')
self.visit(default)
if node.vararg is not None:
write_comma()
self.write('*' + arg_stringname(node.vararg))
if node.kwarg is not None:
write_comma()
self.write('**' + arg_stringname(node.kwarg))
def decorators(self, node):
for decorator in node.decorator_list:
self.newline()
self.write('@')
self.visit(decorator)
# Statements
def visit_Assign(self, node):
self.newline()
for idx, target in enumerate(node.targets):
if idx:
self.write(', ')
self.visit(target)
self.write(' = ')
self.visit(node.value)
def visit_AugAssign(self, node):
self.newline()
self.visit(node.target)
self.write(BINOP_SYMBOLS[type(node.op)] + '=')
self.visit(node.value)
def visit_ImportFrom(self, node):
self.newline()
self.write('from %s%s import ' % ('.' * node.level, node.module))
for idx, item in enumerate(node.names):
if idx:
self.write(', ')
self.write(item)
def visit_Import(self, node):
self.newline()
for item in node.names:
self.write('import ')
self.visit(item)
def visit_Expr(self, node):
self.newline()
self.generic_visit(node)
def visit_FunctionDef(self, node):
self.newline(n=2)
self.decorators(node)
self.newline()
self.write('def %s(' % node.name)
self.signature(node.args)
self.write('):')
self.body(node.body)
def visit_ClassDef(self, node):
have_args = []
def paren_or_comma():
if have_args:
self.write(', ')
else:
have_args.append(True)
self.write('(')
self.newline(n=3)
self.decorators(node)
self.newline()
self.write('class %s' % node.name)
for base in node.bases:
paren_or_comma()
self.visit(base)
# XXX: the if here is used to keep this module compatible
# with python 2.6.
if hasattr(node, 'keywords'):
for keyword in node.keywords:
paren_or_comma()
self.write(keyword.arg + '=')
self.visit(keyword.value)
if node.starargs is not None:
paren_or_comma()
self.write('*')
self.visit(node.starargs)
if node.kwargs is not None:
paren_or_comma()
self.write('**')
self.visit(node.kwargs)
self.write(have_args and '):' or ':')
self.body(node.body)
def visit_If(self, node):
self.newline()
self.write('if ')
self.visit(node.test)
self.write(':')
self.body(node.body)
while True:
else_ = node.orelse
if len(else_) == 1 and isinstance(else_[0], If):
node = else_[0]
self.newline()
self.write('elif ')
self.visit(node.test)
self.write(':')
self.body(node.body)
else:
self.newline()
self.write('else:')
self.body(else_)
break
def visit_For(self, node):
self.newline()
self.write('for ')
self.visit(node.target)
self.write(' in ')
self.visit(node.iter)
self.write(':')
self.body_or_else(node)
def visit_While(self, node):
self.newline()
self.write('while ')
self.visit(node.test)
self.write(':')
self.body_or_else(node)
def visit_With(self, node):
self.newline()
self.write('with ')
self.visit(node.context_expr)
if node.optional_vars is not None:
self.write(' as ')
self.visit(node.optional_vars)
self.write(':')
self.body(node.body)
def visit_Pass(self, node):
self.newline()
self.write('pass')
def visit_Print(self, node):
# XXX: python 2.6 only
self.newline()
self.write('print ')
want_comma = False
if node.dest is not None:
self.write(' >> ')
self.visit(node.dest)
want_comma = True
for value in node.values:
if want_comma:
self.write(', ')
self.visit(value)
want_comma = True
if not node.nl:
self.write(',')
def visit_Delete(self, node):
self.newline()
self.write('del ')
        for idx, target in enumerate(node.targets):
if idx:
self.write(', ')
self.visit(target)
def visit_TryExcept(self, node):
self.newline()
self.write('try:')
self.body(node.body)
for handler in node.handlers:
self.visit(handler)
def visit_TryFinally(self, node):
self.newline()
self.write('try:')
self.body(node.body)
self.newline()
self.write('finally:')
self.body(node.finalbody)
def visit_Global(self, node):
self.newline()
self.write('global ' + ', '.join(node.names))
def visit_Nonlocal(self, node):
self.newline()
self.write('nonlocal ' + ', '.join(node.names))
def visit_Return(self, node):
self.newline()
self.write('return ')
self.visit(node.value)
def visit_Break(self, node):
self.newline()
self.write('break')
def visit_Continue(self, node):
self.newline()
self.write('continue')
def visit_Raise(self, node):
# XXX: Python 2.6 / 3.0 compatibility
self.newline()
self.write('raise')
if hasattr(node, 'exc') and node.exc is not None:
self.write(' ')
self.visit(node.exc)
if node.cause is not None:
self.write(' from ')
self.visit(node.cause)
elif hasattr(node, 'type') and node.type is not None:
self.visit(node.type)
if node.inst is not None:
self.write(', ')
self.visit(node.inst)
if node.tback is not None:
self.write(', ')
self.visit(node.tback)
# Expressions
def visit_Attribute(self, node):
self.visit(node.value)
self.write('.' + node.attr)
def visit_Call(self, node):
want_comma = []
def write_comma():
if want_comma:
self.write(', ')
else:
want_comma.append(True)
self.visit(node.func)
self.write('(')
for arg in node.args:
write_comma()
self.visit(arg)
for keyword in node.keywords:
write_comma()
self.write(keyword.arg + '=')
self.visit(keyword.value)
if node.starargs is not None:
write_comma()
self.write('*')
self.visit(node.starargs)
if node.kwargs is not None:
write_comma()
self.write('**')
self.visit(node.kwargs)
self.write(')')
def visit_Name(self, node):
self.write(node.id)
def visit_NameConstant(self, node):
self.write(str(node.value))
def visit_arg(self, node):
self.write(node.arg)
def visit_Str(self, node):
self.write(repr(node.s))
def visit_Bytes(self, node):
self.write(repr(node.s))
def visit_Num(self, node):
self.write(repr(node.n))
def visit_Tuple(self, node):
self.write('(')
idx = -1
for idx, item in enumerate(node.elts):
if idx:
self.write(', ')
self.visit(item)
self.write(idx and ')' or ',)')
def sequence_visit(left, right):
def visit(self, node):
self.write(left)
for idx, item in enumerate(node.elts):
if idx:
self.write(', ')
self.visit(item)
self.write(right)
return visit
visit_List = sequence_visit('[', ']')
visit_Set = sequence_visit('{', '}')
del sequence_visit
def visit_Dict(self, node):
self.write('{')
for idx, (key, value) in enumerate(zip(node.keys, node.values)):
if idx:
self.write(', ')
self.visit(key)
self.write(': ')
self.visit(value)
self.write('}')
def visit_BinOp(self, node):
self.write('(')
self.visit(node.left)
self.write(' %s ' % BINOP_SYMBOLS[type(node.op)])
self.visit(node.right)
self.write(')')
def visit_BoolOp(self, node):
self.write('(')
for idx, value in enumerate(node.values):
if idx:
self.write(' %s ' % BOOLOP_SYMBOLS[type(node.op)])
self.visit(value)
self.write(')')
def visit_Compare(self, node):
self.write('(')
self.visit(node.left)
for op, right in zip(node.ops, node.comparators):
self.write(' %s ' % CMPOP_SYMBOLS[type(op)])
self.visit(right)
self.write(')')
def visit_UnaryOp(self, node):
self.write('(')
op = UNARYOP_SYMBOLS[type(node.op)]
self.write(op)
if op == 'not':
self.write(' ')
self.visit(node.operand)
self.write(')')
def visit_Subscript(self, node):
self.visit(node.value)
self.write('[')
self.visit(node.slice)
self.write(']')
def visit_Slice(self, node):
if node.lower is not None:
self.visit(node.lower)
self.write(':')
if node.upper is not None:
self.visit(node.upper)
if node.step is not None:
self.write(':')
if not (isinstance(node.step, Name) and node.step.id == 'None'):
self.visit(node.step)
def visit_ExtSlice(self, node):
for idx, item in node.dims:
if idx:
self.write(', ')
self.visit(item)
def visit_Yield(self, node):
self.write('yield ')
self.visit(node.value)
def visit_Lambda(self, node):
self.write('lambda ')
self.signature(node.args)
self.write(': ')
self.visit(node.body)
def visit_Ellipsis(self, node):
self.write('Ellipsis')
def generator_visit(left, right):
def visit(self, node):
self.write(left)
self.visit(node.elt)
for comprehension in node.generators:
self.visit(comprehension)
self.write(right)
return visit
visit_ListComp = generator_visit('[', ']')
visit_GeneratorExp = generator_visit('(', ')')
visit_SetComp = generator_visit('{', '}')
del generator_visit
def visit_DictComp(self, node):
self.write('{')
self.visit(node.key)
self.write(': ')
self.visit(node.value)
for comprehension in node.generators:
self.visit(comprehension)
self.write('}')
def visit_IfExp(self, node):
self.visit(node.body)
self.write(' if ')
self.visit(node.test)
self.write(' else ')
self.visit(node.orelse)
def visit_Starred(self, node):
self.write('*')
self.visit(node.value)
def visit_Repr(self, node):
# XXX: python 2.6 only
self.write('`')
self.visit(node.value)
self.write('`')
# Helper Nodes
def visit_alias(self, node):
self.write(node.name)
if node.asname is not None:
self.write(' as ' + node.asname)
def visit_comprehension(self, node):
self.write(' for ')
self.visit(node.target)
self.write(' in ')
self.visit(node.iter)
if node.ifs:
for if_ in node.ifs:
self.write(' if ')
self.visit(if_)
def visit_excepthandler(self, node):
self.newline()
self.write('except')
if node.type is not None:
self.write(' ')
self.visit(node.type)
if node.name is not None:
self.write(' as ')
self.visit(node.name)
self.write(':')
self.body(node.body)
| mit | 607,937,448,600,968,600 | 29.357396 | 79 | 0.555668 | false | 4.031432 | false | false | false |
40223117cda/cda | static/Brython3.1.1-20150328-091302/Lib/test/test_int.py | 765 | 12587 | import sys
import unittest
from test.support import run_unittest
L = [
('0', 0),
('1', 1),
('9', 9),
('10', 10),
('99', 99),
('100', 100),
('314', 314),
(' 314', 314),
('314 ', 314),
(' \t\t 314 \t\t ', 314),
(repr(sys.maxsize), sys.maxsize),
(' 1x', ValueError),
(' 1 ', 1),
(' 1\02 ', ValueError),
('', ValueError),
(' ', ValueError),
(' \t\t ', ValueError),
("\u0200", ValueError)
]
class IntTestCases(unittest.TestCase):
def test_basic(self):
self.assertEqual(int(314), 314)
self.assertEqual(int(3.14), 3)
# Check that conversion from float truncates towards zero
self.assertEqual(int(-3.14), -3)
self.assertEqual(int(3.9), 3)
self.assertEqual(int(-3.9), -3)
self.assertEqual(int(3.5), 3)
self.assertEqual(int(-3.5), -3)
self.assertEqual(int("-3"), -3)
self.assertEqual(int(" -3 "), -3)
self.assertEqual(int("\N{EM SPACE}-3\N{EN SPACE}"), -3)
# Different base:
self.assertEqual(int("10",16), 16)
# Test conversion from strings and various anomalies
for s, v in L:
for sign in "", "+", "-":
for prefix in "", " ", "\t", " \t\t ":
ss = prefix + sign + s
vv = v
if sign == "-" and v is not ValueError:
vv = -v
try:
self.assertEqual(int(ss), vv)
except ValueError:
pass
s = repr(-1-sys.maxsize)
x = int(s)
self.assertEqual(x+1, -sys.maxsize)
self.assertIsInstance(x, int)
# should return int
self.assertEqual(int(s[1:]), sys.maxsize+1)
# should return int
x = int(1e100)
self.assertIsInstance(x, int)
x = int(-1e100)
self.assertIsInstance(x, int)
# SF bug 434186: 0x80000000/2 != 0x80000000>>1.
# Worked by accident in Windows release build, but failed in debug build.
# Failed in all Linux builds.
x = -1-sys.maxsize
self.assertEqual(x >> 1, x//2)
self.assertRaises(ValueError, int, '123\0')
self.assertRaises(ValueError, int, '53', 40)
# SF bug 1545497: embedded NULs were not detected with
# explicit base
self.assertRaises(ValueError, int, '123\0', 10)
self.assertRaises(ValueError, int, '123\x00 245', 20)
x = int('1' * 600)
self.assertIsInstance(x, int)
self.assertRaises(TypeError, int, 1, 12)
self.assertEqual(int('0o123', 0), 83)
self.assertEqual(int('0x123', 16), 291)
# Bug 1679: "0x" is not a valid hex literal
self.assertRaises(ValueError, int, "0x", 16)
self.assertRaises(ValueError, int, "0x", 0)
self.assertRaises(ValueError, int, "0o", 8)
self.assertRaises(ValueError, int, "0o", 0)
self.assertRaises(ValueError, int, "0b", 2)
self.assertRaises(ValueError, int, "0b", 0)
# Bug #3236: Return small longs from PyLong_FromString
self.assertTrue(int("10") is 10)
self.assertTrue(int("-1") is -1)
# SF bug 1334662: int(string, base) wrong answers
# Various representations of 2**32 evaluated to 0
# rather than 2**32 in previous versions
self.assertEqual(int('100000000000000000000000000000000', 2), 4294967296)
self.assertEqual(int('102002022201221111211', 3), 4294967296)
self.assertEqual(int('10000000000000000', 4), 4294967296)
self.assertEqual(int('32244002423141', 5), 4294967296)
self.assertEqual(int('1550104015504', 6), 4294967296)
self.assertEqual(int('211301422354', 7), 4294967296)
self.assertEqual(int('40000000000', 8), 4294967296)
self.assertEqual(int('12068657454', 9), 4294967296)
self.assertEqual(int('4294967296', 10), 4294967296)
self.assertEqual(int('1904440554', 11), 4294967296)
self.assertEqual(int('9ba461594', 12), 4294967296)
self.assertEqual(int('535a79889', 13), 4294967296)
self.assertEqual(int('2ca5b7464', 14), 4294967296)
self.assertEqual(int('1a20dcd81', 15), 4294967296)
self.assertEqual(int('100000000', 16), 4294967296)
self.assertEqual(int('a7ffda91', 17), 4294967296)
self.assertEqual(int('704he7g4', 18), 4294967296)
self.assertEqual(int('4f5aff66', 19), 4294967296)
self.assertEqual(int('3723ai4g', 20), 4294967296)
self.assertEqual(int('281d55i4', 21), 4294967296)
self.assertEqual(int('1fj8b184', 22), 4294967296)
self.assertEqual(int('1606k7ic', 23), 4294967296)
self.assertEqual(int('mb994ag', 24), 4294967296)
self.assertEqual(int('hek2mgl', 25), 4294967296)
self.assertEqual(int('dnchbnm', 26), 4294967296)
self.assertEqual(int('b28jpdm', 27), 4294967296)
self.assertEqual(int('8pfgih4', 28), 4294967296)
self.assertEqual(int('76beigg', 29), 4294967296)
self.assertEqual(int('5qmcpqg', 30), 4294967296)
self.assertEqual(int('4q0jto4', 31), 4294967296)
self.assertEqual(int('4000000', 32), 4294967296)
self.assertEqual(int('3aokq94', 33), 4294967296)
self.assertEqual(int('2qhxjli', 34), 4294967296)
self.assertEqual(int('2br45qb', 35), 4294967296)
self.assertEqual(int('1z141z4', 36), 4294967296)
# tests with base 0
# this fails on 3.0, but in 2.x the old octal syntax is allowed
self.assertEqual(int(' 0o123 ', 0), 83)
self.assertEqual(int(' 0o123 ', 0), 83)
self.assertEqual(int('000', 0), 0)
self.assertEqual(int('0o123', 0), 83)
self.assertEqual(int('0x123', 0), 291)
self.assertEqual(int('0b100', 0), 4)
self.assertEqual(int(' 0O123 ', 0), 83)
self.assertEqual(int(' 0X123 ', 0), 291)
self.assertEqual(int(' 0B100 ', 0), 4)
# without base still base 10
self.assertEqual(int('0123'), 123)
self.assertEqual(int('0123', 10), 123)
# tests with prefix and base != 0
self.assertEqual(int('0x123', 16), 291)
self.assertEqual(int('0o123', 8), 83)
self.assertEqual(int('0b100', 2), 4)
self.assertEqual(int('0X123', 16), 291)
self.assertEqual(int('0O123', 8), 83)
self.assertEqual(int('0B100', 2), 4)
# the code has special checks for the first character after the
# type prefix
self.assertRaises(ValueError, int, '0b2', 2)
self.assertRaises(ValueError, int, '0b02', 2)
self.assertRaises(ValueError, int, '0B2', 2)
self.assertRaises(ValueError, int, '0B02', 2)
self.assertRaises(ValueError, int, '0o8', 8)
self.assertRaises(ValueError, int, '0o08', 8)
self.assertRaises(ValueError, int, '0O8', 8)
self.assertRaises(ValueError, int, '0O08', 8)
self.assertRaises(ValueError, int, '0xg', 16)
self.assertRaises(ValueError, int, '0x0g', 16)
self.assertRaises(ValueError, int, '0Xg', 16)
self.assertRaises(ValueError, int, '0X0g', 16)
# SF bug 1334662: int(string, base) wrong answers
# Checks for proper evaluation of 2**32 + 1
self.assertEqual(int('100000000000000000000000000000001', 2), 4294967297)
self.assertEqual(int('102002022201221111212', 3), 4294967297)
self.assertEqual(int('10000000000000001', 4), 4294967297)
self.assertEqual(int('32244002423142', 5), 4294967297)
self.assertEqual(int('1550104015505', 6), 4294967297)
self.assertEqual(int('211301422355', 7), 4294967297)
self.assertEqual(int('40000000001', 8), 4294967297)
self.assertEqual(int('12068657455', 9), 4294967297)
self.assertEqual(int('4294967297', 10), 4294967297)
self.assertEqual(int('1904440555', 11), 4294967297)
self.assertEqual(int('9ba461595', 12), 4294967297)
self.assertEqual(int('535a7988a', 13), 4294967297)
self.assertEqual(int('2ca5b7465', 14), 4294967297)
self.assertEqual(int('1a20dcd82', 15), 4294967297)
self.assertEqual(int('100000001', 16), 4294967297)
self.assertEqual(int('a7ffda92', 17), 4294967297)
self.assertEqual(int('704he7g5', 18), 4294967297)
self.assertEqual(int('4f5aff67', 19), 4294967297)
self.assertEqual(int('3723ai4h', 20), 4294967297)
self.assertEqual(int('281d55i5', 21), 4294967297)
self.assertEqual(int('1fj8b185', 22), 4294967297)
self.assertEqual(int('1606k7id', 23), 4294967297)
self.assertEqual(int('mb994ah', 24), 4294967297)
self.assertEqual(int('hek2mgm', 25), 4294967297)
self.assertEqual(int('dnchbnn', 26), 4294967297)
self.assertEqual(int('b28jpdn', 27), 4294967297)
self.assertEqual(int('8pfgih5', 28), 4294967297)
self.assertEqual(int('76beigh', 29), 4294967297)
self.assertEqual(int('5qmcpqh', 30), 4294967297)
self.assertEqual(int('4q0jto5', 31), 4294967297)
self.assertEqual(int('4000001', 32), 4294967297)
self.assertEqual(int('3aokq95', 33), 4294967297)
self.assertEqual(int('2qhxjlj', 34), 4294967297)
self.assertEqual(int('2br45qc', 35), 4294967297)
self.assertEqual(int('1z141z5', 36), 4294967297)
def test_intconversion(self):
# Test __int__()
class ClassicMissingMethods:
pass
self.assertRaises(TypeError, int, ClassicMissingMethods())
class MissingMethods(object):
pass
self.assertRaises(TypeError, int, MissingMethods())
class Foo0:
def __int__(self):
return 42
class Foo1(object):
def __int__(self):
return 42
class Foo2(int):
def __int__(self):
return 42
class Foo3(int):
def __int__(self):
return self
class Foo4(int):
def __int__(self):
return 42
class Foo5(int):
def __int__(self):
return 42.
self.assertEqual(int(Foo0()), 42)
self.assertEqual(int(Foo1()), 42)
self.assertEqual(int(Foo2()), 42)
self.assertEqual(int(Foo3()), 0)
self.assertEqual(int(Foo4()), 42)
self.assertRaises(TypeError, int, Foo5())
class Classic:
pass
for base in (object, Classic):
class IntOverridesTrunc(base):
def __int__(self):
return 42
def __trunc__(self):
return -12
self.assertEqual(int(IntOverridesTrunc()), 42)
class JustTrunc(base):
def __trunc__(self):
return 42
self.assertEqual(int(JustTrunc()), 42)
for trunc_result_base in (object, Classic):
class Integral(trunc_result_base):
def __int__(self):
return 42
class TruncReturnsNonInt(base):
def __trunc__(self):
return Integral()
self.assertEqual(int(TruncReturnsNonInt()), 42)
class NonIntegral(trunc_result_base):
def __trunc__(self):
# Check that we avoid infinite recursion.
return NonIntegral()
class TruncReturnsNonIntegral(base):
def __trunc__(self):
return NonIntegral()
try:
int(TruncReturnsNonIntegral())
except TypeError as e:
self.assertEqual(str(e),
"__trunc__ returned non-Integral"
" (type NonIntegral)")
else:
self.fail("Failed to raise TypeError with %s" %
((base, trunc_result_base),))
def test_error_message(self):
testlist = ('\xbd', '123\xbd', ' 123 456 ')
for s in testlist:
try:
int(s)
except ValueError as e:
self.assertIn(s.strip(), e.args[0])
else:
self.fail("Expected int(%r) to raise a ValueError", s)
def test_main():
run_unittest(IntTestCases)
if __name__ == "__main__":
test_main()
| gpl-3.0 | -2,974,264,190,853,167,600 | 38.090062 | 81 | 0.561373 | false | 3.752832 | true | false | false |
kashev/kashev.rocks | manage.py | 1 | 1238 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# kashev.rocks
# Kashev Dalmia - [email protected]
from flask.ext.script import Manager
from flask.ext.assets import ManageAssets
from src.kashevrocks import app
from src.assets import register_assets
manager = Manager(app)
assets_env = register_assets(app)
manager.add_command("assets", ManageAssets(assets_env))
@manager.command
def liveserver(debug=True):
""" Runs a live reloading server which watches non-python code as well. """
import livereload
app.debug = debug
assets_env.debug = debug
server = livereload.Server(app.wsgi_app)
server.watch('src/')
server.serve()
@manager.command
def clean():
""" Cleans up all generated and cache files from the project. """
import shutil
import os
paths_to_clean = ['src/static/.webassets-cache',
'src/static/generated',
'debug.log']
for path in paths_to_clean:
try:
shutil.rmtree(path)
except NotADirectoryError:
os.remove(path) # It's a file, not a directory
except FileNotFoundError:
pass # They're not there, that's fine.
if __name__ == "__main__":
manager.run()
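# Typical invocations (illustrative; the ``assets`` sub-commands come from the
# Flask-Assets ManageAssets command registered above):
#
#   ./manage.py liveserver       # live-reloading dev server watching src/
#   ./manage.py assets build     # build the registered asset bundles
#   ./manage.py clean            # remove generated files and caches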
| mit | -2,425,773,170,606,357,000 | 22.807692 | 79 | 0.642973 | false | 3.598837 | false | false | false |
brayden2544/Mystuff-final | account/cached_templates/templates/password_reset.html.py | 1 | 1886 | # -*- coding:ascii -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 9
_modified_time = 1397087641.387625
_enable_loop = True
_template_filename = 'C:\\app\\account\\templates/password_reset.html'
_template_uri = 'password_reset.html'
_source_encoding = 'ascii'
import os, os.path, re
_exports = ['content']
def _mako_get_namespace(context, name):
try:
return context.namespaces[(__name__, name)]
except KeyError:
_mako_generate_namespaces(context)
return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
pass
def _mako_inherit(template, context):
_mako_generate_namespaces(context)
return runtime._inherit_from(context, 'base_template.htm', _template_uri)
def render_body(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
def content():
return render_content(context._locals(__M_locals))
__M_writer = context.writer()
# SOURCE LINE 2
__M_writer('\n\n\n')
if 'parent' not in context._data or not hasattr(context._data['parent'], 'content'):
context['self'].content(**pageargs)
# SOURCE LINE 12
__M_writer(' \n\n\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_content(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
def content():
return render_content(context)
__M_writer = context.writer()
# SOURCE LINE 5
__M_writer('\n \n<h3>Password Reset</h3>\n\n<p>Your password has been reset. Please log in again with your new password.</p>\n\t\n\n')
return ''
finally:
context.caller_stack._pop_frame()
| apache-2.0 | 1,488,263,980,460,247,600 | 30.966102 | 143 | 0.626723 | false | 3.585551 | false | false | false |
nathanial/lettuce | tests/integration/lib/Django-1.3/tests/modeltests/model_inheritance/models.py | 118 | 3888 | """
XX. Model inheritance
Model inheritance exists in two varieties:
- abstract base classes which are a way of specifying common
information inherited by the subclasses. They don't exist as a separate
model.
- non-abstract base classes (the default), which are models in their own
right with their own database tables and everything. Their subclasses
have references back to them, created automatically.
Both styles are demonstrated here.
"""
from django.db import models
#
# Abstract base classes
#
class CommonInfo(models.Model):
name = models.CharField(max_length=50)
age = models.PositiveIntegerField()
class Meta:
abstract = True
ordering = ['name']
def __unicode__(self):
return u'%s %s' % (self.__class__.__name__, self.name)
class Worker(CommonInfo):
job = models.CharField(max_length=50)
class Student(CommonInfo):
school_class = models.CharField(max_length=10)
class Meta:
pass
class StudentWorker(Student, Worker):
pass
#
# Abstract base classes with related models
#
class Post(models.Model):
title = models.CharField(max_length=50)
class Attachment(models.Model):
post = models.ForeignKey(Post, related_name='attached_%(class)s_set')
content = models.TextField()
class Meta:
abstract = True
def __unicode__(self):
return self.content
class Comment(Attachment):
is_spam = models.BooleanField()
class Link(Attachment):
url = models.URLField()
#
# Multi-table inheritance
#
class Chef(models.Model):
name = models.CharField(max_length=50)
def __unicode__(self):
return u"%s the chef" % self.name
class Place(models.Model):
name = models.CharField(max_length=50)
address = models.CharField(max_length=80)
def __unicode__(self):
return u"%s the place" % self.name
class Rating(models.Model):
rating = models.IntegerField(null=True, blank=True)
class Meta:
abstract = True
ordering = ['-rating']
class Restaurant(Place, Rating):
serves_hot_dogs = models.BooleanField()
serves_pizza = models.BooleanField()
chef = models.ForeignKey(Chef, null=True, blank=True)
class Meta(Rating.Meta):
db_table = 'my_restaurant'
def __unicode__(self):
return u"%s the restaurant" % self.name
class ItalianRestaurant(Restaurant):
serves_gnocchi = models.BooleanField()
def __unicode__(self):
return u"%s the italian restaurant" % self.name
class Supplier(Place):
customers = models.ManyToManyField(Restaurant, related_name='provider')
def __unicode__(self):
return u"%s the supplier" % self.name
class ParkingLot(Place):
# An explicit link to the parent (we can control the attribute name).
parent = models.OneToOneField(Place, primary_key=True, parent_link=True)
main_site = models.ForeignKey(Place, related_name='lot')
def __unicode__(self):
return u"%s the parking lot" % self.name
#
# Abstract base classes with related models where the sub-class has the
# same name in a different app and inherits from the same abstract base
# class.
# NOTE: The actual API tests for the following classes are in
# model_inheritance_same_model_name/models.py - They are defined
# here in order to have the name conflict between apps
#
class Title(models.Model):
title = models.CharField(max_length=50)
class NamedURL(models.Model):
title = models.ForeignKey(Title, related_name='attached_%(app_label)s_%(class)s_set')
url = models.URLField()
class Meta:
abstract = True
class Copy(NamedURL):
content = models.TextField()
def __unicode__(self):
return self.content
class Mixin(object):
def __init__(self):
self.other_attr = 1
super(Mixin, self).__init__()
class MixinModel(models.Model, Mixin):
pass
| gpl-3.0 | -5,259,749,329,917,326,000 | 24.411765 | 89 | 0.675154 | false | 3.830542 | false | false | false |
steenzout/python-barcode | docs/conf.py | 1 | 11075 | # -*- coding: utf-8 -*-
#
# Copyright 2010-2013, Thorsten Weimann; 2014, Alexander Shorin; 2016 Pedro Salgado
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Sphinx documentation build configuration file.
All configuration values have a default;
values that are commented out serve to show the default.
If extensions (or modules to document with autodoc) are in another directory,
add these directories to sys.path here.
If the directory is relative to the documentation root,
use os.path.abspath to make it absolute, like shown here.
"""
import sys
import os
sys.path.insert(0, os.path.abspath('..'))
import semantic_version
from recommonmark.parser import CommonMarkParser
_package = 'steenzout.barcode'
_version = semantic_version.Version('1.0.0-beta3')
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = '.rst'
source_parsers = {
'.md': CommonMarkParser
}
source_suffix = [
'.md',
'.rst'
]
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = _package
author = 'Thorsten Weimann, Alexander Shorin, Pedro Salgado'
copyright = '2010-2013, Thorsten Weimann; 2014, Alexander Shorin; 2016 Pedro Salgado'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'%d.%d' % (_version.major, _version.minor)
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', '_templates', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u''
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = u'py_%s' % _package.replace('.', '_')
# -- Options for LaTeX output ---------------------------------------------
# latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
#
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
#
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
#
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
# }
latex_elements = {}
# Grouping the document tree into LaTeX files.
# List of tuples (
# source start file,
# target name,
# title,
# author,
# documentclass [howto, manual, or own class]
# ).
latex_documents = [(
master_doc,
u'py_%s.tex' % _package.replace('.', '_'),
u'%s documentation' % _package,
'Thorsten Weimann, Alexander Shorin, Pedro Salgado',
u'manual'
)]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page.
# List of tuples (
# source start file,
# name,
# description,
# authors,
# manual section
# ).
man_pages = [(
master_doc,
'py_%s' % _package.replace('.', '_'),
'%s documentation' % _package,
['Thorsten Weimann, Alexander Shorin, Pedro Salgado'],
1
)]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files.
# List of tuples (
# source start file,
# target name,
# title,
# author,
# dir menu entry,
# description,
# category
# ).
texinfo_documents = [(
master_doc,
u'py_%s' % _package.replace('.', '_'),
u'%s documentation' % _package,
'Thorsten Weimann, Alexander Shorin, Pedro Salgado',
u'',
u'%s documentation.' % _package,
u'Miscellaneous'
)]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# Generate API doc
from sphinx import apidoc
apidoc.main(['-f', '-T', '--separate', '-o', 'apidoc', '../steenzout'])
| mit | 4,455,014,411,438,905,000 | 26.413366 | 85 | 0.68614 | false | 3.49921 | false | false | false |
SpredfastLegacy/JsBeautify | jsbeautifier/unpackers/packer.py | 76 | 3302 | #
# Unpacker for Dean Edward's p.a.c.k.e.r, a part of javascript beautifier
# by Einar Lielmanis <[email protected]>
#
# written by Stefano Sanfilippo <[email protected]>
#
# usage:
#
# if detect(some_string):
# unpacked = unpack(some_string)
#
"""Unpacker for Dean Edward's p.a.c.k.e.r"""
import re
import string
from jsbeautifier.unpackers import UnpackingError
PRIORITY = 1
def detect(source):
"""Detects whether `source` is P.A.C.K.E.R. coded."""
return source.replace(' ', '').startswith('eval(function(p,a,c,k,e,r')
def unpack(source):
"""Unpacks P.A.C.K.E.R. packed js code."""
payload, symtab, radix, count = _filterargs(source)
if count != len(symtab):
raise UnpackingError('Malformed p.a.c.k.e.r. symtab.')
try:
unbase = Unbaser(radix)
except TypeError:
raise UnpackingError('Unknown p.a.c.k.e.r. encoding.')
def lookup(match):
"""Look up symbols in the synthetic symtab."""
word = match.group(0)
return symtab[unbase(word)] or word
source = re.sub(r'\b\w+\b', lookup, payload)
return _replacestrings(source)
def _filterargs(source):
"""Juice from a source file the four args needed by decoder."""
argsregex = (r"}\('(.*)', *(\d+), *(\d+), *'(.*)'\."
r"split\('\|'\), *(\d+), *(.*)\)\)")
args = re.search(argsregex, source, re.DOTALL).groups()
try:
return args[0], args[3].split('|'), int(args[1]), int(args[2])
except ValueError:
raise UnpackingError('Corrupted p.a.c.k.e.r. data.')
def _replacestrings(source):
"""Strip string lookup table (list) and replace values in source."""
match = re.search(r'var *(_\w+)\=\["(.*?)"\];', source, re.DOTALL)
if match:
varname, strings = match.groups()
startpoint = len(match.group(0))
lookup = strings.split('","')
variable = '%s[%%d]' % varname
for index, value in enumerate(lookup):
source = source.replace(variable % index, '"%s"' % value)
return source[startpoint:]
return source
class Unbaser(object):
"""Functor for a given base. Will efficiently convert
strings to natural numbers."""
ALPHABET = {
62 : '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ',
95 : (' !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'[\]^_`abcdefghijklmnopqrstuvwxyz{|}~')
}
def __init__(self, base):
self.base = base
# If base can be handled by int() builtin, let it do it for us
if 2 <= base <= 36:
self.unbase = lambda string: int(string, base)
else:
# Build conversion dictionary cache
try:
self.dictionary = dict((cipher, index) for
index, cipher in enumerate(self.ALPHABET[base]))
except KeyError:
raise TypeError('Unsupported base encoding.')
self.unbase = self._dictunbaser
def __call__(self, string):
return self.unbase(string)
def _dictunbaser(self, string):
"""Decodes a value to an integer."""
ret = 0
for index, cipher in enumerate(string[::-1]):
ret += (self.base ** index) * self.dictionary[cipher]
return ret
| bsd-3-clause | -3,147,354,450,630,264,000 | 30.75 | 78 | 0.586917 | false | 3.475789 | false | false | false |
midma101/AndIWasJustGoingToBed | .venv/lib/python2.7/site-packages/markupsafe/__init__.py | 371 | 8205 | # -*- coding: utf-8 -*-
"""
markupsafe
~~~~~~~~~~
Implements a Markup string.
:copyright: (c) 2010 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import re
from markupsafe._compat import text_type, string_types, int_types, \
unichr, PY2
__all__ = ['Markup', 'soft_unicode', 'escape', 'escape_silent']
_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')
_entity_re = re.compile(r'&([^;]+);')
class Markup(text_type):
r"""Marks a string as being safe for inclusion in HTML/XML output without
needing to be escaped. This implements the `__html__` interface a couple
of frameworks and web applications use. :class:`Markup` is a direct
subclass of `unicode` and provides all the methods of `unicode` just that
it escapes arguments passed and always returns `Markup`.
The `escape` function returns markup objects so that double escaping can't
happen.
The constructor of the :class:`Markup` class can be used for three
different things: When passed an unicode object it's assumed to be safe,
when passed an object with an HTML representation (has an `__html__`
method) that representation is used, otherwise the object passed is
converted into a unicode string and then assumed to be safe:
>>> Markup("Hello <em>World</em>!")
Markup(u'Hello <em>World</em>!')
>>> class Foo(object):
... def __html__(self):
... return '<a href="#">foo</a>'
...
>>> Markup(Foo())
Markup(u'<a href="#">foo</a>')
If you want object passed being always treated as unsafe you can use the
:meth:`escape` classmethod to create a :class:`Markup` object:
>>> Markup.escape("Hello <em>World</em>!")
Markup(u'Hello <em>World</em>!')
Operations on a markup string are markup aware which means that all
arguments are passed through the :func:`escape` function:
>>> em = Markup("<em>%s</em>")
>>> em % "foo & bar"
Markup(u'<em>foo & bar</em>')
>>> strong = Markup("<strong>%(text)s</strong>")
>>> strong % {'text': '<blink>hacker here</blink>'}
Markup(u'<strong><blink>hacker here</blink></strong>')
>>> Markup("<em>Hello</em> ") + "<foo>"
Markup(u'<em>Hello</em> <foo>')
"""
__slots__ = ()
def __new__(cls, base=u'', encoding=None, errors='strict'):
if hasattr(base, '__html__'):
base = base.__html__()
if encoding is None:
return text_type.__new__(cls, base)
return text_type.__new__(cls, base, encoding, errors)
def __html__(self):
return self
def __add__(self, other):
if isinstance(other, string_types) or hasattr(other, '__html__'):
return self.__class__(super(Markup, self).__add__(self.escape(other)))
return NotImplemented
def __radd__(self, other):
if hasattr(other, '__html__') or isinstance(other, string_types):
return self.escape(other).__add__(self)
return NotImplemented
def __mul__(self, num):
if isinstance(num, int_types):
return self.__class__(text_type.__mul__(self, num))
return NotImplemented
__rmul__ = __mul__
def __mod__(self, arg):
if isinstance(arg, tuple):
arg = tuple(_MarkupEscapeHelper(x, self.escape) for x in arg)
else:
arg = _MarkupEscapeHelper(arg, self.escape)
return self.__class__(text_type.__mod__(self, arg))
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
text_type.__repr__(self)
)
def join(self, seq):
return self.__class__(text_type.join(self, map(self.escape, seq)))
join.__doc__ = text_type.join.__doc__
def split(self, *args, **kwargs):
return list(map(self.__class__, text_type.split(self, *args, **kwargs)))
split.__doc__ = text_type.split.__doc__
def rsplit(self, *args, **kwargs):
return list(map(self.__class__, text_type.rsplit(self, *args, **kwargs)))
rsplit.__doc__ = text_type.rsplit.__doc__
def splitlines(self, *args, **kwargs):
return list(map(self.__class__, text_type.splitlines(self, *args, **kwargs)))
splitlines.__doc__ = text_type.splitlines.__doc__
def unescape(self):
r"""Unescape markup again into an text_type string. This also resolves
known HTML4 and XHTML entities:
>>> Markup("Main » <em>About</em>").unescape()
u'Main \xbb <em>About</em>'
"""
from markupsafe._constants import HTML_ENTITIES
def handle_match(m):
name = m.group(1)
if name in HTML_ENTITIES:
return unichr(HTML_ENTITIES[name])
try:
if name[:2] in ('#x', '#X'):
return unichr(int(name[2:], 16))
elif name.startswith('#'):
return unichr(int(name[1:]))
except ValueError:
pass
return u''
return _entity_re.sub(handle_match, text_type(self))
def striptags(self):
r"""Unescape markup into an text_type string and strip all tags. This
also resolves known HTML4 and XHTML entities. Whitespace is
normalized to one:
>>> Markup("Main » <em>About</em>").striptags()
u'Main \xbb About'
"""
stripped = u' '.join(_striptags_re.sub('', self).split())
return Markup(stripped).unescape()
@classmethod
def escape(cls, s):
"""Escape the string. Works like :func:`escape` with the difference
that for subclasses of :class:`Markup` this function would return the
correct subclass.
"""
rv = escape(s)
if rv.__class__ is not cls:
return cls(rv)
return rv
def make_wrapper(name):
orig = getattr(text_type, name)
def func(self, *args, **kwargs):
args = _escape_argspec(list(args), enumerate(args), self.escape)
#_escape_argspec(kwargs, kwargs.iteritems(), None)
return self.__class__(orig(self, *args, **kwargs))
func.__name__ = orig.__name__
func.__doc__ = orig.__doc__
return func
for method in '__getitem__', 'capitalize', \
'title', 'lower', 'upper', 'replace', 'ljust', \
'rjust', 'lstrip', 'rstrip', 'center', 'strip', \
'translate', 'expandtabs', 'swapcase', 'zfill':
locals()[method] = make_wrapper(method)
# new in python 2.5
if hasattr(text_type, 'partition'):
def partition(self, sep):
return tuple(map(self.__class__,
text_type.partition(self, self.escape(sep))))
def rpartition(self, sep):
return tuple(map(self.__class__,
text_type.rpartition(self, self.escape(sep))))
# new in python 2.6
if hasattr(text_type, 'format'):
format = make_wrapper('format')
# not in python 3
if hasattr(text_type, '__getslice__'):
__getslice__ = make_wrapper('__getslice__')
del method, make_wrapper
def _escape_argspec(obj, iterable, escape):
"""Helper for various string-wrapped functions."""
for key, value in iterable:
if hasattr(value, '__html__') or isinstance(value, string_types):
obj[key] = escape(value)
return obj
class _MarkupEscapeHelper(object):
"""Helper for Markup.__mod__"""
def __init__(self, obj, escape):
self.obj = obj
self.escape = escape
__getitem__ = lambda s, x: _MarkupEscapeHelper(s.obj[x], s.escape)
__unicode__ = __str__ = lambda s: text_type(s.escape(s.obj))
__repr__ = lambda s: str(s.escape(repr(s.obj)))
__int__ = lambda s: int(s.obj)
__float__ = lambda s: float(s.obj)
# we have to import it down here as the speedups and native
# modules imports the markup type which is define above.
try:
from markupsafe._speedups import escape, escape_silent, soft_unicode
except ImportError:
from markupsafe._native import escape, escape_silent, soft_unicode
if not PY2:
soft_str = soft_unicode
__all__.append('soft_str')
| mit | 1,277,062,579,156,340,500 | 34.064103 | 85 | 0.577331 | false | 3.848499 | false | false | false |
denfromufa/pythonnet | demo/helloform.py | 8 | 1769 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import clr
SWF = clr.AddReference("System.Windows.Forms")
print (SWF.Location)
import System.Windows.Forms as WinForms
from System.Drawing import Size, Point
class HelloApp(WinForms.Form):
"""A simple hello world app that demonstrates the essentials of
winforms programming and event-based programming in Python."""
def __init__(self):
self.Text = "Hello World From Python"
self.AutoScaleBaseSize = Size(5, 13)
self.ClientSize = Size(392, 117)
h = WinForms.SystemInformation.CaptionHeight
self.MinimumSize = Size(392, (117 + h))
# Create the button
self.button = WinForms.Button()
self.button.Location = Point(160, 64)
self.button.Size = Size(820, 20)
self.button.TabIndex = 2
self.button.Text = "Click Me!"
# Register the event handler
self.button.Click += self.button_Click
# Create the text box
self.textbox = WinForms.TextBox()
self.textbox.Text = "Hello World"
self.textbox.TabIndex = 1
self.textbox.Size = Size(1260, 40)
self.textbox.Location = Point(160, 24)
# Add the controls to the form
self.AcceptButton = self.button
self.Controls.Add(self.button)
self.Controls.Add(self.textbox)
def button_Click(self, sender, args):
"""Button click event handler"""
print ("Click")
WinForms.MessageBox.Show("Please do not press this button again.")
def run(self):
WinForms.Application.Run(self)
def main():
form = HelloApp()
print ("form created")
app = WinForms.Application
print ("app referenced")
app.Run(form)
if __name__ == '__main__':
main()
| mit | -985,810,494,902,420,700 | 27.079365 | 74 | 0.626343 | false | 3.76383 | false | false | false |
SpOOnman/claws | tools/vcard2xml.py | 11 | 8810 | #!/usr/bin/env python
# -*- coding: latin-1 -*-
"""
Copyright © 2003 Bogdan Sumanariu <[email protected]>
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
script name : evolutionvcard2claws.py
script purpose : convert an evolution addressbook VCARD file
into a Claws Mail addressbook
tested with evolution 1.2.x, and 1.4.x
"""
import string
import sys
import time
import os
import StringIO
keywds = ('x-evolution-file-as','fn', 'n','email;internet','nickname', 'url', 'org')
def normalizeLongLines(file):
"""
Skip line breaks after 72 chars
"""
buf = ''
line = file.readline()
while line:
if line[0] == ' ':
buf = buf.rstrip('\n')
line = line.lstrip();
buf += line
else:
buf += line
line = file.readline()
return buf
def getEmailAddress(vcard):
"""
Get email address.
Supported formats:
- email;something
- email;type=something
something := (internet,work,home, other)
"""
for key in vcard:
items = key.split(';')
if len(items) == 2:
if items[0].lower() == 'email':
list = vcard[key]
return list[0]
else:
if key.lower() == 'email':
list = vcard[key]
return list[0]
return ""
def findName(vcard):
"""
Find a version 3.0 name
"""
for key in vcard:
items = key.split(';')
if len(items) == 2:
if items[0].lower() == 'n':
return vcard[key]
else:
if key.lower() == 'n':
return vcard[key]
return None
################################################################################
## reads a vcard and stores as hash pairs key/value where value is a list ##
################################################################################
def readVCARD (buffer) :
"""
	skips from <file> until a 'begin' tag of a VCARD is encountered.
	from this point on, constructs a map (key, [values])
VCARD entry format -> tag:value
key <- tag
[values] <- list with the values of <tag> if there are more tags with the same name
"""
r=' '
bgn,end = -1, -1;
d = dict()
while r and bgn < 0 :
r = buffer.readline()
if len (r) == 0 : return dict()
if string.find('begin',string.lower(string.strip(r))) :
bgn = 1
while r and end < 0 :
r = buffer.readline()
s = string.split(string.lower(string.strip(r)),':')
if s[0] <> '' :
if d.has_key(s[0]) :
d[s[0]].append(s[1])
elif len(s) > 1:
d[s[0]] = [s[1]]
else :
d[s[0]] = ['']
if s[0] == 'end' : end = 1
return d
##################################################################################
###############################################################################################
## writes on a given file an xml representation for claws-mail addressbook received as a hash ##
###############################################################################################
def writeXMLREPR (vcard,file,uid) :
"""
based on <vcard> and <uid> writes only recognized tags (the ones defined in <keywds> list)
NOTE: <url> and <org> tag will be written as attributes (there are such tags in claws-mail's
XML schema)
"""
if len (vcard.keys()) == 0 : return
item = vcard.get(keywds[2]);
if item:
name = string.split(item[0],';')
else:
""" version 3.0 n ?"""
name = findName(vcard)
if not name:
return
fn, ln, nick, cn, a = '', '', '', '', ''
if len(name) >= 2 :
fn = name[0]
ln = name[1]
elif len(name) ==1 :
fn = name[0]
if vcard.has_key(keywds[4]) :
nick = vcard.get(keywds[4])[0]
if len(vcard.get(keywds[1])[0]) :
cn = vcard.get(keywds[1])[0]
else :
cn = vcard.get(keywds[0])[0];
a += str('\n<person uid=\"' + str(uid[0]) + '\" first-name=\"' + fn + '\" last-name=\"' + ln
+ '\" nick-name=\"' + nick + '\" cn=\"' + cn + '\" >\n')
a += '\t<address-list>\n'
if vcard.get(keywds[3]) :
for c in vcard.get(keywds[3]) :
uid[0] = uid[0] + 1
a += '\t\t<address uid=\"' + str(uid[0]) + '\" alias=\"' + nick + '\" email=\"' + c + '\" remarks=\"\" />\n'
else :
email = getEmailAddress(vcard)
uid[0] = uid[0]+1
a += '\t\t<address uid=\"' + str(uid[0]) + '\" alias=\"' + nick + '\" email=\"' + email + '\" remarks=\"\" />\n'
a += '\t</address-list>\n'
a += '\t<attribute-list>\n'
for key in keywds[5:] :
if vcard.get(key) :
for c in vcard.get(key) :
uid[0] = uid[0] + 1
a += '\t\t<attribute uid=\"' + str(uid[0]) + '\" name=\"' + key +'\">'+c+'</attribute>\n'
a += '\t</attribute-list>\n'
a += '</person>\n'
file.write(a)
file.flush()
###################################################################################################
def convert (in_f, o_f, name='INBOX') :
d = {'d':1}
uid = [int(time.time())]
try :
		print 'processing...\n'
o_f.write('<?xml version="1.0" encoding="ISO-8859-1" ?>\n<address-book name="'+name+'" >\n');
buf = normalizeLongLines(in_f)
buffer = StringIO.StringIO(buf)
while len(d.keys()) > 0 :
d = readVCARD(buffer)
writeXMLREPR (d, o_f, uid)
uid[0] = uid [0]+1
o_f.write('\n</address-book>')
print 'finished processing...\n'
except IOError, err :
print 'Caught an IOError : ',err,'\t ABORTING!!!'
raise err
#################################################################################################
def execute () :
if len(sys.argv) <> 3 and len(sys.argv) <> 2 :
print str("\nUsage: vcard2xml.py source_file [destination_file]\n\n" +
			'\tWhen only <source_file> is specified, the existing addressbook will be overwritten.\n'+
			'\tWhen both arguments are supplied, a new additional addressbook named \n\tas the destination file will be created.'+'\n\tNOTE: in both cases Claws Mail must be closed and must have been run at least once.\n\n')
sys.exit(1)
in_file = None
out_file = None
path_to_out = os.environ['HOME']+'/.claws-mail/'
adr_idx = 'addrbook--index.xml'
adr_idx_file = None
tmp_adr_idx_file= None
got_ex = 0
try :
in_file = open(sys.argv[1])
except IOError, e:
print 'Could not open input file <',sys.argv[1],'> ABORTING'
sys.exit(1)
if len(sys.argv) == 2 :
try :
dlist = os.listdir(path_to_out);
flist=[]
for l in dlist :
if l.find('addrbook') == 0 and l.find("addrbook--index.xml") < 0 and l.find('bak') < 0 :
flist.append(l)
flist.sort()
out_file = flist.pop()
os.rename(path_to_out+out_file, path_to_out+out_file+'.tmp')
out_file = open(path_to_out+out_file,'w')
convert(in_file, out_file)
except Exception, e:
got_ex = 1
print 'got exception: ', e
else :
try :
os.rename(path_to_out+adr_idx, path_to_out+adr_idx+'.tmp')
tmp_adr_idx_file = open(path_to_out+adr_idx+'.tmp')
adr_idx_file = open(path_to_out+adr_idx,'w')
except Exception, e :
print 'Could not open <', path_to_out+adr_idx,'> file. Make sure you started Claws Mail at least once.'
sys.exit(1)
try :
out_file = open(path_to_out+sys.argv[2],'w')
convert(in_file, out_file, sys.argv[2].split('.xml')[0])
l = tmp_adr_idx_file.readline()
while l :
if l.strip() == '</book_list>' :
adr_idx_file.write('\t<book name="'+sys.argv[2].split('.xml')[0] +'" file="'+sys.argv[2]+'" />\n')
adr_idx_file.write(l)
else :
adr_idx_file.write(l)
l = tmp_adr_idx_file.readline()
except Exception, e:
got_ex = 1
print 'got exception: ', e
if got_ex :
#clean up the mess
print 'got exception, cleaning up the mess... changed files will be restored...\n'
if adr_idx_file :
adr_idx_file.close()
if out_file :
out_file.close()
if len(sys.argv) == 2 :
os.rename(out_file.name+'.tmp', out_file.name)
else :
os.remove(out_file.name)
os.rename(path_to_out+adr_idx+'.tmp', path_to_out+adr_idx)
if tmp_adr_idx_file :
tmp_adr_idx_file.close()
else :
#closing all and moving temporary data into place
print 'closing open files...\n'
in_file.close()
out_file.close()
if len(sys.argv) == 3 :
os.rename(path_to_out+adr_idx+'.tmp',path_to_out+adr_idx+'.bak' )
if len(sys.argv) == 2 :
os.rename(out_file.name+'.tmp', out_file.name+'.bak')
if adr_idx_file :
adr_idx_file.close()
if tmp_adr_idx_file :
tmp_adr_idx_file.close()
print 'done!'
if __name__ == '__main__':
execute ()
| gpl-3.0 | 1,002,031,994,638,320,500 | 27.419355 | 197 | 0.56084 | false | 2.838273 | false | false | false |
overtherain/scriptfile | software/googleAppEngine/lib/django_1_4/django/contrib/localflavor/za/forms.py | 89 | 1940 | """
South Africa-specific Form helpers
"""
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import CharField, RegexField
from django.utils.checksums import luhn
from django.utils.translation import gettext as _
import re
from datetime import date
id_re = re.compile(r'^(?P<yy>\d\d)(?P<mm>\d\d)(?P<dd>\d\d)(?P<mid>\d{4})(?P<end>\d{3})')
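# Layout implied by the regex above: 13 digits in total, namely a YYMMDD birth date,
# a 4-digit sequence number and 3 trailing digits; ZAIDField below additionally
# runs the Luhn checksum over the whole number.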
class ZAIDField(CharField):
"""A form field for South African ID numbers -- the checksum is validated
    using the Luhn checksum, and uses a simplistic (read: not entirely accurate)
check for the birthdate
"""
default_error_messages = {
'invalid': _(u'Enter a valid South African ID number'),
}
def clean(self, value):
super(ZAIDField, self).clean(value)
if value in EMPTY_VALUES:
return u''
# strip spaces and dashes
value = value.strip().replace(' ', '').replace('-', '')
match = re.match(id_re, value)
if not match:
raise ValidationError(self.error_messages['invalid'])
g = match.groupdict()
try:
            # The year 2000 is conveniently a leap year.
# This algorithm will break in xx00 years which aren't leap years
# There is no way to guess the century of a ZA ID number
d = date(int(g['yy']) + 2000, int(g['mm']), int(g['dd']))
except ValueError:
raise ValidationError(self.error_messages['invalid'])
if not luhn(value):
raise ValidationError(self.error_messages['invalid'])
return value
class ZAPostCodeField(RegexField):
default_error_messages = {
'invalid': _(u'Enter a valid South African postal code'),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(ZAPostCodeField, self).__init__(r'^\d{4}$',
max_length, min_length, *args, **kwargs)
| mit | 3,290,177,538,023,332,400 | 31.333333 | 88 | 0.624742 | false | 3.796477 | false | false | false |
gomiero/PTVS | Python/Product/Pyvot/Pyvot/xl/__init__.py | 18 | 4494 | # Copyright (c) Microsoft Corporation.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the LICENSE.txt file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# [email protected]. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
"""Pyvot - Pythonic interface for data exploration in Excel
The user-level API for the `xl` package follows. For interactive use, consider running the :ref:`interactive shell <interactive>`::
python -m xl.shell
**Managing Excel workbooks**:
- :class:`xl.Workbook() <xl.sheet.Workbook>` opens a new workbook
- xl.Workbook("filename") attaches to an existing workbook, or opens it
- :func:`xl.workbooks() <xl.tools.workbooks>` returns a Workbook for each that is currently open
**Excel Ranges**:
- :class:`xl.Range <xl.range.Range>` is the base type for a contiguous range of Excel cells.
- :func:`xl.get() <xl.tools.get>` / :meth:`Workbook.get <xl.sheet.Workbook.get>` / etc. return Ranges; namely, subclasses such as
:class:`xl.RowVector <xl.range.RowVector>`, :class:`xl.ColumnVector <xl.range.ColumnVector>`,
:class:`xl.Matrix <xl.range.Matrix>`, or :class:`xl.Scalar <xl.range.Scalar>`
- :meth:`xl.Range.get` / :meth:`xl.Range.set` allow reading from / writing to Excel
**Tools**:
- :func:`xl.map <xl.tools.map>` / :func:`xl.apply <xl.tools.apply>` / :func:`xl.filter <xl.tools.filter>` operate
like their Python counterparts, but read and write from an Excel workbook
``from xl import *`` imports :func:`xlmap`, etc. instead, to avoid overriding builtins.
- :func:`xl.join() <xl.tools.join>` allows joining two Excel tables by a pair of key columns
- :func:`xl.get() <xl.tools.get>` fetches a Range for a table column (by column name), named Excel range, or for an
Excel address (ex. A1:B1). It attempts to guess the active Workbook, and begins looking in the active sheet.
See also :meth:`Workbook.get <xl.sheet.Workbook.get>`
- :func:`xl.view() <xl.tools.view>` splats a list of Python values to an empty column in Excel
- :func:`xl.selected_range() <xl.tools.selected_range>` / :func:`xl.selected_value() <xl.tools.selected_value>`
provide the active sheet's selection"""
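# Illustrative usage (a sketch only; range addresses and values here are hypothetical):
#   import xl
#   wb = xl.Workbook()                      # open a new workbook
#   col = xl.get("A1:A5")                   # Range for an address, column name or named range
#   xl.view([1, 2, 3])                      # splat a Python list into an empty column
#   doubled = xl.map(lambda v: v * 2, col)  # read, transform and write back to Excel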
try:
__import__('win32com')
except ImportError as e:
import ctypes
import sys
is_64bit = ctypes.sizeof(ctypes.c_voidp) > 4
arch_str = "64-bit" if is_64bit else "32-bit"
ver = "%d.%d" % (sys.version_info.major, sys.version_info.minor)
raise Exception("pywin32 does not appear to be installed. Visit http://sourceforge.net/projects/pywin32/ and download "
"build 216 or above for Python %s (%s)" % (ver, arch_str), e)
from .version import __version__
# Conventions:
# - prefix excel COM objects with "xl". Apply to field and method names.
# Design conventions:
# - Very low activation energy for users.
# Layer between "precise (dumb)" operations (which are often not useful) and "guess user intent (smart)" operations
# (which can be much more useful).
# Users start with "smart" general operations and work towards the precise ones.
# - Global functions use the "current" workbook, which iterates all sheets.
from .range import Range, Vector, Scalar, RowVector, ColumnVector, Matrix, ExcelRangeError
from .cache import CacheManager, enable_caching, cache_result
from .tools import get, view, join, map, apply, filter, selected_range, selected_value, workbooks
from .sheet import Workbook
# We want to allow 'from xl import *' without clobbering builtin map / apply / filter.
# We define these aliases, and exclude map / apply / filter from __all__.
# This way xl.map works, but 'from xl import *' imports xlmap instead
xlmap, xlapply, xlfilter = map, apply, filter
__all__ = ['Range', 'Vector', 'Scalar', 'RowVector', 'ColumnVector', 'Matrix', 'ExcelRangeError',
'CacheManager', 'enable_caching', 'cache_result',
'get', 'view', 'join', 'selected_range', 'selected_value', 'workbooks',
'xlmap', 'xlapply', 'xlfilter', # We omit map / apply / filter from __all__ but include these. See above
'Workbook'] | apache-2.0 | -5,316,928,215,186,286,000 | 56.38961 | 133 | 0.683133 | false | 3.533019 | false | false | false |
odootr/odoo | addons/account/wizard/account_tax_chart.py | 385 | 3247 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_tax_chart(osv.osv_memory):
"""
For Chart of taxes
"""
_name = "account.tax.chart"
_description = "Account tax chart"
_columns = {
'period_id': fields.many2one('account.period', \
'Period', \
),
'target_move': fields.selection([('posted', 'All Posted Entries'),
('all', 'All Entries'),
], 'Target Moves', required=True),
}
def _get_period(self, cr, uid, context=None):
"""Return default period value"""
period_ids = self.pool.get('account.period').find(cr, uid, context=context)
return period_ids and period_ids[0] or False
def account_tax_chart_open_window(self, cr, uid, ids, context=None):
"""
Opens chart of Accounts
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of account chart’s IDs
        @return: dictionary for the 'Open account chart' window action, restricted to the given fiscal year and to all or only posted entries
"""
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
if context is None:
context = {}
data = self.browse(cr, uid, ids, context=context)[0]
result = mod_obj.get_object_reference(cr, uid, 'account', 'action_tax_code_tree')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
if data.period_id:
result['context'] = str({'period_id': data.period_id.id, \
'fiscalyear_id': data.period_id.fiscalyear_id.id, \
'state': data.target_move})
period_code = data.period_id.code
result['name'] += period_code and (':' + period_code) or ''
else:
result['context'] = str({'state': data.target_move})
return result
_defaults = {
'period_id': _get_period,
'target_move': 'posted'
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 1,979,491,283,107,187,200 | 41.116883 | 110 | 0.55905 | false | 4.157692 | false | false | false |
blitzmann/Pyfa | eos/saveddata/fighterAbility.py | 1 | 5887 | # ===============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of eos.
#
# eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with eos. If not, see <http://www.gnu.org/licenses/>.
# ===============================================================================
from logbook import Logger
from sqlalchemy.orm import reconstructor
from eos.utils.stats import DmgTypes
pyfalog = Logger(__name__)
class FighterAbility(object):
# We aren't able to get data on the charges that can be stored with fighters. So we hardcode that data here, keyed
# with the fighter squadron role
NUM_SHOTS_MAPPING = {
1: 0, # Superiority fighter / Attack
2: 12, # Light fighter / Attack
4: 6, # Heavy fighter / Heavy attack
5: 3, # Heavy fighter / Long range attack
}
# Same as above
REARM_TIME_MAPPING = {
1: 0, # Superiority fighter / Attack
2: 4000, # Light fighter / Attack
4: 6000, # Heavy fighter / Heavy attack
5: 20000, # Heavy fighter / Long range attack
}
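    # Illustrative: a Light fighter (role 2) carrying charges gets numShots = 12 and
    # reloadTime = fighterRefuelingTime + 12 * 4000 ms (see the properties below).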
def __init__(self, effect):
"""Initialize from the program"""
self.__effect = effect
self.effectID = effect.ID if effect is not None else None
self.active = False
self.build()
@reconstructor
def init(self):
"""Initialize from the database"""
self.__effect = None
if self.effectID:
self.__effect = next((x for x in self.fighter.item.effects.values() if x.ID == self.effectID), None)
if self.__effect is None:
pyfalog.error("Effect (id: {0}) does not exist", self.effectID)
return
self.build()
def build(self):
pass
@property
def effect(self):
return self.__effect
@property
def name(self):
return self.__effect.getattr('displayName') or self.__effect.name
@property
def attrPrefix(self):
return self.__effect.getattr('prefix')
@property
def dealsDamage(self):
attr = "{}DamageMultiplier".format(self.attrPrefix)
return attr in self.fighter.itemModifiedAttributes or self.fighter.charge is not None
@property
def grouped(self):
        # is the ability applied per fighter (webs, returns False), or as a group (MWD, returns True)
return self.__effect.getattr('grouped')
@property
def hasCharges(self):
return self.__effect.getattr('hasCharges')
@property
def reloadTime(self):
rearm_time = (self.REARM_TIME_MAPPING[self.fighter.getModifiedItemAttr("fighterSquadronRole")] or 0 if self.hasCharges else 0)
return self.fighter.getModifiedItemAttr("fighterRefuelingTime") + rearm_time * self.numShots
@property
def numShots(self):
return self.NUM_SHOTS_MAPPING[self.fighter.getModifiedItemAttr("fighterSquadronRole")] or 0 if self.hasCharges else 0
@property
def cycleTime(self):
speed = self.fighter.getModifiedItemAttr("{}Duration".format(self.attrPrefix))
# Factor in reload
'''
reload = self.reloadTime
if self.fighter.owner.factorReload:
numShots = self.numShots
# Speed here already takes into consideration reactivation time
speed = (speed * numShots + reload) / numShots if numShots > 0 else speed
'''
return speed
def getVolley(self, targetResists=None):
if not self.dealsDamage or not self.active:
return DmgTypes(0, 0, 0, 0)
if self.attrPrefix == "fighterAbilityLaunchBomb":
em = self.fighter.getModifiedChargeAttr("emDamage", 0)
therm = self.fighter.getModifiedChargeAttr("thermalDamage", 0)
kin = self.fighter.getModifiedChargeAttr("kineticDamage", 0)
exp = self.fighter.getModifiedChargeAttr("explosiveDamage", 0)
else:
em = self.fighter.getModifiedItemAttr("{}DamageEM".format(self.attrPrefix), 0)
therm = self.fighter.getModifiedItemAttr("{}DamageTherm".format(self.attrPrefix), 0)
kin = self.fighter.getModifiedItemAttr("{}DamageKin".format(self.attrPrefix), 0)
exp = self.fighter.getModifiedItemAttr("{}DamageExp".format(self.attrPrefix), 0)
dmgMult = self.fighter.amountActive * self.fighter.getModifiedItemAttr("{}DamageMultiplier".format(self.attrPrefix), 1)
volley = DmgTypes(
em=em * dmgMult * (1 - getattr(targetResists, "emAmount", 0)),
thermal=therm * dmgMult * (1 - getattr(targetResists, "thermalAmount", 0)),
kinetic=kin * dmgMult * (1 - getattr(targetResists, "kineticAmount", 0)),
explosive=exp * dmgMult * (1 - getattr(targetResists, "explosiveAmount", 0)))
return volley
def getDps(self, targetResists=None):
volley = self.getVolley(targetResists=targetResists)
if not volley:
return DmgTypes(0, 0, 0, 0)
dpsFactor = 1 / (self.cycleTime / 1000)
dps = DmgTypes(
em=volley.em * dpsFactor,
thermal=volley.thermal * dpsFactor,
kinetic=volley.kinetic * dpsFactor,
explosive=volley.explosive * dpsFactor)
return dps
def clear(self):
self.__dps = None
self.__volley = None
| gpl-3.0 | -2,150,383,331,423,504,100 | 36.737179 | 134 | 0.625786 | false | 3.6702 | false | false | false |
davidwaroquiers/pymatgen | pymatgen/io/abinit/tests/test_inputs.py | 5 | 10923 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import tempfile
import unittest
import numpy as np
from pymatgen.core.structure import Structure
from pymatgen.io.abinit.inputs import (
BasicAbinitInput,
BasicMultiDataset,
ShiftMode,
calc_shiftk,
ebands_input,
gs_input,
ion_ioncell_relax_input,
num_valence_electrons,
)
from pymatgen.util.testing import PymatgenTest
_test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..", "test_files", "abinit")
def abiref_file(filename):
"""Return absolute path to filename in ~pymatgen/test_files/abinit"""
return os.path.join(_test_dir, filename)
def abiref_files(*filenames):
"""Return list of absolute paths to filenames in ~pymatgen/test_files/abinit"""
return [os.path.join(_test_dir, f) for f in filenames]
class AbinitInputTestCase(PymatgenTest):
"""Unit tests for BasicAbinitInput."""
def test_api(self):
"""Testing BasicAbinitInput API."""
# Build simple input with structure and pseudos
unit_cell = {
"acell": 3 * [10.217],
"rprim": [[0.0, 0.5, 0.5], [0.5, 0.0, 0.5], [0.5, 0.5, 0.0]],
"ntypat": 1,
"znucl": [14],
"natom": 2,
"typat": [1, 1],
"xred": [[0.0, 0.0, 0.0], [0.25, 0.25, 0.25]],
}
inp = BasicAbinitInput(structure=unit_cell, pseudos=abiref_file("14si.pspnc"))
shiftk = [[0.5, 0.5, 0.5], [0.5, 0.0, 0.0], [0.0, 0.5, 0.0], [0.0, 0.0, 0.5]]
self.assertArrayEqual(calc_shiftk(inp.structure), shiftk)
assert num_valence_electrons(inp.structure, inp.pseudos) == 8
repr(inp), str(inp)
assert len(inp) == 0 and not inp
assert inp.get("foo", "bar") == "bar" and inp.pop("foo", "bar") == "bar"
assert inp.comment is None
inp.set_comment("This is a comment")
assert inp.comment == "This is a comment"
assert inp.isnc and not inp.ispaw
inp["ecut"] = 1
assert inp.get("ecut") == 1 and len(inp) == 1 and "ecut" in inp.keys() and "foo" not in inp
# Test to_string
assert inp.to_string(with_structure=True, with_pseudos=True)
assert inp.to_string(with_structure=False, with_pseudos=False)
inp.set_vars(ecut=5, toldfe=1e-6)
assert inp["ecut"] == 5
inp.set_vars_ifnotin(ecut=-10)
assert inp["ecut"] == 5
_, tmpname = tempfile.mkstemp(text=True)
inp.write(filepath=tmpname)
# Cannot change structure variables directly.
with self.assertRaises(inp.Error):
inp.set_vars(unit_cell)
with self.assertRaises(TypeError):
inp.add_abiobjects({})
with self.assertRaises(KeyError):
inp.remove_vars("foo", strict=True)
assert not inp.remove_vars("foo", strict=False)
# Test deepcopy and remove_vars.
inp["bdgw"] = [1, 2]
inp_copy = inp.deepcopy()
inp_copy["bdgw"][1] = 3
assert inp["bdgw"] == [1, 2]
assert inp.remove_vars("bdgw") and "bdgw" not in inp
removed = inp.pop_tolerances()
assert len(removed) == 1 and removed["toldfe"] == 1e-6
# Test set_spin_mode
old_vars = inp.set_spin_mode("polarized")
assert "nsppol" in inp and inp["nspden"] == 2 and inp["nspinor"] == 1
inp.set_vars(old_vars)
# Test set_structure
new_structure = inp.structure.copy()
new_structure.perturb(distance=0.1)
inp.set_structure(new_structure)
assert inp.structure == new_structure
# Compatible with Pickle and MSONable?
self.serialize_with_pickle(inp, test_eq=False)
def test_input_errors(self):
"""Testing typical BasicAbinitInput Error"""
si_structure = Structure.from_file(abiref_file("si.cif"))
# Ambiguous list of pseudos.
with self.assertRaises(BasicAbinitInput.Error):
BasicAbinitInput(si_structure, pseudos=abiref_files("14si.pspnc", "14si.4.hgh"))
# Pseudos do not match structure.
with self.assertRaises(BasicAbinitInput.Error):
BasicAbinitInput(si_structure, pseudos=abiref_file("H-wdr.oncvpsp"))
si1_negative_volume = dict(
ntypat=1,
natom=1,
typat=[1],
znucl=14,
acell=3 * [7.60],
rprim=[[0.0, 0.5, 0.5], [-0.5, -0.0, -0.5], [0.5, 0.5, 0.0]],
xred=[[0.0, 0.0, 0.0]],
)
# Negative triple product.
with self.assertRaises(BasicAbinitInput.Error):
BasicAbinitInput(si1_negative_volume, pseudos=abiref_files("14si.pspnc"))
def test_helper_functions(self):
"""Testing BasicAbinitInput helper functions."""
inp = BasicAbinitInput(structure=abiref_file("si.cif"), pseudos="14si.pspnc", pseudo_dir=_test_dir)
inp.set_kmesh(ngkpt=(1, 2, 3), shiftk=(1, 2, 3, 4, 5, 6))
assert inp["kptopt"] == 1 and inp["nshiftk"] == 2
inp.set_gamma_sampling()
assert inp["kptopt"] == 1 and inp["nshiftk"] == 1
assert np.all(inp["shiftk"] == 0)
inp.set_kpath(ndivsm=3, kptbounds=None)
assert inp["ndivsm"] == 3 and inp["iscf"] == -2 and len(inp["kptbounds"]) == 12
class TestMultiDataset(PymatgenTest):
"""Unit tests for BasicMultiDataset."""
def test_api(self):
"""Testing BasicMultiDataset API."""
structure = Structure.from_file(abiref_file("si.cif"))
pseudo = abiref_file("14si.pspnc")
pseudo_dir = os.path.dirname(pseudo)
multi = BasicMultiDataset(structure=structure, pseudos=pseudo)
with self.assertRaises(ValueError):
BasicMultiDataset(structure=structure, pseudos=pseudo, ndtset=-1)
multi = BasicMultiDataset(structure=structure, pseudos=pseudo, pseudo_dir=pseudo_dir)
assert len(multi) == 1 and multi.ndtset == 1
assert multi.isnc
for i, inp in enumerate(multi):
assert list(inp.keys()) == list(multi[i].keys())
multi.addnew_from(0)
assert multi.ndtset == 2 and multi[0] is not multi[1]
assert multi[0].structure == multi[1].structure
assert multi[0].structure is not multi[1].structure
multi.set_vars(ecut=2)
assert all(inp["ecut"] == 2 for inp in multi)
self.assertEqual(multi.get("ecut"), [2, 2])
multi[1].set_vars(ecut=1)
assert multi[0]["ecut"] == 2 and multi[1]["ecut"] == 1
self.assertEqual(multi.get("ecut"), [2, 1])
self.assertEqual(multi.get("foo", "default"), ["default", "default"])
multi[1].set_vars(paral_kgb=1)
assert "paral_kgb" not in multi[0]
self.assertEqual(multi.get("paral_kgb"), [None, 1])
pert_structure = structure.copy()
pert_structure.perturb(distance=0.1)
assert structure != pert_structure
assert multi.set_structure(structure) == multi.ndtset * [structure]
assert all(s == structure for s in multi.structure)
assert multi.has_same_structures
multi[1].set_structure(pert_structure)
assert multi[0].structure != multi[1].structure and multi[1].structure == pert_structure
assert not multi.has_same_structures
split = multi.split_datasets()
assert len(split) == 2 and all(split[i] == multi[i] for i in range(multi.ndtset))
repr(multi)
str(multi)
assert multi.to_string(with_pseudos=False)
tmpdir = tempfile.mkdtemp()
filepath = os.path.join(tmpdir, "run.abi")
inp.write(filepath=filepath)
multi.write(filepath=filepath)
new_multi = BasicMultiDataset.from_inputs([inp for inp in multi])
assert new_multi.ndtset == multi.ndtset
assert new_multi.structure == multi.structure
for old_inp, new_inp in zip(multi, new_multi):
assert old_inp is not new_inp
self.assertDictEqual(old_inp.as_dict(), new_inp.as_dict())
ref_input = multi[0]
new_multi = BasicMultiDataset.replicate_input(input=ref_input, ndtset=4)
assert new_multi.ndtset == 4
for inp in new_multi:
assert ref_input is not inp
self.assertDictEqual(ref_input.as_dict(), inp.as_dict())
# Compatible with Pickle and MSONable?
self.serialize_with_pickle(multi, test_eq=False)
class ShiftModeTest(PymatgenTest):
def test_shiftmode(self):
"""Testing shiftmode"""
gamma = ShiftMode.GammaCentered
assert ShiftMode.from_object("G") == gamma
assert ShiftMode.from_object(gamma) == gamma
with self.assertRaises(TypeError):
ShiftMode.from_object({})
class FactoryTest(PymatgenTest):
def setUp(self):
# Si ebands
self.si_structure = Structure.from_file(abiref_file("si.cif"))
self.si_pseudo = abiref_file("14si.pspnc")
def test_gs_input(self):
"""Testing gs_input factory."""
inp = gs_input(self.si_structure, self.si_pseudo, kppa=10, ecut=10, spin_mode="polarized")
str(inp)
assert inp["nsppol"] == 2
assert inp["nband"] == 14
self.assertArrayEqual(inp["ngkpt"], [2, 2, 2])
def test_ebands_input(self):
"""Testing ebands_input factory."""
multi = ebands_input(self.si_structure, self.si_pseudo, kppa=10, ecut=2)
str(multi)
scf_inp, nscf_inp = multi.split_datasets()
# Test dos_kppa and other options.
multi_dos = ebands_input(
self.si_structure,
self.si_pseudo,
nscf_nband=10,
kppa=10,
ecut=2,
spin_mode="unpolarized",
smearing=None,
charge=2.0,
dos_kppa=50,
)
assert len(multi_dos) == 3
assert all(i["charge"] == 2 for i in multi_dos)
self.assertEqual(multi_dos.get("nsppol"), [1, 1, 1])
self.assertEqual(multi_dos.get("iscf"), [None, -2, -2])
multi_dos = ebands_input(
self.si_structure,
self.si_pseudo,
nscf_nband=10,
kppa=10,
ecut=2,
spin_mode="unpolarized",
smearing=None,
charge=2.0,
dos_kppa=[50, 100],
)
assert len(multi_dos) == 4
self.assertEqual(multi_dos.get("iscf"), [None, -2, -2, -2])
str(multi_dos)
def test_ion_ioncell_relax_input(self):
"""Testing ion_ioncell_relax_input factory."""
multi = ion_ioncell_relax_input(self.si_structure, self.si_pseudo, kppa=10, ecut=2)
str(multi)
ion_inp, ioncell_inp = multi.split_datasets()
assert ion_inp["chksymbreak"] == 0
assert ion_inp["ionmov"] == 3 and ion_inp["optcell"] == 0
assert ioncell_inp["ionmov"] == 3 and ioncell_inp["optcell"] == 2
| mit | -6,020,666,609,939,037,000 | 34.349515 | 107 | 0.590955 | false | 3.317036 | true | false | false |
yasoob/youtube-dl-GUI | youtube_dl/extractor/glide.py | 64 | 1543 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class GlideIE(InfoExtractor):
IE_DESC = 'Glide mobile video messages (glide.me)'
_VALID_URL = r'https?://share\.glide\.me/(?P<id>[A-Za-z0-9\-=_+]+)'
_TEST = {
'url': 'http://share.glide.me/UZF8zlmuQbe4mr+7dCiQ0w==',
'md5': '4466372687352851af2d131cfaa8a4c7',
'info_dict': {
'id': 'UZF8zlmuQbe4mr+7dCiQ0w==',
'ext': 'mp4',
'title': "Damon's Glide message",
'thumbnail': r're:^https?://.*?\.cloudfront\.net/.*\.jpg$',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(
r'<title>(.+?)</title>', webpage,
'title', default=None) or self._og_search_title(webpage)
video_url = self._proto_relative_url(self._search_regex(
r'<source[^>]+src=(["\'])(?P<url>.+?)\1',
webpage, 'video URL', default=None,
group='url')) or self._og_search_video_url(webpage)
thumbnail = self._proto_relative_url(self._search_regex(
r'<img[^>]+id=["\']video-thumbnail["\'][^>]+src=(["\'])(?P<url>.+?)\1',
webpage, 'thumbnail url', default=None,
group='url')) or self._og_search_thumbnail(webpage)
return {
'id': video_id,
'title': title,
'url': video_url,
'thumbnail': thumbnail,
}
| mit | 8,006,231,734,576,173,000 | 34.883721 | 83 | 0.52884 | false | 3.262156 | false | false | false |
daafgo/Server_LRS | lrs/util/retrieve_statement.py | 1 | 11615 | import bencode
import hashlib
import json
from datetime import datetime
from itertools import chain
from django.core.cache import cache
from django.conf import settings
from django.core.paginator import Paginator
from django.db.models import Q
from util import convert_to_utc, convert_to_dict
from ..models import Statement, Agent
from ..objects.AgentManager import AgentManager
from ..exceptions import NotFound, IDNotFoundError
MORE_ENDPOINT = '/xapi/statements/more/'
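# complex_get() below builds its filters from these optional param_dict keys:
# 'since', 'until', 'auth', 'agent', 'related_agents', 'verb', 'activity',
# 'related_activities', 'registration' and 'ascending'.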
def complex_get(param_dict, limit, language, format, attachments):
# Tests if value is True or "true"
voidQ = Q(voided=False)
# keep track if a filter other than time or sequence is used
reffilter = False
sinceQ = Q()
if 'since' in param_dict:
sinceQ = Q(stored__gt=convert_to_utc(param_dict['since']))
untilQ = Q()
if 'until' in param_dict:
untilQ = Q(stored__lte=convert_to_utc(param_dict['until']))
# For statements/read/mine oauth scope
authQ = Q()
if 'auth' in param_dict and (param_dict['auth'] and 'statements_mine_only' in param_dict['auth']):
q_auth = param_dict['auth']['authority']
# If oauth - set authority to look for as the user
if q_auth.oauth_identifier:
authQ = Q(authority=q_auth) | Q(authority=q_auth.get_user_from_oauth_group())
# Chain all of user's oauth clients as well
else:
oauth_clients = Agent.objects.filter(member__in=[q_auth])
authQ = Q(authority=q_auth)
for client in oauth_clients:
authQ = authQ | Q(authority=client.get_user_from_oauth_group())
agentQ = Q()
if 'agent' in param_dict:
reffilter = True
agent = None
data = param_dict['agent']
related = 'related_agents' in param_dict and param_dict['related_agents']
if not type(data) is dict:
data = convert_to_dict(data)
try:
agent = AgentManager(data).Agent
if agent.objectType == "Group":
groups = []
else:
groups = agent.member.all()
agentQ = Q(actor=agent)
for g in groups:
agentQ = agentQ | Q(actor=g)
if related:
me = chain([agent], groups)
for a in me:
agentQ = agentQ | Q(object_agent=a) | Q(authority=a) \
| Q(context_instructor=a) | Q(context_team=a) \
| Q(object_substatement__actor=a) \
| Q(object_substatement__object_agent=a) \
| Q(object_substatement__context_instructor=a) \
| Q(object_substatement__context_team=a)
except IDNotFoundError:
return[]
verbQ = Q()
if 'verb' in param_dict:
reffilter = True
verbQ = Q(verb__verb_id=param_dict['verb'])
# activity
activityQ = Q()
if 'activity' in param_dict:
reffilter = True
activityQ = Q(object_activity__activity_id=param_dict['activity'])
if 'related_activities' in param_dict and param_dict['related_activities']:
activityQ = activityQ | Q(statementcontextactivity__context_activity__activity_id=param_dict['activity']) \
| Q(object_substatement__object_activity__activity_id=param_dict['activity']) \
| Q(object_substatement__substatementcontextactivity__context_activity__activity_id=param_dict['activity'])
registrationQ = Q()
if 'registration' in param_dict:
reffilter = True
registrationQ = Q(context_registration=param_dict['registration'])
# If want ordered by ascending
stored_param = '-stored'
if 'ascending' in param_dict and param_dict['ascending']:
stored_param = 'stored'
stmtset = Statement.objects.filter(voidQ & untilQ & sinceQ & authQ & agentQ & verbQ & activityQ & registrationQ)
# only find references when a filter other than
# since, until, or limit was used
if reffilter:
stmtset = findstmtrefs(stmtset.distinct(), sinceQ, untilQ)
# Calculate limit of stmts to return
return_limit = set_limit(limit)
# If there are more stmts than the limit, need to break it up and return more id
if stmtset.count() > return_limit:
return initial_cache_return(stmtset, stored_param, return_limit, language, format, attachments)
else:
return create_stmt_result(stmtset, stored_param, language, format)
def create_stmt_result(stmt_set, stored, language, format):
stmt_result = {}
    # Guard against an empty id list; the result building below assumes it is non-empty
    idlist = stmt_set.values_list('id', flat=True)
    if len(idlist) > 0:
if format == 'exact':
stmt_result = '{"statements": [%s], "more": ""}' % ",".join([json.dumps(stmt.full_statement) for stmt in \
Statement.objects.filter(id__in=idlist).order_by(stored)])
else:
stmt_result['statements'] = [stmt.to_dict(language, format) for stmt in \
Statement.objects.filter(id__in=idlist).order_by(stored)]
stmt_result['more'] = ""
else:
stmt_result['statements'] = []
stmt_result['more'] = ""
return stmt_result
def findstmtrefs(stmtset, sinceQ, untilQ):
if stmtset.count() == 0:
return stmtset
q = Q()
for s in stmtset:
q = q | Q(object_statementref__ref_id=s.statement_id)
if sinceQ and untilQ:
q = q & Q(sinceQ, untilQ)
elif sinceQ:
q = q & sinceQ
elif untilQ:
q = q & untilQ
# finally weed out voided statements in this lookup
q = q & Q(voided=False)
return findstmtrefs(Statement.objects.filter(q).distinct(), sinceQ, untilQ) | stmtset
def create_cache_key(stmt_list):
# Create unique hash data to use for the cache key
hash_data = []
hash_data.append(str(datetime.now()))
hash_data.append(str(stmt_list))
# Create cache key from hashed data (always 32 digits)
key = hashlib.md5(bencode.bencode(hash_data)).hexdigest()
return key
def initial_cache_return(stmt_list, stored, limit, language, format, attachments):
# First time someone queries POST/GET
result = {}
cache_list = []
cache_list.append([s for s in stmt_list.order_by(stored).values_list('id', flat=True)])
stmt_pager = Paginator(cache_list[0], limit)
# Always start on first page
current_page = 1
total_pages = stmt_pager.num_pages
# Create cache key from hashed data (always 32 digits)
cache_key = create_cache_key(cache_list[0])
# Add data to cache
cache_list.append(current_page)
cache_list.append(total_pages)
cache_list.append(limit)
cache_list.append(attachments)
cache_list.append(language)
cache_list.append(format)
cache_list.append(stored)
# Encode data
encoded_info = json.dumps(cache_list)
# Save encoded_dict in cache
cache.set(cache_key,encoded_info)
# Return first page of results
if format == 'exact':
result = '{"statements": [%s], "more": "%s"}' % (",".join([json.dumps(stmt.full_statement) for stmt in \
Statement.objects.filter(id__in=stmt_pager.page(1).object_list).order_by(stored)]), MORE_ENDPOINT + cache_key)
else:
result['statements'] = [stmt.to_dict(language, format) for stmt in \
Statement.objects.filter(id__in=stmt_pager.page(1).object_list).order_by(stored)]
result['more'] = MORE_ENDPOINT + cache_key
return result
def set_limit(req_limit):
if not req_limit or req_limit > settings.SERVER_STMT_LIMIT:
req_limit = settings.SERVER_STMT_LIMIT
return req_limit
def get_more_statement_request(req_id):
# Retrieve encoded info for statements
encoded_info = cache.get(req_id)
# Could have expired or never existed
if not encoded_info:
raise NotFound("List does not exist - may have expired after 24 hours")
# Decode info
decoded_info = json.loads(encoded_info)
    # Info is always cached as [stmt_list, start_page, total_pages, limit, attachments, language, format, stored]
stmt_list = decoded_info[0]
start_page = decoded_info[1]
total_pages = decoded_info[2]
limit = decoded_info[3]
attachments = decoded_info[4]
language = decoded_info[5]
format = decoded_info[6]
stored = decoded_info[7]
# Build statementResult
stmt_result = build_statement_result(stmt_list, start_page, total_pages, limit, attachments, language, format, stored, req_id)
return stmt_result, attachments
# Gets called from req_process after complex_get with list of django objects and also gets called from get_more_statement_request when
# more_id is used so list will be serialized
def build_statement_result(stmt_list, start_page, total_pages, limit, attachments, language, format, stored, more_id):
result = {}
current_page = start_page + 1
# If that was the last page to display then just return the remaining stmts
if current_page == total_pages:
stmt_pager = Paginator(stmt_list, limit)
        # Return the requested page of results
if format == 'exact':
result = '{"statements": [%s], "more": ""}' % ",".join([json.dumps(stmt.to_dict(language, format)) for stmt in \
Statement.objects.filter(id__in=stmt_pager.page(current_page).object_list).order_by(stored)])
else:
result['statements'] = [stmt.to_dict(language, format) for stmt in \
Statement.objects.filter(id__in=stmt_pager.page(current_page).object_list).order_by(stored)]
result['more'] = ""
# Set current page back for when someone hits the URL again
current_page -= 1
# Retrieve list stored in cache
encoded_list = cache.get(more_id)
# Decode info to set the current page back then encode again
decoded_list = json.loads(encoded_list)
decoded_list[1] = current_page
encoded_list = json.dumps(decoded_list)
cache.set(more_id, encoded_list)
# There are more pages to display
else:
stmt_pager = Paginator(stmt_list, limit)
# Create cache key from hashed data (always 32 digits)
cache_key = create_cache_key(stmt_list)
        # Return the requested page of results
if format == 'exact':
result = '{"statements": [%s], "more": "%s"}' % (",".join([json.dumps(stmt.to_dict(language, format)) for stmt in \
Statement.objects.filter(id__in=stmt_pager.page(current_page).object_list).order_by(stored)]), MORE_ENDPOINT + cache_key)
else:
# Set result to have selected page of stmts and more endpoint
result['statements'] = [stmt.to_dict(language, format) for stmt in \
Statement.objects.filter(id__in=stmt_pager.page(current_page).object_list).order_by(stored)]
result['more'] = MORE_ENDPOINT + cache_key
more_cache_list = []
        # Remember the page just returned so the next request advances from it
start_page = current_page
more_cache_list.append(stmt_list)
more_cache_list.append(start_page)
more_cache_list.append(total_pages)
more_cache_list.append(limit)
more_cache_list.append(attachments)
more_cache_list.append(language)
more_cache_list.append(format)
more_cache_list.append(stored)
# Encode info
encoded_list = json.dumps(more_cache_list)
cache.set(cache_key, encoded_list)
return result
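# Illustrative sketch (not part of the original module): the paging round-trip
# caches a list shaped like
#   [stmt_list, current_page, total_pages, limit, attachments, language, format, stored]
# under a 32-digit cache_key, returns MORE_ENDPOINT + cache_key as the "more"
# URL, and get_more_statement_request() later rebuilds the next page from that
# cached list via build_statement_result().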
| apache-2.0 | 5,895,940,552,125,944,000 | 39.190311 | 137 | 0.624193 | false | 3.771104 | false | false | false |
RAtechntukan/CouchPotatoServer | libs/subliminal/services/tvsubtitles.py | 106 | 6240 | # -*- coding: utf-8 -*-
# Copyright 2012 Nicolas Wack <[email protected]>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
from . import ServiceBase
from ..cache import cachedmethod
from ..language import language_set, Language
from ..subtitles import get_subtitle_path, ResultSubtitle
from ..utils import get_keywords
from ..videos import Episode
from bs4 import BeautifulSoup
import logging
import re
logger = logging.getLogger(__name__)
def match(pattern, string):
try:
return re.search(pattern, string).group(1)
except AttributeError:
logger.debug(u'Could not match %r on %r' % (pattern, string))
return None
class TvSubtitles(ServiceBase):
server_url = 'http://www.tvsubtitles.net'
api_based = False
languages = language_set(['ar', 'bg', 'cs', 'da', 'de', 'el', 'en', 'es', 'fi', 'fr', 'hu',
'it', 'ja', 'ko', 'nl', 'pl', 'pt', 'ro', 'ru', 'sv', 'tr', 'uk',
'zh', 'pt-br'])
#TODO: Find more exceptions
language_map = {'gr': Language('gre'), 'cz': Language('cze'), 'ua': Language('ukr'),
'cn': Language('chi')}
videos = [Episode]
require_video = False
required_features = ['permissive']
@cachedmethod
def get_likely_series_id(self, name):
r = self.session.post('%s/search.php' % self.server_url, data={'q': name})
soup = BeautifulSoup(r.content, self.required_features)
maindiv = soup.find('div', 'left')
results = []
for elem in maindiv.find_all('li'):
sid = int(match('tvshow-([0-9]+)\.html', elem.a['href']))
show_name = match('(.*) \(', elem.a.text)
results.append((show_name, sid))
#TODO: pick up the best one in a smart way
result = results[0]
return result[1]
@cachedmethod
def get_episode_id(self, series_id, season, number):
"""Get the TvSubtitles id for the given episode. Raises KeyError if none
could be found."""
# download the page of the season, contains ids for all episodes
episode_id = None
r = self.session.get('%s/tvshow-%d-%d.html' % (self.server_url, series_id, season))
soup = BeautifulSoup(r.content, self.required_features)
table = soup.find('table', id='table5')
for row in table.find_all('tr'):
cells = row.find_all('td')
if not cells:
continue
episode_number = match('x([0-9]+)', cells[0].text)
if not episode_number:
continue
episode_number = int(episode_number)
episode_id = int(match('episode-([0-9]+)', cells[1].a['href']))
# we could just return the id of the queried episode, but as we
# already downloaded the whole page we might as well fill in the
# information for all the episodes of the season
self.cache_for(self.get_episode_id, args=(series_id, season, episode_number), result=episode_id)
# raises KeyError if not found
return self.cached_value(self.get_episode_id, args=(series_id, season, number))
# Do not cache this method in order to always check for the most recent
# subtitles
def get_sub_ids(self, episode_id):
subids = []
r = self.session.get('%s/episode-%d.html' % (self.server_url, episode_id))
epsoup = BeautifulSoup(r.content, self.required_features)
for subdiv in epsoup.find_all('a'):
if 'href' not in subdiv.attrs or not subdiv['href'].startswith('/subtitle'):
continue
subid = int(match('([0-9]+)', subdiv['href']))
lang = self.get_language(match('flags/(.*).gif', subdiv.img['src']))
result = {'subid': subid, 'language': lang}
for p in subdiv.find_all('p'):
if 'alt' in p.attrs and p['alt'] == 'rip':
result['rip'] = p.text.strip()
if 'alt' in p.attrs and p['alt'] == 'release':
result['release'] = p.text.strip()
subids.append(result)
return subids
def list_checked(self, video, languages):
return self.query(video.path or video.release, languages, get_keywords(video.guess), video.series, video.season, video.episode)
def query(self, filepath, languages, keywords, series, season, episode):
logger.debug(u'Getting subtitles for %s season %d episode %d with languages %r' % (series, season, episode, languages))
self.init_cache()
sid = self.get_likely_series_id(series.lower())
try:
ep_id = self.get_episode_id(sid, season, episode)
except KeyError:
logger.debug(u'Could not find episode id for %s season %d episode %d' % (series, season, episode))
return []
subids = self.get_sub_ids(ep_id)
# filter the subtitles with our queried languages
subtitles = []
for subid in subids:
language = subid['language']
if language not in languages:
continue
path = get_subtitle_path(filepath, language, self.config.multi)
subtitle = ResultSubtitle(path, language, self.__class__.__name__.lower(), '%s/download-%d.html' % (self.server_url, subid['subid']),
keywords=[subid['rip'], subid['release']])
subtitles.append(subtitle)
return subtitles
def download(self, subtitle):
self.download_zip_file(subtitle.link, subtitle.path)
return subtitle
Service = TvSubtitles
| gpl-3.0 | 3,814,871,655,794,799,000 | 42.943662 | 145 | 0.604487 | false | 3.861386 | false | false | false |
ds-hwang/chromium-crosswalk | remoting/tools/zip2msi.py | 89 | 8691 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates .msi from a .zip archive or an unpacked directory.
The structure of the input archive or directory should look like this:
+- archive.zip
+- archive
+- parameters.json
The name of the archive and the top level directory in the archive must match.
When an unpacked directory is used as the input "archive.zip/archive" should
be passed via the command line.
'parameters.json' specifies the parameters to be passed to candle/light and
must have the following structure:
{
"defines": { "name": "value" },
"extensions": [ "WixFirewallExtension.dll" ],
"switches": [ '-nologo' ],
"source": "chromoting.wxs",
"bind_path": "files",
"sign": [ ... ],
"candle": { ... },
"light": { ... }
}
"source" specifies the name of the input .wxs relative to
"archive.zip/archive".
"bind_path" specifies the path where to look for binary files referenced by
.wxs relative to "archive.zip/archive".
This script is used for both building Chromoting Host installation during
Chromium build and for signing Chromoting Host installation later. There are two
copies of this script because of that:
- one in Chromium tree at src/remoting/tools/zip2msi.py.
- another one next to the signing scripts.
The copies of the script can be out of sync so make sure that a newer version is
compatible with the older ones when updating the script.
"""
import copy
import json
from optparse import OptionParser
import os
import re
import subprocess
import sys
import zipfile
def UnpackZip(target, source):
"""Unpacks |source| archive to |target| directory."""
target = os.path.normpath(target)
archive = zipfile.ZipFile(source, 'r')
for f in archive.namelist():
target_file = os.path.normpath(os.path.join(target, f))
# Sanity check to make sure .zip uses relative paths.
if os.path.commonprefix([target_file, target]) != target:
print "Failed to unpack '%s': '%s' is not under '%s'" % (
source, target_file, target)
return 1
# Create intermediate directories.
target_dir = os.path.dirname(target_file)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
archive.extract(f, target)
return 0
def Merge(left, right):
"""Merges two values.
Raises:
TypeError: |left| and |right| cannot be merged.
Returns:
- if both |left| and |right| are dictionaries, they are merged recursively.
- if both |left| and |right| are lists, the result is a list containing
elements from both lists.
- if both |left| and |right| are simple value, |right| is returned.
- |TypeError| exception is raised if a dictionary or a list are merged with
a non-dictionary or non-list correspondingly.
"""
if isinstance(left, dict):
if isinstance(right, dict):
retval = copy.copy(left)
for key, value in right.iteritems():
if key in retval:
retval[key] = Merge(retval[key], value)
else:
retval[key] = value
return retval
else:
raise TypeError('Error: merging a dictionary and non-dictionary value')
elif isinstance(left, list):
if isinstance(right, list):
return left + right
else:
raise TypeError('Error: merging a list and non-list value')
else:
if isinstance(right, dict):
raise TypeError('Error: merging a dictionary and non-dictionary value')
elif isinstance(right, list):
raise TypeError('Error: merging a dictionary and non-dictionary value')
else:
return right
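# Illustrative behavior of Merge() (sketch derived from the rules above):
#   Merge({'a': 1, 'b': {'x': 1}}, {'b': {'y': 2}, 'c': 3})
#       -> {'a': 1, 'b': {'x': 1, 'y': 2}, 'c': 3}
#   Merge([1, 2], [3])      -> [1, 2, 3]
#   Merge('left', 'right')  -> 'right'
#   Merge({'a': 1}, [1])    -> raises TypeError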
quote_matcher_regex = re.compile(r'\s|"')
quote_replacer_regex = re.compile(r'(\\*)"')
def QuoteArgument(arg):
"""Escapes a Windows command-line argument.
So that the Win32 CommandLineToArgv function will turn the escaped result back
into the original string.
See http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
("Parsing C++ Command-Line Arguments") to understand why we have to do
this.
Args:
arg: the string to be escaped.
Returns:
the escaped string.
"""
def _Replace(match):
# For a literal quote, CommandLineToArgv requires an odd number of
# backslashes preceding it, and it produces half as many literal backslashes
# (rounded down). So we need to produce 2n+1 backslashes.
return 2 * match.group(1) + '\\"'
if re.search(quote_matcher_regex, arg):
# Escape all quotes so that they are interpreted literally.
arg = quote_replacer_regex.sub(_Replace, arg)
# Now add unescaped quotes so that any whitespace is interpreted literally.
return '"' + arg + '"'
else:
return arg
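# Illustrative examples (sketch): arguments without whitespace or quotes pass
# through unchanged, anything else is quoted so CommandLineToArgv can undo it.
#   QuoteArgument('plain')        -> 'plain'
#   QuoteArgument('hello world')  -> '"hello world"'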
def GenerateCommandLine(tool, source, dest, parameters):
"""Generates the command line for |tool|."""
# Merge/apply tool-specific parameters
params = copy.copy(parameters)
if tool in parameters:
params = Merge(params, params[tool])
wix_path = os.path.normpath(params.get('wix_path', ''))
switches = [os.path.join(wix_path, tool), '-nologo']
# Append the list of defines and extensions to the command line switches.
for name, value in params.get('defines', {}).iteritems():
switches.append('-d%s=%s' % (name, value))
for ext in params.get('extensions', []):
switches += ('-ext', os.path.join(wix_path, ext))
# Append raw switches
switches += params.get('switches', [])
# Append the input and output files
switches += ('-out', dest, source)
# Generate the actual command line
#return ' '.join(map(QuoteArgument, switches))
return switches
def Run(args):
"""Runs a command interpreting the passed |args| as a command line."""
command = ' '.join(map(QuoteArgument, args))
popen = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
if popen.returncode:
print command
for line in out.splitlines():
print line
print '%s returned %d' % (args[0], popen.returncode)
return popen.returncode
def GenerateMsi(target, source, parameters):
"""Generates .msi from the installation files prepared by Chromium build."""
parameters['basename'] = os.path.splitext(os.path.basename(source))[0]
  # The script can handle both forms of input: a directory with unpacked files or
# a ZIP archive with the same files. In the latter case the archive should be
# unpacked to the intermediate directory.
source_dir = None
if os.path.isdir(source):
# Just use unpacked files from the supplied directory.
source_dir = source
else:
# Unpack .zip
rc = UnpackZip(parameters['intermediate_dir'], source)
if rc != 0:
return rc
source_dir = '%(intermediate_dir)s\\%(basename)s' % parameters
# Read parameters from 'parameters.json'.
f = open(os.path.join(source_dir, 'parameters.json'))
parameters = Merge(json.load(f), parameters)
f.close()
if 'source' not in parameters:
print 'The source .wxs is not specified'
return 1
if 'bind_path' not in parameters:
print 'The binding path is not specified'
return 1
wxs = os.path.join(source_dir, parameters['source'])
# Add the binding path to the light-specific parameters.
bind_path = os.path.join(source_dir, parameters['bind_path'])
parameters = Merge(parameters, {'light': {'switches': ['-b', bind_path]}})
target_arch = parameters['target_arch']
if target_arch == 'ia32':
arch_param = 'x86'
elif target_arch == 'x64':
arch_param = 'x64'
else:
print 'Invalid target_arch parameter value'
return 1
# Add the architecture to candle-specific parameters.
parameters = Merge(
parameters, {'candle': {'switches': ['-arch', arch_param]}})
# Run candle and light to generate the installation.
wixobj = '%(intermediate_dir)s\\%(basename)s.wixobj' % parameters
args = GenerateCommandLine('candle', wxs, wixobj, parameters)
rc = Run(args)
if rc:
return rc
args = GenerateCommandLine('light', wixobj, target, parameters)
rc = Run(args)
if rc:
return rc
return 0
def main():
usage = 'Usage: zip2msi [options] <input.zip> <output.msi>'
parser = OptionParser(usage=usage)
parser.add_option('--intermediate_dir', dest='intermediate_dir', default='.')
parser.add_option('--wix_path', dest='wix_path', default='.')
parser.add_option('--target_arch', dest='target_arch', default='x86')
options, args = parser.parse_args()
if len(args) != 2:
parser.error('two positional arguments expected')
return GenerateMsi(args[1], args[0], dict(options.__dict__))
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | 7,209,941,797,004,164,000 | 30.952206 | 80 | 0.682085 | false | 3.79354 | false | false | false |
rolobio/DictORM | dictorm/pg.py | 1 | 10974 | """
This module is used to build queries for Postgresql. You shouldn't really need
to import anything from this file because they can all be built using
dictorm.Table.
Sqlite queries are slightly different, but use these methods as their base.
"""
from copy import copy
from typing import Union
global sort_keys
sort_keys = False
__all__ = [
'And',
'Column',
'Comparison',
'Delete',
'Insert',
'Null',
'Operator',
'Or',
'Select',
'set_sort_keys',
'Update',
]
def set_sort_keys(val):
"""Used only for testing"""
global sort_keys
sort_keys = val
class Select(object):
query = 'SELECT * FROM "{table}"'
def __init__(self, table, operators_or_comp=None, returning=None):
self.table = table
self.operators_or_comp = operators_or_comp or []
self.returning = returning
self._order_by = None
self._limit = None
self._offset = None
def __repr__(self): # pragma: no cover
return 'Select({0}, {1}, ret:{2}, order:{3}, limit:{4}, offset:{5}'.format(
self.table,
repr(self.operators_or_comp),
self.returning,
self._order_by,
self._limit,
self._offset)
def _copy(self):
try:
ooc = self.operators_or_comp[:]
except TypeError:
ooc = self.operators_or_comp._copy()
new = type(self)(self.table, ooc, copy(self.returning))
new._order_by = copy(self._order_by)
new._limit = copy(self._limit)
new._offset = copy(self._offset)
return new
def __str__(self):
parts = []
formats = {'table': self.table, }
ooc = self.operators_or_comp
if (isinstance(ooc, Operator) and ooc.operators_or_comp) or (
isinstance(ooc, Comparison)
):
parts.append(' WHERE {comp}')
formats['comp'] = str(ooc)
if self._order_by:
parts.append(' ORDER BY {0}'.format(str(self._order_by)))
if self.returning == '*':
parts.append(' RETURNING *')
elif self.returning:
parts.append(' RETURNING "{0}"'.format(str(self.returning)))
if self._limit:
parts.append(' LIMIT {0}'.format(str(self._limit)))
if self._offset:
parts.append(' OFFSET {0}'.format(str(self._offset)))
sql = self.query + ''.join(parts)
return sql.format(**formats)
def values(self):
return list(self.operators_or_comp or [])
def build(self):
return (str(self), self.values())
def order_by(self, order_by):
self._order_by = order_by
return self
def limit(self, limit):
self._limit = limit
return self
def offset(self, offset):
self._offset = offset
return self
def __add__(self, item):
self.operators_or_comp += item
return self
class Insert(object):
query = 'INSERT INTO "{table}" {cvp}'
cvp = '({0}) VALUES ({1})'
interpolation_str = '%s'
append_returning = None
last_row = 'SELECT {0} FROM "{1}" WHERE "rowid" = last_insert_rowid()'
def __init__(self, table, **values):
self.table = table
self._values = values
self._returning = None
self._ordered_keys = values.keys()
if sort_keys:
self._ordered_keys = sorted(self._ordered_keys)
def _build_cvp(self):
return (', '.join(['"{}"'.format(i) for i in self._ordered_keys]),
', '.join([self.interpolation_str, ] * len(self._values)))
def __str__(self):
sql = self.query
if self._returning == '*':
sql += ' RETURNING *'
elif self._returning:
sql += ' RETURNING "{0}"'.format(self._returning)
if not self._values:
return sql.format(table=self.table, cvp='DEFAULT VALUES')
return sql.format(table=self.table,
cvp=self.cvp.format(*self._build_cvp()))
def values(self):
return [self._values[k] for k in self._ordered_keys]
def build(self):
sql, values = str(self), self.values()
if self.append_returning:
ret = [(sql, values), ]
ret.append((self.last_row.format(
self.append_returning, self.table),
[]))
return ret
return (sql, values)
def returning(self, returning):
self._returning = returning
return self
class Update(Insert):
query = 'UPDATE "{table}" SET {cvp}'
interpolation_str = '%s'
def __init__(self, table, **values):
self.operators_or_comp = None
super(Update, self).__init__(table, **values)
def _build_cvp(self):
return ', '.join(('"{0}"={1}'.format(k, self.interpolation_str) \
for k in self._ordered_keys))
def __str__(self):
parts = []
formats = {'table': self.table, 'cvp': self._build_cvp()}
if self.operators_or_comp:
parts.append(' WHERE {comps}')
formats['comps'] = str(self.operators_or_comp)
if self._returning == '*':
parts.append(' RETURNING *')
elif self._returning:
parts.append(' RETURNING "{0}"'.format(self._returning))
sql = self.query + ''.join(parts)
return sql.format(**formats)
def values(self):
values = super(Update, self).values()
if self.operators_or_comp:
values.extend(list(self.operators_or_comp))
return values
def where(self, operators_or_comp):
self.operators_or_comp = operators_or_comp
return self
class Delete(Update):
query = 'DELETE FROM "{table}"'
QueryHint = Union[Select, Insert, Update, Delete]
class Comparison(object):
interpolation_str = '%s'
many = False
def __init__(self, column1, column2, kind):
self.column1 = column1
self.column2 = column2
self.kind = kind
self._substratum = None
self._aggregate = False
self._array_exp = False
def __repr__(self): # pragma: no cover
        if isinstance(self.column2, Null):
            ret = 'Comparison({0}{1})'.format(self.column1, self.kind)
        else:
            ret = 'Comparison{0}({1}{2}{3})'.format(
                'Many' if self.many else '',
                self.column1, str(self.kind), self.column2)
if self._substratum:
ret += '.substratum({0})'.format(self._substratum)
return ret
def __str__(self):
c1 = self.column1.column
if self._null_kind():
return '"{0}"{1}'.format(c1, self.kind)
# Surround the expression with parentheses
if self._array_exp:
return '"{0}"{1}({2})'.format(c1, self.kind, self.interpolation_str)
return '"{0}"{1}{2}'.format(c1, self.kind, self.interpolation_str)
def _copy(self):
new = type(self)(self.column1, self.column2, self.kind)
new._substratum = self._substratum
new._aggregate = self._aggregate
return new
def value(self):
return self.column2
def __iter__(self):
if self._null_kind():
return iter([])
return iter([self.column2, ])
def substratum(self, column):
comp = Comparison(self.column1, self.column2, self.kind)
comp._substratum = column
comp.many = self.many
return comp
def aggregate(self, column):
comp = self.substratum(column)
comp._aggregate = True
return comp
def _null_kind(self):
return isinstance(self.column2, Null)
def Or(self, comp2):
return Or(self, comp2)
def And(self, comp2):
return And(self, comp2)
class Null(): pass
class Column(object):
comparison = Comparison
def __init__(self, table, column):
self.table = table
self.column = column
def __repr__(self): # pragma: no cover
return '{0}.{1}'.format(self.table, self.column)
def many(self, column):
c = self.comparison(self, column, '=')
c.many = True
return c
def __eq__(self, column): return self.comparison(self, column, '=')
def __gt__(self, column): return self.comparison(self, column, '>')
def __ge__(self, column): return self.comparison(self, column, '>=')
def __lt__(self, column): return self.comparison(self, column, '<')
def __le__(self, column): return self.comparison(self, column, '<=')
def __ne__(self, column): return self.comparison(self, column, '!=')
def Is(self, column): return self.comparison(self, column, ' IS ')
def IsNot(self, column): return self.comparison(self, column, ' IS NOT ')
def IsDistinct(self, column):
return self.comparison(self, column, ' IS DISTINCT FROM ')
def IsNotDistinct(self, column):
return self.comparison(self, column, ' IS NOT DISTINCT FROM ')
def IsNull(self):
return self.comparison(self, Null(), ' IS NULL')
def IsNotNull(self):
return self.comparison(self, Null(), ' IS NOT NULL')
def In(self, tup):
if isinstance(tup, list):
tup = tuple(tup)
return self.comparison(self, tup, ' IN ')
def Like(self, column):
return self.comparison(self, column, ' LIKE ')
def Ilike(self, column):
return self.comparison(self, column, ' ILIKE ')
def Any(self, column):
comp = self.comparison(self, column, ' = ANY ')
comp._array_exp = True
return comp
def wrap_ooc(ooc):
if isinstance(ooc, Comparison):
return '%s' % str(ooc)
return '(%s)' % str(ooc)
class Operator(object):
def __init__(self, kind, operators_or_comp):
self.kind = kind
self.operators_or_comp = operators_or_comp
def __repr__(self): # pragma: no cover
return '{0}{1}'.format(self.kind, repr(self.operators_or_comp))
def __str__(self):
kind = ' {0} '.format(self.kind)
return kind.join(map(wrap_ooc, self.operators_or_comp))
def __iter__(self):
i = []
for comp in self.operators_or_comp:
if isinstance(comp, Operator):
i.extend(comp)
elif isinstance(comp, Comparison) and not comp._null_kind():
i.append(comp.value())
return iter(i)
def __add__(self, i):
if isinstance(i, tuple):
self.operators_or_comp += i
else:
self.operators_or_comp += (i,)
return self
def _copy(self):
new = type(self)()
new.operators_or_comp = tuple(i._copy() for i in self.operators_or_comp)
return new
class Or(Operator):
def __init__(self, *operators_or_comp):
super(Or, self).__init__('OR', operators_or_comp)
class And(Operator):
def __init__(self, *operators_or_comp):
super(And, self).__init__('AND', operators_or_comp)
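# Minimal usage sketch (illustrative only; in practice these are built through
# dictorm.Table rather than by hand):
#   query = Select('person', Column('person', 'name') == 'Dave')
#   str(query)      -> 'SELECT * FROM "person" WHERE "name"=%s'
#   query.values()  -> ['Dave']
#   query.build()   -> ('SELECT * FROM "person" WHERE "name"=%s', ['Dave'])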
| apache-2.0 | -8,272,285,528,467,006,000 | 27.80315 | 91 | 0.558958 | false | 3.755647 | false | false | false |
megraf/asuswrt-merlin | release/src/router/libxml2/python/tests/xpath.py | 87 | 1188 | #!/usr/bin/python -u
#
# this test exercise the XPath basic engine, parser, etc, and
# allows to detect memory leaks
#
import sys
import libxml2
# Memory debug specific
libxml2.debugMemory(1)
doc = libxml2.parseFile("tst.xml")
if doc.name != "tst.xml":
print "doc.name error"
sys.exit(1);
ctxt = doc.xpathNewContext()
res = ctxt.xpathEval("//*")
if len(res) != 2:
print "xpath query: wrong node set size"
sys.exit(1)
if res[0].name != "doc" or res[1].name != "foo":
print "xpath query: wrong node set value"
sys.exit(1)
ctxt.setContextNode(res[0])
res = ctxt.xpathEval("foo")
if len(res) != 1:
print "xpath query: wrong node set size"
sys.exit(1)
if res[0].name != "foo":
print "xpath query: wrong node set value"
sys.exit(1)
doc.freeDoc()
ctxt.xpathFreeContext()
i = 1000
while i > 0:
doc = libxml2.parseFile("tst.xml")
ctxt = doc.xpathNewContext()
res = ctxt.xpathEval("//*")
doc.freeDoc()
ctxt.xpathFreeContext()
i = i -1
del ctxt
# Memory debug specific
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
print "OK"
else:
print "Memory leak %d bytes" % (libxml2.debugMemory(1))
libxml2.dumpMemory()
| gpl-2.0 | -4,336,757,916,907,250,000 | 22.294118 | 61 | 0.654882 | false | 2.933333 | false | false | false |
mmatyas/skia | tools/misc_utils.py | 68 | 7711 | # Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module to host the VerboseSubprocess, ChangeDir, and ReSearch classes.
"""
import os
import re
import subprocess
def print_subprocess_args(prefix, *args, **kwargs):
"""Print out args in a human-readable manner."""
def quote_and_escape(string):
"""Quote and escape a string if necessary."""
if ' ' in string or '\n' in string:
string = '"%s"' % string.replace('"', '\\"')
return string
if 'cwd' in kwargs:
print '%scd %s' % (prefix, kwargs['cwd'])
print prefix + ' '.join(quote_and_escape(arg) for arg in args[0])
if 'cwd' in kwargs:
print '%scd -' % prefix
class VerboseSubprocess(object):
"""Call subprocess methods, but print out command before executing.
Attributes:
verbose: (boolean) should we print out the command or not. If
not, this is the same as calling the subprocess method
quiet: (boolean) suppress stdout on check_call and call.
prefix: (string) When verbose, what to print before each command.
"""
def __init__(self, verbose):
self.verbose = verbose
self.quiet = not verbose
self.prefix = '~~$ '
def check_call(self, *args, **kwargs):
"""Wrapper for subprocess.check_call().
Args:
*args: to be passed to subprocess.check_call()
**kwargs: to be passed to subprocess.check_call()
Returns:
Whatever subprocess.check_call() returns.
Raises:
OSError or subprocess.CalledProcessError: raised by check_call.
"""
if self.verbose:
print_subprocess_args(self.prefix, *args, **kwargs)
if self.quiet:
with open(os.devnull, 'w') as devnull:
return subprocess.check_call(*args, stdout=devnull, **kwargs)
else:
return subprocess.check_call(*args, **kwargs)
def call(self, *args, **kwargs):
"""Wrapper for subprocess.check().
Args:
*args: to be passed to subprocess.check_call()
**kwargs: to be passed to subprocess.check_call()
Returns:
Whatever subprocess.call() returns.
Raises:
OSError or subprocess.CalledProcessError: raised by call.
"""
if self.verbose:
print_subprocess_args(self.prefix, *args, **kwargs)
if self.quiet:
with open(os.devnull, 'w') as devnull:
return subprocess.call(*args, stdout=devnull, **kwargs)
else:
return subprocess.call(*args, **kwargs)
def check_output(self, *args, **kwargs):
"""Wrapper for subprocess.check_output().
Args:
*args: to be passed to subprocess.check_output()
**kwargs: to be passed to subprocess.check_output()
Returns:
Whatever subprocess.check_output() returns.
Raises:
OSError or subprocess.CalledProcessError: raised by check_output.
"""
if self.verbose:
print_subprocess_args(self.prefix, *args, **kwargs)
return subprocess.check_output(*args, **kwargs)
def strip_output(self, *args, **kwargs):
"""Wrap subprocess.check_output and str.strip().
Pass the given arguments into subprocess.check_output() and return
the results, after stripping any excess whitespace.
Args:
*args: to be passed to subprocess.check_output()
**kwargs: to be passed to subprocess.check_output()
Returns:
The output of the process as a string without leading or
trailing whitespace.
Raises:
OSError or subprocess.CalledProcessError: raised by check_output.
"""
if self.verbose:
print_subprocess_args(self.prefix, *args, **kwargs)
return str(subprocess.check_output(*args, **kwargs)).strip()
def popen(self, *args, **kwargs):
"""Wrapper for subprocess.Popen().
Args:
*args: to be passed to subprocess.Popen()
**kwargs: to be passed to subprocess.Popen()
Returns:
The output of subprocess.Popen()
Raises:
OSError or subprocess.CalledProcessError: raised by Popen.
"""
if self.verbose:
print_subprocess_args(self.prefix, *args, **kwargs)
return subprocess.Popen(*args, **kwargs)
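# Usage sketch (illustrative): the wrappers mirror the subprocess module but
# echo each command (prefixed with "~~$ ") when verbose is enabled.
#   runner = VerboseSubprocess(verbose=True)
#   runner.check_call(['ls', '-l'], cwd='/tmp')
#   sha = runner.strip_output(['git', 'rev-parse', 'HEAD'])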
class ChangeDir(object):
"""Use with a with-statement to temporarily change directories."""
# pylint: disable=I0011,R0903
def __init__(self, directory, verbose=False):
self._directory = directory
self._verbose = verbose
def __enter__(self):
if self._directory != os.curdir:
if self._verbose:
print '~~$ cd %s' % self._directory
cwd = os.getcwd()
os.chdir(self._directory)
self._directory = cwd
def __exit__(self, etype, value, traceback):
if self._directory != os.curdir:
if self._verbose:
print '~~$ cd %s' % self._directory
os.chdir(self._directory)
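# Usage sketch (illustrative):
#   with ChangeDir('/tmp', verbose=True):
#     pass  # cwd is /tmp inside the block
#   # the previous working directory is restored on exit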
class ReSearch(object):
"""A collection of static methods for regexing things."""
@staticmethod
def search_within_stream(input_stream, pattern, default=None):
"""Search for regular expression in a file-like object.
Opens a file for reading and searches line by line for a match to
the regex and returns the parenthesized group named return for the
first match. Does not search across newlines.
For example:
pattern = '^root(:[^:]*){4}:(?P<return>[^:]*)'
with open('/etc/passwd', 'r') as stream:
        return ReSearch.search_within_stream(stream, pattern)
should return root's home directory (/root on my system).
Args:
input_stream: file-like object to be read
pattern: (string) to be passed to re.compile
default: what to return if no match
Returns:
A string or whatever default is
"""
pattern_object = re.compile(pattern)
for line in input_stream:
match = pattern_object.search(line)
if match:
return match.group('return')
return default
@staticmethod
def search_within_string(input_string, pattern, default=None):
"""Search for regular expression in a string.
Args:
input_string: (string) to be searched
pattern: (string) to be passed to re.compile
default: what to return if no match
Returns:
A string or whatever default is
"""
match = re.search(pattern, input_string)
return match.group('return') if match else default
@staticmethod
def search_within_output(verbose, pattern, default, *args, **kwargs):
"""Search for regular expression in a process output.
Does not search across newlines.
Args:
      verbose: (boolean) should we call print_subprocess_args?
pattern: (string) to be passed to re.compile
default: what to return if no match
*args: to be passed to subprocess.Popen()
**kwargs: to be passed to subprocess.Popen()
Returns:
A string or whatever default is
"""
if verbose:
print_subprocess_args('~~$ ', *args, **kwargs)
proc = subprocess.Popen(*args, stdout=subprocess.PIPE, **kwargs)
return ReSearch.search_within_stream(proc.stdout, pattern, default)
| bsd-3-clause | -5,381,130,281,967,364,000 | 33.424107 | 77 | 0.59253 | false | 4.452079 | false | false | false |
mbauskar/helpdesk-frappe | frappe/patches/v5_2/change_checks_to_not_null.py | 58 | 1267 | from __future__ import unicode_literals
import frappe
from frappe.utils import cint
from frappe.model import default_fields
def execute():
for table in frappe.db.get_tables():
doctype = table[3:]
if frappe.db.exists("DocType", doctype):
fieldnames = [df["fieldname"] for df in
frappe.get_all("DocField", fields=["fieldname"], filters={"parent": doctype})]
custom_fieldnames = [df["fieldname"] for df in
frappe.get_all("Custom Field", fields=["fieldname"], filters={"dt": doctype})]
else:
fieldnames = custom_fieldnames = []
for column in frappe.db.sql("""desc `{0}`""".format(table), as_dict=True):
if column["Type"]=="int(1)":
fieldname = column["Field"]
# only change for defined fields, ignore old fields that don't exist in meta
if not (fieldname in default_fields or fieldname in fieldnames or fieldname in custom_fieldnames):
continue
# set 0
frappe.db.sql("""update `{table}` set `{column}`=0 where `{column}` is null"""\
.format(table=table, column=fieldname))
frappe.db.commit()
# change definition
frappe.db.sql_ddl("""alter table `{table}`
modify `{column}` int(1) not null default {default}"""\
.format(table=table, column=fieldname, default=cint(column["Default"])))
| mit | 7,124,992,491,837,945,000 | 36.264706 | 102 | 0.671665 | false | 3.480769 | false | false | false |
wiltonlazary/arangodb | 3rdParty/V8/V8-5.0.71.39/tools/swarming_client/tests/lru_test.py | 6 | 6265 | #!/usr/bin/env python
# Copyright 2013 The Swarming Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0 that
# can be found in the LICENSE file.
import json
import logging
import os
import sys
import tempfile
import unittest
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, ROOT_DIR)
from utils import lru
class LRUDictTest(unittest.TestCase):
@staticmethod
def prepare_lru_dict(keys):
"""Returns new LRUDict with given |keys| added one by one."""
lru_dict = lru.LRUDict()
for key in keys:
lru_dict.add(key, None)
return lru_dict
def assert_order(self, lru_dict, expected_keys):
"""Asserts order of keys in |lru_dict| is |expected_keys|.
expected_keys[0] is supposedly oldest key, expected_keys[-1] is newest.
Destroys |lru_dict| state in the process.
"""
# Check keys iteration works.
self.assertEqual(lru_dict.keys_set(), set(expected_keys))
# Check pop_oldest returns keys in expected order.
actual_keys = []
while lru_dict:
oldest_key, _ = lru_dict.pop_oldest()
actual_keys.append(oldest_key)
self.assertEqual(actual_keys, expected_keys)
def assert_same_data(self, lru_dict, regular_dict):
"""Asserts that given |lru_dict| contains same data as |regular_dict|."""
self.assertEqual(lru_dict.keys_set(), set(regular_dict.keys()))
self.assertEqual(set(lru_dict.itervalues()), set(regular_dict.values()))
for k, v in regular_dict.items():
self.assertEqual(lru_dict.get(k), v)
def test_basic_dict_funcs(self):
lru_dict = lru.LRUDict()
# Add a bunch.
data = {1: 'one', 2: 'two', 3: 'three'}
for k, v in data.items():
lru_dict.add(k, v)
    # Check it's there.
self.assert_same_data(lru_dict, data)
# Replace value.
lru_dict.add(1, 'one!!!')
data[1] = 'one!!!'
self.assert_same_data(lru_dict, data)
# Check pop works.
self.assertEqual(lru_dict.pop(2), 'two')
data.pop(2)
self.assert_same_data(lru_dict, data)
# Pop missing key.
with self.assertRaises(KeyError):
lru_dict.pop(2)
# Touch has no effect on set of keys and values.
lru_dict.touch(1)
self.assert_same_data(lru_dict, data)
# Touch fails on missing key.
with self.assertRaises(KeyError):
lru_dict.touch(22)
def test_magic_methods(self):
# Check __nonzero__, __len__ and __contains__ for empty dict.
lru_dict = lru.LRUDict()
self.assertFalse(lru_dict)
self.assertEqual(len(lru_dict), 0)
self.assertFalse(1 in lru_dict)
# Dict with one item.
lru_dict.add(1, 'one')
self.assertTrue(lru_dict)
self.assertEqual(len(lru_dict), 1)
self.assertTrue(1 in lru_dict)
self.assertFalse(2 in lru_dict)
def test_order(self):
data = [1, 2, 3]
# Edge cases.
self.assert_order(self.prepare_lru_dict([]), [])
self.assert_order(self.prepare_lru_dict([1]), [1])
# No touches.
self.assert_order(self.prepare_lru_dict(data), data)
# Touching newest item is noop.
lru_dict = self.prepare_lru_dict(data)
lru_dict.touch(3)
self.assert_order(lru_dict, data)
# Touch to move to newest.
lru_dict = self.prepare_lru_dict(data)
lru_dict.touch(2)
self.assert_order(lru_dict, [1, 3, 2])
# Pop newest.
lru_dict = self.prepare_lru_dict(data)
lru_dict.pop(1)
self.assert_order(lru_dict, [2, 3])
# Pop in the middle.
lru_dict = self.prepare_lru_dict(data)
lru_dict.pop(2)
self.assert_order(lru_dict, [1, 3])
# Pop oldest.
lru_dict = self.prepare_lru_dict(data)
lru_dict.pop(3)
self.assert_order(lru_dict, [1, 2])
# Add oldest.
lru_dict = self.prepare_lru_dict(data)
lru_dict.batch_insert_oldest([(4, 4), (5, 5)])
self.assert_order(lru_dict, [4, 5] + data)
# Add newest.
lru_dict = self.prepare_lru_dict(data)
lru_dict.add(4, 4)
self.assert_order(lru_dict, data + [4])
def test_load_save(self):
def save_and_load(lru_dict):
handle, tmp_name = tempfile.mkstemp(prefix=u'lru_test')
os.close(handle)
try:
lru_dict.save(tmp_name)
return lru.LRUDict.load(tmp_name)
finally:
try:
os.unlink(tmp_name)
except OSError:
pass
data = [1, 2, 3]
# Edge case.
empty = save_and_load(lru.LRUDict())
self.assertFalse(empty)
# Normal flow.
lru_dict = save_and_load(self.prepare_lru_dict(data))
self.assert_order(lru_dict, data)
# After touches.
lru_dict = self.prepare_lru_dict(data)
lru_dict.touch(2)
lru_dict = save_and_load(lru_dict)
self.assert_order(lru_dict, [1, 3, 2])
# After pop.
lru_dict = self.prepare_lru_dict(data)
lru_dict.pop(2)
lru_dict = save_and_load(lru_dict)
self.assert_order(lru_dict, [1, 3])
# After add.
lru_dict = self.prepare_lru_dict(data)
lru_dict.add(4, 4)
lru_dict.batch_insert_oldest([(5, 5), (6, 6)])
lru_dict = save_and_load(lru_dict)
self.assert_order(lru_dict, [5, 6] + data + [4])
def test_corrupted_state_file(self):
def load_from_state(state_text):
handle, tmp_name = tempfile.mkstemp(prefix=u'lru_test')
os.close(handle)
try:
with open(tmp_name, 'w') as f:
f.write(state_text)
return lru.LRUDict.load(tmp_name)
finally:
os.unlink(tmp_name)
# Loads correct state just fine.
self.assertIsNotNone(load_from_state(json.dumps([
['key1', 'value1'],
['key2', 'value2'],
])))
# Not a json.
with self.assertRaises(ValueError):
load_from_state('garbage, not a state')
# Not a list.
with self.assertRaises(ValueError):
load_from_state('{}')
# Not a list of pairs.
with self.assertRaises(ValueError):
load_from_state(json.dumps([
['key', 'value', 'and whats this?'],
]))
# Duplicate keys.
with self.assertRaises(ValueError):
load_from_state(json.dumps([
['key', 'value'],
['key', 'another_value'],
]))
if __name__ == '__main__':
VERBOSE = '-v' in sys.argv
logging.basicConfig(level=logging.DEBUG if VERBOSE else logging.ERROR)
unittest.main()
| apache-2.0 | -7,147,157,685,968,510,000 | 26.47807 | 80 | 0.622666 | false | 3.060576 | true | false | false |
kkampardi/Plinth | plinth/modules/matrixsynapse/views.py | 2 | 2670 | #
# This file is part of Plinth.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Views for the Matrix Synapse module.
"""
from django.shortcuts import redirect
from django.urls import reverse_lazy
from django.views.generic import FormView
from plinth import actions
from plinth import views
from plinth.modules import matrixsynapse
from plinth.forms import DomainSelectionForm
from plinth.utils import get_domain_names
class SetupView(FormView):
"""Show matrix-synapse setup page."""
template_name = 'matrix-synapse-pre-setup.html'
form_class = DomainSelectionForm
success_url = reverse_lazy('matrixsynapse:index')
def form_valid(self, form):
"""Handle valid form submission."""
domain_name = form.cleaned_data['domain_name']
actions.superuser_run('matrixsynapse',
['setup', '--domain-name', domain_name])
return super().form_valid(form)
def get_context_data(self, *args, **kwargs):
"""Provide context data to the template."""
context = super().get_context_data(**kwargs)
context['title'] = matrixsynapse.name
context['description'] = matrixsynapse.description
context['domain_names'] = get_domain_names()
return context
class ServiceView(views.ServiceView):
"""Show matrix-synapse service page."""
service_id = matrixsynapse.managed_services[0]
template_name = 'matrix-synapse.html'
description = matrixsynapse.description
diagnostics_module_name = 'matrixsynapse'
def dispatch(self, request, *args, **kwargs):
"""Redirect to setup page if setup is not done yet."""
if not matrixsynapse.is_setup():
return redirect('matrixsynapse:setup')
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, *args, **kwargs):
"""Add additional context data for template."""
context = super().get_context_data(**kwargs)
context['domain_name'] = matrixsynapse.get_configured_domain_name()
return context
| agpl-3.0 | 345,140,781,094,044,350 | 34.131579 | 75 | 0.695506 | false | 4.076336 | false | false | false |
ZihengJiang/mxnet | python/mxnet/profiler.py | 31 | 2161 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=fixme, invalid-name, too-many-arguments, too-many-locals, too-many-lines
# pylint: disable=too-many-branches, too-many-statements
"""Profiler setting methods."""
from __future__ import absolute_import
import ctypes
from .base import _LIB, check_call, c_str
def profiler_set_config(mode='symbolic', filename='profile.json'):
"""Set up the configure of profiler.
Parameters
----------
mode : string, optional
Indicates whether to enable the profiler, can
be 'symbolic', or 'all'. Defaults to `symbolic`.
filename : string, optional
The name of output trace file. Defaults to 'profile.json'.
"""
mode2int = {'symbolic': 0, 'all': 1}
check_call(_LIB.MXSetProfilerConfig(
ctypes.c_int(mode2int[mode]),
c_str(filename)))
def profiler_set_state(state='stop'):
"""Set up the profiler state to record operator.
Parameters
----------
state : string, optional
Indicates whether to run the profiler, can
be 'stop' or 'run'. Default is `stop`.
"""
state2int = {'stop': 0, 'run': 1}
check_call(_LIB.MXSetProfilerState(ctypes.c_int(state2int[state])))
def dump_profile():
"""Dump profile and stop profiler. Use this to save profile
in advance in case your program cannot exit normally."""
check_call(_LIB.MXDumpProfile())
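# Typical usage sketch (illustrative), combining the helpers above:
#   profiler_set_config(mode='all', filename='profile.json')
#   profiler_set_state('run')
#   ...  # execute the operators to be profiled
#   profiler_set_state('stop')
#   dump_profile()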
| apache-2.0 | 7,532,587,623,368,357,000 | 36.258621 | 90 | 0.698751 | false | 3.972426 | false | false | false |
maciekcc/tensorflow | tensorflow/python/ops/linalg_grad.py | 23 | 9040 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in linalg_ops.py.
Useful reference for derivative formulas is
An extended collection of matrix derivative results for forward and reverse
mode algorithmic differentiation by Mike Giles:
http://eprints.maths.ox.ac.uk/1079/1/NA-08-01.pdf
A detailed derivation of formulas for backpropagating through spectral layers
(SVD and Eig) by Ionescu, Vantzos & Sminchisescu:
https://arxiv.org/pdf/1509.07838v4.pdf
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
@ops.RegisterGradient("MatrixInverse")
def _MatrixInverseGrad(op, grad):
"""Gradient for MatrixInverse."""
ainv = op.outputs[0]
return -math_ops.matmul(
ainv, math_ops.matmul(
grad, ainv, adjoint_b=True), adjoint_a=True)
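# Derivation sketch (added note): with C = A^{-1}, differentiating A C = I gives
# dC = -C dA C, so the reverse-mode gradient w.r.t. A is -C^H grad C^H, which is
# exactly the matmul expression above.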
@ops.RegisterGradient("MatrixDeterminant")
def _MatrixDeterminantGrad(op, grad):
"""Gradient for MatrixDeterminant."""
a = op.inputs[0]
c = op.outputs[0]
a_adj_inv = linalg_ops.matrix_inverse(a, adjoint=True)
multipliers = array_ops.reshape(
grad * c, array_ops.concat([array_ops.shape(c), [1, 1]], 0))
return multipliers * a_adj_inv
@ops.RegisterGradient("Cholesky")
def _CholeskyGrad(op, grad):
"""Gradient for Cholesky."""
# Gradient is l^{-H} @ ((l^{H} @ grad) * (tril(ones)-1/2*eye)) @ l^{-1}
l = op.outputs[0]
num_rows = array_ops.shape(l)[-1]
batch_shape = array_ops.shape(l)[:-2]
l_inverse = linalg_ops.matrix_triangular_solve(
l, linalg_ops.eye(num_rows, batch_shape=batch_shape, dtype=l.dtype))
middle = math_ops.matmul(l, grad, adjoint_a=True)
middle = array_ops.matrix_set_diag(middle,
0.5 * array_ops.matrix_diag_part(middle))
middle = array_ops.matrix_band_part(middle, -1, 0)
grad_a = math_ops.matmul(
math_ops.matmul(l_inverse, middle, adjoint_a=True), l_inverse)
grad_a += math_ops.conj(array_ops.matrix_transpose(grad_a))
return grad_a * 0.5
@ops.RegisterGradient("MatrixSolve")
def _MatrixSolveGrad(op, grad):
"""Gradient for MatrixSolve."""
a = op.inputs[0]
adjoint_a = op.get_attr("adjoint")
c = op.outputs[0]
grad_b = linalg_ops.matrix_solve(a, grad, adjoint=not adjoint_a)
if adjoint_a:
grad_a = -math_ops.matmul(c, grad_b, adjoint_b=True)
else:
grad_a = -math_ops.matmul(grad_b, c, adjoint_b=True)
return (grad_a, grad_b)
@ops.RegisterGradient("MatrixSolveLs")
def _MatrixSolveLsGrad(op, grad):
"""Gradients for MatrixSolveLs."""
# TODO(rmlarsen): The implementation could be more efficient:
# a) Output the Cholesky factorization from forward op instead of
# recomputing it here.
# b) Implement a symmetric rank-k update op instead of computing
# x*z + transpose(x*z). This pattern occurs other places in TensorFlow.
def _overdetermined(op, grad):
"""Gradients for the overdetermined case of MatrixSolveLs.
This is the backprop for the solution to the normal equations of the first
kind:
X = F(A, B) = (A^T * A + lambda * I)^{-1} * A^T * B
which solve the least squares problem
min ||A * X - B||_F^2 + lambda ||X||_F^2.
"""
a = op.inputs[0]
b = op.inputs[1]
l2_regularizer = math_ops.cast(op.inputs[2], a.dtype.base_dtype)
x = op.outputs[0]
a_shape = array_ops.shape(a)
batch_shape = a_shape[:-2]
n = a_shape[-1]
identity = linalg_ops.eye(n, batch_shape=batch_shape, dtype=a.dtype)
gramian = math_ops.matmul(a, a, adjoint_a=True) + l2_regularizer * identity
chol = linalg_ops.cholesky(gramian)
# Temporary z = (A^T * A + lambda * I)^{-1} * grad.
z = linalg_ops.cholesky_solve(chol, grad)
xzt = math_ops.matmul(x, z, adjoint_b=True)
zx_sym = xzt + array_ops.matrix_transpose(xzt)
grad_a = -math_ops.matmul(a, zx_sym) + math_ops.matmul(b, z, adjoint_b=True)
grad_b = math_ops.matmul(a, z)
return (grad_a, grad_b, None)
def _underdetermined(op, grad):
"""Gradients for the underdetermined case of MatrixSolveLs.
This is the backprop for the solution to the normal equations of the second
kind:
X = F(A, B) = A * (A*A^T + lambda*I)^{-1} * B
that (for lambda=0) solve the least squares problem
min ||X||_F subject to A*X = B.
"""
a = op.inputs[0]
b = op.inputs[1]
l2_regularizer = math_ops.cast(op.inputs[2], a.dtype.base_dtype)
a_shape = array_ops.shape(a)
batch_shape = a_shape[:-2]
m = a_shape[-2]
identity = linalg_ops.eye(m, batch_shape=batch_shape, dtype=a.dtype)
gramian = math_ops.matmul(a, a, adjoint_b=True) + l2_regularizer * identity
chol = linalg_ops.cholesky(gramian)
grad_b = linalg_ops.cholesky_solve(chol, math_ops.matmul(a, grad))
# Temporary tmp = (A * A^T + lambda * I)^{-1} * B.
tmp = linalg_ops.cholesky_solve(chol, b)
a1 = math_ops.matmul(tmp, a, adjoint_a=True)
a1 = -math_ops.matmul(grad_b, a1)
a2 = grad - math_ops.matmul(a, grad_b, adjoint_a=True)
a2 = math_ops.matmul(tmp, a2, adjoint_b=True)
grad_a = a1 + a2
return (grad_a, grad_b, None)
fast = op.get_attr("fast")
if fast is False:
raise ValueError("Gradient not defined for fast=False")
matrix_shape = op.inputs[0].get_shape()[-2:]
if matrix_shape.is_fully_defined():
if matrix_shape[-2] >= matrix_shape[-1]:
return _overdetermined(op, grad)
else:
return _underdetermined(op, grad)
else:
# We have to defer determining the shape to runtime and use
# conditional execution of the appropriate graph.
matrix_shape = array_ops.shape(op.inputs[0])[-2:]
return control_flow_ops.cond(matrix_shape[-2] >= matrix_shape[-1],
lambda: _overdetermined(op, grad),
lambda: _underdetermined(op, grad))
@ops.RegisterGradient("MatrixTriangularSolve")
def _MatrixTriangularSolveGrad(op, grad):
"""Gradient for MatrixTriangularSolve."""
a = op.inputs[0]
adjoint_a = op.get_attr("adjoint")
lower_a = op.get_attr("lower")
c = op.outputs[0]
grad_b = linalg_ops.matrix_triangular_solve(
a, grad, lower=lower_a, adjoint=not adjoint_a)
if adjoint_a:
grad_a = -math_ops.matmul(c, grad_b, adjoint_b=True)
else:
grad_a = -math_ops.matmul(grad_b, c, adjoint_b=True)
if lower_a:
grad_a = array_ops.matrix_band_part(grad_a, -1, 0)
else:
grad_a = array_ops.matrix_band_part(grad_a, 0, -1)
return (grad_a, grad_b)
@ops.RegisterGradient("SelfAdjointEigV2")
def _SelfAdjointEigV2Grad(op, grad_e, grad_v):
"""Gradient for SelfAdjointEigV2."""
e = op.outputs[0]
v = op.outputs[1]
# a = op.inputs[0], which satisfies
# a[...,:,:] * v[...,:,i] = e[...,i] * v[...,i]
with ops.control_dependencies([grad_e.op, grad_v.op]):
if grad_v is not None:
# Construct the matrix f(i,j) = (i != j ? 1 / (e_i - e_j) : 0).
# Notice that because of the term involving f, the gradient becomes
# infinite (or NaN in practice) when eigenvalues are not unique.
# Mathematically this should not be surprising, since for (k-fold)
# degenerate eigenvalues, the corresponding eigenvectors are only defined
# up to arbitrary rotation in a (k-dimensional) subspace.
f = array_ops.matrix_set_diag(
math_ops.reciprocal(
array_ops.expand_dims(e, -2) - array_ops.expand_dims(e, -1)),
array_ops.zeros_like(e))
grad_a = math_ops.matmul(
v,
math_ops.matmul(
array_ops.matrix_diag(grad_e) + f * math_ops.matmul(
v, grad_v, adjoint_a=True),
v,
adjoint_b=True))
else:
grad_a = math_ops.matmul(
v, math_ops.matmul(
array_ops.matrix_diag(grad_e), v, adjoint_b=True))
# The forward op only depends on the lower triangular part of a, so here we
# symmetrize and take the lower triangle
grad_a = array_ops.matrix_band_part(
grad_a + math_ops.conj(array_ops.matrix_transpose(grad_a)), -1, 0)
grad_a = array_ops.matrix_set_diag(grad_a,
0.5 * array_ops.matrix_diag_part(grad_a))
return grad_a
| apache-2.0 | -1,518,084,748,407,574,500 | 37.468085 | 80 | 0.645796 | false | 3.150924 | false | false | false |
Fireblend/chromium-crosswalk | tools/cygprofile/cyglog_to_orderfile.py | 6 | 8358 | #!/usr/bin/python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Symbolizes a log file produced by cyprofile instrumentation.
Given a log file and the binary being profiled, creates an orderfile.
"""
import logging
import multiprocessing
import optparse
import os
import tempfile
import string
import sys
import cygprofile_utils
import symbol_extractor
def _ParseLogLines(log_file_lines):
"""Parses a merged cyglog produced by mergetraces.py.
Args:
    log_file_lines: array of lines in log file produced by profiled run
Below is an example of a small log file:
5086e000-52e92000 r-xp 00000000 b3:02 51276 libchromeview.so
secs usecs pid:threadid func
START
1314897086 795828 3587:1074648168 0x509e105c
1314897086 795874 3587:1074648168 0x509e0eb4
1314897086 796326 3587:1074648168 0x509e0e3c
1314897086 796552 3587:1074648168 0x509e07bc
END
Returns:
An ordered list of callee offsets.
"""
call_lines = []
vm_start = 0
line = log_file_lines[0]
assert 'r-xp' in line
end_index = line.find('-')
vm_start = int(line[:end_index], 16)
for line in log_file_lines[3:]:
fields = line.split()
if len(fields) == 4:
call_lines.append(fields)
else:
assert fields[0] == 'END'
# Convert strings to int in fields.
call_info = []
for call_line in call_lines:
addr = int(call_line[3], 16)
if vm_start < addr:
addr -= vm_start
call_info.append(addr)
return call_info
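# Worked example (derived from the docstring above): the mapping line
# "5086e000-52e92000 r-xp ..." gives vm_start = 0x5086e000, so the call at
# 0x509e105c is recorded as the offset 0x509e105c - 0x5086e000 = 0x17305c.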
def _GroupLibrarySymbolInfosByOffset(lib_filename):
"""Returns a dict {offset: [SymbolInfo]} from a library."""
symbol_infos = symbol_extractor.SymbolInfosFromBinary(lib_filename)
return symbol_extractor.GroupSymbolInfosByOffset(symbol_infos)
class SymbolNotFoundException(Exception):
def __init__(self, value):
super(SymbolNotFoundException, self).__init__(value)
self.value = value
def __str__(self):
return repr(self.value)
def _FindSymbolInfosAtOffset(offset_to_symbol_infos, offset):
"""Finds all SymbolInfo at a given offset.
Args:
offset_to_symbol_infos: {offset: [SymbolInfo]}
offset: offset to look the symbols at
Returns:
The list of SymbolInfo at the given offset
Raises:
SymbolNotFoundException if the offset doesn't match any symbol.
"""
if offset in offset_to_symbol_infos:
return offset_to_symbol_infos[offset]
elif offset % 2 and (offset - 1) in offset_to_symbol_infos:
# On ARM, odd addresses are used to signal thumb instruction. They are
# generated by setting the LSB to 1 (see
# http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0471e/Babfjhia.html).
# TODO(lizeb): Make sure this hack doesn't propagate to other archs.
return offset_to_symbol_infos[offset - 1]
else:
raise SymbolNotFoundException(offset)
def _GetObjectFileNames(obj_dir):
"""Returns the list of object files in a directory."""
obj_files = []
for (dirpath, _, filenames) in os.walk(obj_dir):
for file_name in filenames:
if file_name.endswith('.o'):
obj_files.append(os.path.join(dirpath, file_name))
return obj_files
def _AllSymbolInfos(object_filenames):
"""Returns a list of SymbolInfo from an iterable of filenames."""
pool = multiprocessing.Pool()
# Hopefully the object files are in the page cache at this step, so IO should
# not be a problem (hence no concurrency limit on the pool).
symbol_infos_nested = pool.map(
symbol_extractor.SymbolInfosFromBinary, object_filenames)
result = []
for symbol_infos in symbol_infos_nested:
result += symbol_infos
return result
def _GetSymbolToSectionMapFromObjectFiles(obj_dir):
""" Creates a mapping from symbol to linker section name by scanning all
the object files.
"""
object_files = _GetObjectFileNames(obj_dir)
symbol_to_section_map = {}
symbol_warnings = cygprofile_utils.WarningCollector(300)
symbol_infos = _AllSymbolInfos(object_files)
for symbol_info in symbol_infos:
symbol = symbol_info.name
if symbol.startswith('.LTHUNK'):
continue
section = symbol_info.section
if ((symbol in symbol_to_section_map) and
(symbol_to_section_map[symbol] != symbol_info.section)):
symbol_warnings.Write('Symbol ' + symbol +
' in conflicting sections ' + section +
' and ' + symbol_to_section_map[symbol])
elif not section.startswith('.text'):
symbol_warnings.Write('Symbol ' + symbol +
' in incorrect section ' + section)
else:
symbol_to_section_map[symbol] = section
symbol_warnings.WriteEnd('bad sections')
return symbol_to_section_map
def _WarnAboutDuplicates(offsets):
"""Warns about duplicate offsets.
Args:
offsets: list of offsets to check for duplicates
Returns:
True if there are no duplicates, False otherwise.
"""
seen_offsets = set()
ok = True
for offset in offsets:
if offset not in seen_offsets:
seen_offsets.add(offset)
else:
ok = False
logging.warning('Duplicate offset: ' + hex(offset))
return ok
def _OutputOrderfile(offsets, offset_to_symbol_infos, symbol_to_section_map,
output_file):
"""Outputs the orderfile to output_file.
Args:
offsets: Iterable of offsets to match to section names
offset_to_symbol_infos: {offset: [SymbolInfo]}
symbol_to_section_map: {name: section}
output_file: file-like object to write the results to
"""
success = True
unknown_symbol_warnings = cygprofile_utils.WarningCollector(300)
symbol_not_found_warnings = cygprofile_utils.WarningCollector(300)
output_sections = set()
for offset in offsets:
try:
symbol_infos = _FindSymbolInfosAtOffset(offset_to_symbol_infos, offset)
for symbol_info in symbol_infos:
if symbol_info.name in symbol_to_section_map:
section = symbol_to_section_map[symbol_info.name]
if not section in output_sections:
output_file.write(section + '\n')
output_sections.add(section)
else:
unknown_symbol_warnings.Write(
'No known section for symbol ' + symbol_info.name)
except SymbolNotFoundException:
symbol_not_found_warnings.Write(
'Did not find function in binary. offset: ' + hex(offset))
success = False
unknown_symbol_warnings.WriteEnd('no known section for symbol.')
symbol_not_found_warnings.WriteEnd('symbol not found in the binary.')
return success
def main():
parser = optparse.OptionParser(usage=
'usage: %prog [options] <merged_cyglog> <library> <output_filename>')
parser.add_option('--target-arch', action='store', dest='arch',
choices=['arm', 'arm64', 'x86', 'x86_64', 'x64', 'mips'],
help='The target architecture for libchrome.so')
options, argv = parser.parse_args(sys.argv)
if not options.arch:
options.arch = cygprofile_utils.DetectArchitecture()
if len(argv) != 4:
parser.print_help()
return 1
(log_filename, lib_filename, output_filename) = argv[1:]
symbol_extractor.SetArchitecture(options.arch)
obj_dir = os.path.abspath(os.path.join(
os.path.dirname(lib_filename), '../obj'))
log_file_lines = map(string.rstrip, open(log_filename).readlines())
offsets = _ParseLogLines(log_file_lines)
_WarnAboutDuplicates(offsets)
offset_to_symbol_infos = _GroupLibrarySymbolInfosByOffset(lib_filename)
symbol_to_section_map = _GetSymbolToSectionMapFromObjectFiles(obj_dir)
success = False
temp_filename = None
output_file = None
try:
(fd, temp_filename) = tempfile.mkstemp(dir=os.path.dirname(output_filename))
output_file = os.fdopen(fd, 'w')
ok = _OutputOrderfile(
offsets, offset_to_symbol_infos, symbol_to_section_map, output_file)
output_file.close()
os.rename(temp_filename, output_filename)
temp_filename = None
success = ok
finally:
if output_file:
output_file.close()
if temp_filename:
os.remove(temp_filename)
return 0 if success else 1
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
sys.exit(main())
| bsd-3-clause | -5,208,371,290,180,285,000 | 31.395349 | 90 | 0.684015 | false | 3.635494 | false | false | false |
igor-rangel7l/novoigorrangel.repository | plugin.video.SportsDevil/service/asn1crypto/keys.py | 13 | 35048 | # coding: utf-8
"""
ASN.1 type classes for public and private keys. Exports the following items:
- DSAPrivateKey()
- ECPrivateKey()
- EncryptedPrivateKeyInfo()
- PrivateKeyInfo()
- PublicKeyInfo()
- RSAPrivateKey()
- RSAPublicKey()
Other type classes are defined that help compose the types listed above.
"""
from __future__ import unicode_literals, division, absolute_import, print_function
import hashlib
import math
from ._elliptic_curve import (
SECP192R1_BASE_POINT,
SECP224R1_BASE_POINT,
SECP256R1_BASE_POINT,
SECP384R1_BASE_POINT,
SECP521R1_BASE_POINT,
PrimeCurve,
PrimePoint,
)
from ._errors import unwrap
from ._types import type_name, str_cls, byte_cls
from .algos import _ForceNullParameters, DigestAlgorithm, EncryptionAlgorithm
from .core import (
Any,
Asn1Value,
BitString,
Choice,
Integer,
IntegerOctetString,
Null,
ObjectIdentifier,
OctetBitString,
OctetString,
ParsableOctetString,
ParsableOctetBitString,
Sequence,
SequenceOf,
SetOf,
)
from .util import int_from_bytes, int_to_bytes
class OtherPrimeInfo(Sequence):
"""
Source: https://tools.ietf.org/html/rfc3447#page-46
"""
_fields = [
('prime', Integer),
('exponent', Integer),
('coefficient', Integer),
]
class OtherPrimeInfos(SequenceOf):
"""
Source: https://tools.ietf.org/html/rfc3447#page-46
"""
_child_spec = OtherPrimeInfo
class RSAPrivateKeyVersion(Integer):
"""
Original Name: Version
Source: https://tools.ietf.org/html/rfc3447#page-45
"""
_map = {
0: 'two-prime',
1: 'multi',
}
class RSAPrivateKey(Sequence):
"""
Source: https://tools.ietf.org/html/rfc3447#page-45
"""
_fields = [
('version', RSAPrivateKeyVersion),
('modulus', Integer),
('public_exponent', Integer),
('private_exponent', Integer),
('prime1', Integer),
('prime2', Integer),
('exponent1', Integer),
('exponent2', Integer),
('coefficient', Integer),
('other_prime_infos', OtherPrimeInfos, {'optional': True})
]
class RSAPublicKey(Sequence):
"""
Source: https://tools.ietf.org/html/rfc3447#page-44
"""
_fields = [
('modulus', Integer),
('public_exponent', Integer)
]
class DSAPrivateKey(Sequence):
"""
The ASN.1 structure that OpenSSL uses to store a DSA private key that is
    not part of a PKCS#8 structure. Reverse engineered from the English-language
    description on the linked OpenSSL documentation page.
Original Name: None
Source: https://www.openssl.org/docs/apps/dsa.html
"""
_fields = [
('version', Integer),
('p', Integer),
('q', Integer),
('g', Integer),
('public_key', Integer),
('private_key', Integer),
]
class _ECPoint():
"""
In both PublicKeyInfo and PrivateKeyInfo, the EC public key is a byte
string that is encoded as a bit string. This class adds convenience
methods for converting to and from the byte string to a pair of integers
that are the X and Y coordinates.
"""
@classmethod
def from_coords(cls, x, y):
"""
Creates an ECPoint object from the X and Y integer coordinates of the
point
:param x:
The X coordinate, as an integer
:param y:
The Y coordinate, as an integer
:return:
An ECPoint object
"""
x_bytes = int(math.ceil(math.log(x, 2) / 8.0))
y_bytes = int(math.ceil(math.log(y, 2) / 8.0))
num_bytes = max(x_bytes, y_bytes)
byte_string = b'\x04'
byte_string += int_to_bytes(x, width=num_bytes)
byte_string += int_to_bytes(y, width=num_bytes)
return cls(byte_string)
def to_coords(self):
"""
Returns the X and Y coordinates for this EC point, as native Python
integers
:return:
A 2-element tuple containing integers (X, Y)
"""
data = self.native
first_byte = data[0:1]
# Uncompressed
if first_byte == b'\x04':
remaining = data[1:]
field_len = len(remaining) // 2
x = int_from_bytes(remaining[0:field_len])
y = int_from_bytes(remaining[field_len:])
return (x, y)
if first_byte not in set([b'\x02', b'\x03']):
raise ValueError(unwrap(
'''
Invalid EC public key - first byte is incorrect
'''
))
raise ValueError(unwrap(
'''
Compressed representations of EC public keys are not supported due
to patent US6252960
'''
))
class ECPoint(OctetString, _ECPoint):
pass
class ECPointBitString(OctetBitString, _ECPoint):
pass
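# Illustrative sketch (not part of the original module): round-tripping EC point
# coordinates through the uncompressed encoding handled by _ECPoint above. The
# coordinates are made-up values, not points on a real curve.
def _ec_point_example():
    point = ECPoint.from_coords(0x1234, 0x5678)
    x, y = point.to_coords()
    return x, y  # should round-trip back to (0x1234, 0x5678)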
class SpecifiedECDomainVersion(Integer):
"""
Source: http://www.secg.org/sec1-v2.pdf page 104
"""
_map = {
1: 'ecdpVer1',
2: 'ecdpVer2',
3: 'ecdpVer3',
}
class FieldType(ObjectIdentifier):
"""
Original Name: None
Source: http://www.secg.org/sec1-v2.pdf page 101
"""
_map = {
'1.2.840.10045.1.1': 'prime_field',
'1.2.840.10045.1.2': 'characteristic_two_field',
}
class CharacteristicTwoBasis(ObjectIdentifier):
"""
Original Name: None
Source: http://www.secg.org/sec1-v2.pdf page 102
"""
_map = {
'1.2.840.10045.1.2.1.1': 'gn_basis',
'1.2.840.10045.1.2.1.2': 'tp_basis',
'1.2.840.10045.1.2.1.3': 'pp_basis',
}
class Pentanomial(Sequence):
"""
Source: http://www.secg.org/sec1-v2.pdf page 102
"""
_fields = [
('k1', Integer),
('k2', Integer),
('k3', Integer),
]
class CharacteristicTwo(Sequence):
"""
Original Name: Characteristic-two
Source: http://www.secg.org/sec1-v2.pdf page 101
"""
_fields = [
('m', Integer),
('basis', CharacteristicTwoBasis),
('parameters', Any),
]
_oid_pair = ('basis', 'parameters')
_oid_specs = {
'gn_basis': Null,
'tp_basis': Integer,
'pp_basis': Pentanomial,
}
class FieldID(Sequence):
"""
Source: http://www.secg.org/sec1-v2.pdf page 100
"""
_fields = [
('field_type', FieldType),
('parameters', Any),
]
_oid_pair = ('field_type', 'parameters')
_oid_specs = {
'prime_field': Integer,
'characteristic_two_field': CharacteristicTwo,
}
class Curve(Sequence):
"""
Source: http://www.secg.org/sec1-v2.pdf page 104
"""
_fields = [
('a', OctetString),
('b', OctetString),
('seed', OctetBitString, {'optional': True}),
]
class SpecifiedECDomain(Sequence):
"""
Source: http://www.secg.org/sec1-v2.pdf page 103
"""
_fields = [
('version', SpecifiedECDomainVersion),
('field_id', FieldID),
('curve', Curve),
('base', ECPoint),
('order', Integer),
('cofactor', Integer, {'optional': True}),
('hash', DigestAlgorithm, {'optional': True}),
]
class NamedCurve(ObjectIdentifier):
"""
Various named curves
Original Name: None
Source: https://tools.ietf.org/html/rfc3279#page-23,
https://tools.ietf.org/html/rfc5480#page-5
"""
_map = {
# https://tools.ietf.org/html/rfc3279#page-23
'1.2.840.10045.3.0.1': 'c2pnb163v1',
'1.2.840.10045.3.0.2': 'c2pnb163v2',
'1.2.840.10045.3.0.3': 'c2pnb163v3',
'1.2.840.10045.3.0.4': 'c2pnb176w1',
'1.2.840.10045.3.0.5': 'c2tnb191v1',
'1.2.840.10045.3.0.6': 'c2tnb191v2',
'1.2.840.10045.3.0.7': 'c2tnb191v3',
'1.2.840.10045.3.0.8': 'c2onb191v4',
'1.2.840.10045.3.0.9': 'c2onb191v5',
'1.2.840.10045.3.0.10': 'c2pnb208w1',
'1.2.840.10045.3.0.11': 'c2tnb239v1',
'1.2.840.10045.3.0.12': 'c2tnb239v2',
'1.2.840.10045.3.0.13': 'c2tnb239v3',
'1.2.840.10045.3.0.14': 'c2onb239v4',
'1.2.840.10045.3.0.15': 'c2onb239v5',
'1.2.840.10045.3.0.16': 'c2pnb272w1',
'1.2.840.10045.3.0.17': 'c2pnb304w1',
'1.2.840.10045.3.0.18': 'c2tnb359v1',
'1.2.840.10045.3.0.19': 'c2pnb368w1',
'1.2.840.10045.3.0.20': 'c2tnb431r1',
'1.2.840.10045.3.1.2': 'prime192v2',
'1.2.840.10045.3.1.3': 'prime192v3',
'1.2.840.10045.3.1.4': 'prime239v1',
'1.2.840.10045.3.1.5': 'prime239v2',
'1.2.840.10045.3.1.6': 'prime239v3',
# https://tools.ietf.org/html/rfc5480#page-5
'1.3.132.0.1': 'sect163k1',
'1.3.132.0.15': 'sect163r2',
'1.2.840.10045.3.1.1': 'secp192r1',
'1.3.132.0.33': 'secp224r1',
'1.3.132.0.26': 'sect233k1',
'1.2.840.10045.3.1.7': 'secp256r1',
'1.3.132.0.27': 'sect233r1',
'1.3.132.0.16': 'sect283k1',
'1.3.132.0.17': 'sect283r1',
'1.3.132.0.34': 'secp384r1',
'1.3.132.0.36': 'sect409k1',
'1.3.132.0.37': 'sect409r1',
'1.3.132.0.35': 'secp521r1',
'1.3.132.0.38': 'sect571k1',
'1.3.132.0.39': 'sect571r1',
}
class ECDomainParameters(Choice):
"""
Source: http://www.secg.org/sec1-v2.pdf page 102
"""
_alternatives = [
('specified', SpecifiedECDomain),
('named', NamedCurve),
('implicit_ca', Null),
]
class ECPrivateKeyVersion(Integer):
"""
Original Name: None
Source: http://www.secg.org/sec1-v2.pdf page 108
"""
_map = {
1: 'ecPrivkeyVer1',
}
class ECPrivateKey(Sequence):
"""
Source: http://www.secg.org/sec1-v2.pdf page 108
"""
_fields = [
('version', ECPrivateKeyVersion),
('private_key', IntegerOctetString),
('parameters', ECDomainParameters, {'tag_type': 'explicit', 'tag': 0, 'optional': True}),
('public_key', ECPointBitString, {'tag_type': 'explicit', 'tag': 1, 'optional': True}),
]
class DSAParams(Sequence):
"""
Parameters for a DSA public or private key
Original Name: Dss-Parms
Source: https://tools.ietf.org/html/rfc3279#page-9
"""
_fields = [
('p', Integer),
('q', Integer),
('g', Integer),
]
class Attribute(Sequence):
"""
Source: https://www.itu.int/rec/dologin_pub.asp?lang=e&id=T-REC-X.501-198811-S!!PDF-E&type=items page 8
"""
_fields = [
('type', ObjectIdentifier),
('values', SetOf, {'spec': Any}),
]
class Attributes(SetOf):
"""
Source: https://tools.ietf.org/html/rfc5208#page-3
"""
_child_spec = Attribute
class PrivateKeyAlgorithmId(ObjectIdentifier):
"""
These OIDs for various public keys are reused when storing private keys
inside of a PKCS#8 structure
Original Name: None
Source: https://tools.ietf.org/html/rfc3279
"""
_map = {
# https://tools.ietf.org/html/rfc3279#page-19
'1.2.840.113549.1.1.1': 'rsa',
# https://tools.ietf.org/html/rfc3279#page-18
'1.2.840.10040.4.1': 'dsa',
# https://tools.ietf.org/html/rfc3279#page-13
'1.2.840.10045.2.1': 'ec',
}
class PrivateKeyAlgorithm(_ForceNullParameters, Sequence):
"""
Original Name: PrivateKeyAlgorithmIdentifier
Source: https://tools.ietf.org/html/rfc5208#page-3
"""
_fields = [
('algorithm', PrivateKeyAlgorithmId),
('parameters', Any, {'optional': True}),
]
_oid_pair = ('algorithm', 'parameters')
_oid_specs = {
'dsa': DSAParams,
'ec': ECDomainParameters,
}
class PrivateKeyInfo(Sequence):
"""
Source: https://tools.ietf.org/html/rfc5208#page-3
"""
_fields = [
('version', Integer),
('private_key_algorithm', PrivateKeyAlgorithm),
('private_key', ParsableOctetString),
('attributes', Attributes, {'tag_type': 'implicit', 'tag': 0, 'optional': True}),
]
def _private_key_spec(self):
algorithm = self['private_key_algorithm']['algorithm'].native
return {
'rsa': RSAPrivateKey,
'dsa': Integer,
'ec': ECPrivateKey,
}[algorithm]
_spec_callbacks = {
'private_key': _private_key_spec
}
_algorithm = None
_bit_size = None
_public_key = None
_fingerprint = None
@classmethod
def wrap(cls, private_key, algorithm):
"""
Wraps a private key in a PrivateKeyInfo structure
:param private_key:
A byte string or Asn1Value object of the private key
:param algorithm:
A unicode string of "rsa", "dsa" or "ec"
:return:
A PrivateKeyInfo object
"""
if not isinstance(private_key, byte_cls) and not isinstance(private_key, Asn1Value):
raise TypeError(unwrap(
'''
private_key must be a byte string or Asn1Value, not %s
''',
type_name(private_key)
))
if algorithm == 'rsa':
if not isinstance(private_key, RSAPrivateKey):
private_key = RSAPrivateKey.load(private_key)
params = Null()
elif algorithm == 'dsa':
if not isinstance(private_key, DSAPrivateKey):
private_key = DSAPrivateKey.load(private_key)
params = DSAParams()
params['p'] = private_key['p']
params['q'] = private_key['q']
params['g'] = private_key['g']
public_key = private_key['public_key']
private_key = private_key['private_key']
elif algorithm == 'ec':
if not isinstance(private_key, ECPrivateKey):
private_key = ECPrivateKey.load(private_key)
else:
private_key = private_key.copy()
params = private_key['parameters']
del private_key['parameters']
else:
raise ValueError(unwrap(
'''
algorithm must be one of "rsa", "dsa", "ec", not %s
''',
repr(algorithm)
))
private_key_algo = PrivateKeyAlgorithm()
private_key_algo['algorithm'] = PrivateKeyAlgorithmId(algorithm)
private_key_algo['parameters'] = params
container = cls()
container._algorithm = algorithm
container['version'] = Integer(0)
container['private_key_algorithm'] = private_key_algo
container['private_key'] = private_key
# Here we save the DSA public key if possible since it is not contained
# within the PKCS#8 structure for a DSA key
if algorithm == 'dsa':
container._public_key = public_key
return container
def _compute_public_key(self):
"""
Computes the public key corresponding to the current private key.
:return:
For RSA keys, an RSAPublicKey object. For DSA keys, an Integer
object. For EC keys, an ECPointBitString.
"""
if self.algorithm == 'dsa':
params = self['private_key_algorithm']['parameters']
return Integer(pow(
params['g'].native,
self['private_key'].parsed.native,
params['p'].native
))
if self.algorithm == 'rsa':
key = self['private_key'].parsed
return RSAPublicKey({
'modulus': key['modulus'],
'public_exponent': key['public_exponent'],
})
if self.algorithm == 'ec':
curve_type, details = self.curve
if curve_type == 'implicit_ca':
raise ValueError(unwrap(
'''
Unable to compute public key for EC key using Implicit CA
parameters
'''
))
if curve_type == 'specified':
if details['field_id']['field_type'] == 'characteristic_two_field':
raise ValueError(unwrap(
'''
Unable to compute public key for EC key over a
characteristic two field
'''
))
curve = PrimeCurve(
details['field_id']['parameters'],
int_from_bytes(details['curve']['a']),
int_from_bytes(details['curve']['b'])
)
base_x, base_y = self['private_key_algorithm']['parameters'].chosen['base'].to_coords()
base_point = PrimePoint(curve, base_x, base_y)
elif curve_type == 'named':
if details not in ('secp192r1', 'secp224r1', 'secp256r1', 'secp384r1', 'secp521r1'):
raise ValueError(unwrap(
'''
Unable to compute public key for EC named curve %s,
parameters not currently included
''',
details
))
base_point = {
'secp192r1': SECP192R1_BASE_POINT,
'secp224r1': SECP224R1_BASE_POINT,
'secp256r1': SECP256R1_BASE_POINT,
'secp384r1': SECP384R1_BASE_POINT,
'secp521r1': SECP521R1_BASE_POINT,
}[details]
public_point = base_point * self['private_key'].parsed['private_key'].native
return ECPointBitString.from_coords(public_point.x, public_point.y)
def unwrap(self):
"""
Unwraps the private key into an RSAPrivateKey, DSAPrivateKey or
ECPrivateKey object
:return:
An RSAPrivateKey, DSAPrivateKey or ECPrivateKey object
"""
if self.algorithm == 'rsa':
return self['private_key'].parsed
if self.algorithm == 'dsa':
params = self['private_key_algorithm']['parameters']
return DSAPrivateKey({
'version': 0,
'p': params['p'],
'q': params['q'],
'g': params['g'],
'public_key': self.public_key,
'private_key': self['private_key'].parsed,
})
if self.algorithm == 'ec':
output = self['private_key'].parsed
output['parameters'] = self['private_key_algorithm']['parameters']
output['public_key'] = self.public_key
return output
@property
def curve(self):
"""
Returns information about the curve used for an EC key
:raises:
ValueError - when the key is not an EC key
:return:
A two-element tuple, with the first element being a unicode string
of "implicit_ca", "specified" or "named". If the first element is
"implicit_ca", the second is None. If "specified", the second is
an OrderedDict that is the native version of SpecifiedECDomain. If
"named", the second is a unicode string of the curve name.
"""
if self.algorithm != 'ec':
raise ValueError(unwrap(
'''
Only EC keys have a curve, this key is %s
''',
self.algorithm.upper()
))
params = self['private_key_algorithm']['parameters']
chosen = params.chosen
if params.name == 'implicit_ca':
value = None
else:
value = chosen.native
return (params.name, value)
@property
def hash_algo(self):
"""
Returns the name of the family of hash algorithms used to generate a
DSA key
:raises:
ValueError - when the key is not a DSA key
:return:
A unicode string of "sha1" or "sha2"
"""
if self.algorithm != 'dsa':
raise ValueError(unwrap(
'''
Only DSA keys are generated using a hash algorithm, this key is
%s
''',
self.algorithm.upper()
))
byte_len = math.log(self['private_key_algorithm']['parameters']['q'].native, 2) / 8
return 'sha1' if byte_len <= 20 else 'sha2'
@property
def algorithm(self):
"""
:return:
A unicode string of "rsa", "dsa" or "ec"
"""
if self._algorithm is None:
self._algorithm = self['private_key_algorithm']['algorithm'].native
return self._algorithm
@property
def bit_size(self):
"""
:return:
The bit size of the private key, as an integer
"""
if self._bit_size is None:
if self.algorithm == 'rsa':
prime = self['private_key'].parsed['modulus'].native
elif self.algorithm == 'dsa':
prime = self['private_key_algorithm']['parameters']['p'].native
elif self.algorithm == 'ec':
prime = self['private_key'].parsed['private_key'].native
self._bit_size = int(math.ceil(math.log(prime, 2)))
modulus = self._bit_size % 8
if modulus != 0:
self._bit_size += 8 - modulus
return self._bit_size
@property
def byte_size(self):
"""
:return:
The byte size of the private key, as an integer
"""
return int(math.ceil(self.bit_size / 8))
@property
def public_key(self):
"""
:return:
If an RSA key, an RSAPublicKey object. If a DSA key, an Integer
object. If an EC key, an ECPointBitString object.
"""
if self._public_key is None:
if self.algorithm == 'ec':
key = self['private_key'].parsed
if key['public_key']:
self._public_key = key['public_key'].untag()
else:
self._public_key = self._compute_public_key()
else:
self._public_key = self._compute_public_key()
return self._public_key
@property
def public_key_info(self):
"""
:return:
A PublicKeyInfo object derived from this private key.
"""
return PublicKeyInfo({
'algorithm': {
'algorithm': self.algorithm,
'parameters': self['private_key_algorithm']['parameters']
},
'public_key': self.public_key
})
@property
def fingerprint(self):
"""
Creates a fingerprint that can be compared with a public key to see if
the two form a pair.
        This fingerprint is not compatible with fingerprints generated by any
other software.
:return:
A byte string that is a sha256 hash of selected components (based
on the key type)
"""
if self._fingerprint is None:
params = self['private_key_algorithm']['parameters']
key = self['private_key'].parsed
if self.algorithm == 'rsa':
to_hash = '%d:%d' % (
key['modulus'].native,
key['public_exponent'].native,
)
elif self.algorithm == 'dsa':
public_key = self.public_key
to_hash = '%d:%d:%d:%d' % (
params['p'].native,
params['q'].native,
params['g'].native,
public_key.native,
)
elif self.algorithm == 'ec':
public_key = key['public_key'].native
if public_key is None:
public_key = self.public_key.native
if params.name == 'named':
to_hash = '%s:' % params.chosen.native
to_hash = to_hash.encode('utf-8')
to_hash += public_key
elif params.name == 'implicit_ca':
to_hash = public_key
elif params.name == 'specified':
to_hash = '%s:' % params.chosen['field_id']['parameters'].native
to_hash = to_hash.encode('utf-8')
to_hash += b':' + params.chosen['curve']['a'].native
to_hash += b':' + params.chosen['curve']['b'].native
to_hash += public_key
if isinstance(to_hash, str_cls):
to_hash = to_hash.encode('utf-8')
self._fingerprint = hashlib.sha256(to_hash).digest()
return self._fingerprint
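# Illustrative sketch (not part of the original module): wrapping a DER-encoded
# RSA private key in a PKCS#8 PrivateKeyInfo structure. The file path below is a
# made-up placeholder.
def _wrap_rsa_private_key_example():
    with open('rsa_private_key.der', 'rb') as f:
        der_bytes = f.read()
    info = PrivateKeyInfo.wrap(der_bytes, 'rsa')
    return info.algorithm, info.bit_size, info.fingerprint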
class EncryptedPrivateKeyInfo(Sequence):
"""
Source: https://tools.ietf.org/html/rfc5208#page-4
"""
_fields = [
('encryption_algorithm', EncryptionAlgorithm),
('encrypted_data', OctetString),
]
# These structures are from https://tools.ietf.org/html/rfc3279
class ValidationParms(Sequence):
"""
Source: https://tools.ietf.org/html/rfc3279#page-10
"""
_fields = [
('seed', BitString),
('pgen_counter', Integer),
]
class DomainParameters(Sequence):
"""
Source: https://tools.ietf.org/html/rfc3279#page-10
"""
_fields = [
('p', Integer),
('g', Integer),
('q', Integer),
('j', Integer, {'optional': True}),
('validation_params', ValidationParms, {'optional': True}),
]
class PublicKeyAlgorithmId(ObjectIdentifier):
"""
Original Name: None
Source: https://tools.ietf.org/html/rfc3279
"""
_map = {
# https://tools.ietf.org/html/rfc3279#page-19
'1.2.840.113549.1.1.1': 'rsa',
# https://tools.ietf.org/html/rfc3279#page-18
'1.2.840.10040.4.1': 'dsa',
# https://tools.ietf.org/html/rfc3279#page-13
'1.2.840.10045.2.1': 'ec',
# https://tools.ietf.org/html/rfc3279#page-10
'1.2.840.10046.2.1': 'dh',
}
class PublicKeyAlgorithm(_ForceNullParameters, Sequence):
"""
Original Name: AlgorithmIdentifier
Source: https://tools.ietf.org/html/rfc5280#page-18
"""
_fields = [
('algorithm', PublicKeyAlgorithmId),
('parameters', Any, {'optional': True}),
]
_oid_pair = ('algorithm', 'parameters')
_oid_specs = {
'dsa': DSAParams,
'ec': ECDomainParameters,
'dh': DomainParameters,
}
class PublicKeyInfo(Sequence):
"""
Original Name: SubjectPublicKeyInfo
Source: https://tools.ietf.org/html/rfc5280#page-17
"""
_fields = [
('algorithm', PublicKeyAlgorithm),
('public_key', ParsableOctetBitString),
]
def _public_key_spec(self):
algorithm = self['algorithm']['algorithm'].native
return {
'rsa': RSAPublicKey,
'dsa': Integer,
# We override the field spec with ECPoint so that users can easily
# decompose the byte string into the constituent X and Y coords
'ec': (ECPointBitString, None),
'dh': Integer,
}[algorithm]
_spec_callbacks = {
'public_key': _public_key_spec
}
_algorithm = None
_bit_size = None
_fingerprint = None
_sha1 = None
_sha256 = None
@classmethod
def wrap(cls, public_key, algorithm):
"""
Wraps a public key in a PublicKeyInfo structure
:param public_key:
A byte string or Asn1Value object of the public key
:param algorithm:
A unicode string of "rsa"
:return:
A PublicKeyInfo object
"""
if not isinstance(public_key, byte_cls) and not isinstance(public_key, Asn1Value):
raise TypeError(unwrap(
'''
public_key must be a byte string or Asn1Value, not %s
''',
type_name(public_key)
))
if algorithm != 'rsa':
raise ValueError(unwrap(
'''
algorithm must "rsa", not %s
''',
repr(algorithm)
))
algo = PublicKeyAlgorithm()
algo['algorithm'] = PublicKeyAlgorithmId(algorithm)
algo['parameters'] = Null()
container = cls()
container['algorithm'] = algo
if isinstance(public_key, Asn1Value):
public_key = public_key.untag().dump()
container['public_key'] = ParsableOctetBitString(public_key)
return container
def unwrap(self):
"""
Unwraps an RSA public key into an RSAPublicKey object. Does not support
DSA or EC public keys since they do not have an unwrapped form.
:return:
An RSAPublicKey object
"""
if self.algorithm == 'rsa':
return self['public_key'].parsed
key_type = self.algorithm.upper()
a_an = 'an' if key_type == 'EC' else 'a'
raise ValueError(unwrap(
'''
Only RSA public keys may be unwrapped - this key is %s %s public
key
''',
a_an,
key_type
))
@property
def curve(self):
"""
Returns information about the curve used for an EC key
:raises:
ValueError - when the key is not an EC key
:return:
A two-element tuple, with the first element being a unicode string
of "implicit_ca", "specified" or "named". If the first element is
"implicit_ca", the second is None. If "specified", the second is
an OrderedDict that is the native version of SpecifiedECDomain. If
"named", the second is a unicode string of the curve name.
"""
if self.algorithm != 'ec':
raise ValueError(unwrap(
'''
Only EC keys have a curve, this key is %s
''',
self.algorithm.upper()
))
params = self['algorithm']['parameters']
chosen = params.chosen
if params.name == 'implicit_ca':
value = None
else:
value = chosen.native
return (params.name, value)
@property
def hash_algo(self):
"""
Returns the name of the family of hash algorithms used to generate a
DSA key
:raises:
ValueError - when the key is not a DSA key
:return:
A unicode string of "sha1" or "sha2" or None if no parameters are
present
"""
if self.algorithm != 'dsa':
raise ValueError(unwrap(
'''
Only DSA keys are generated using a hash algorithm, this key is
%s
''',
self.algorithm.upper()
))
parameters = self['algorithm']['parameters']
if parameters.native is None:
return None
byte_len = math.log(parameters['q'].native, 2) / 8
return 'sha1' if byte_len <= 20 else 'sha2'
@property
def algorithm(self):
"""
:return:
A unicode string of "rsa", "dsa" or "ec"
"""
if self._algorithm is None:
self._algorithm = self['algorithm']['algorithm'].native
return self._algorithm
@property
def bit_size(self):
"""
:return:
The bit size of the public key, as an integer
"""
if self._bit_size is None:
if self.algorithm == 'ec':
self._bit_size = ((len(self['public_key'].native) - 1) / 2) * 8
else:
if self.algorithm == 'rsa':
prime = self['public_key'].parsed['modulus'].native
elif self.algorithm == 'dsa':
prime = self['algorithm']['parameters']['p'].native
self._bit_size = int(math.ceil(math.log(prime, 2)))
modulus = self._bit_size % 8
if modulus != 0:
self._bit_size += 8 - modulus
return self._bit_size
@property
def byte_size(self):
"""
:return:
The byte size of the public key, as an integer
"""
return int(math.ceil(self.bit_size / 8))
@property
def sha1(self):
"""
:return:
The SHA1 hash of the DER-encoded bytes of this public key info
"""
if self._sha1 is None:
self._sha1 = hashlib.sha1(byte_cls(self['public_key'])).digest()
return self._sha1
@property
def sha256(self):
"""
:return:
The SHA-256 hash of the DER-encoded bytes of this public key info
"""
if self._sha256 is None:
self._sha256 = hashlib.sha256(byte_cls(self['public_key'])).digest()
return self._sha256
@property
def fingerprint(self):
"""
Creates a fingerprint that can be compared with a private key to see if
the two form a pair.
        This fingerprint is not compatible with fingerprints generated by any
other software.
:return:
A byte string that is a sha256 hash of selected components (based
on the key type)
"""
if self._fingerprint is None:
key_type = self['algorithm']['algorithm'].native
params = self['algorithm']['parameters']
if key_type == 'rsa':
key = self['public_key'].parsed
to_hash = '%d:%d' % (
key['modulus'].native,
key['public_exponent'].native,
)
elif key_type == 'dsa':
key = self['public_key'].parsed
to_hash = '%d:%d:%d:%d' % (
params['p'].native,
params['q'].native,
params['g'].native,
key.native,
)
elif key_type == 'ec':
key = self['public_key']
if params.name == 'named':
to_hash = '%s:' % params.chosen.native
to_hash = to_hash.encode('utf-8')
to_hash += key.native
elif params.name == 'implicit_ca':
to_hash = key.native
elif params.name == 'specified':
to_hash = '%s:' % params.chosen['field_id']['parameters'].native
to_hash = to_hash.encode('utf-8')
to_hash += b':' + params.chosen['curve']['a'].native
to_hash += b':' + params.chosen['curve']['b'].native
to_hash += key.native
if isinstance(to_hash, str_cls):
to_hash = to_hash.encode('utf-8')
self._fingerprint = hashlib.sha256(to_hash).digest()
return self._fingerprint
| gpl-2.0 | -8,525,391,529,607,395,000 | 27.151004 | 107 | 0.524738 | false | 3.824531 | false | false | false |
bhansa/fireball | pyvenv/Lib/site-packages/pip/vcs/bazaar.py | 514 | 3803 | from __future__ import absolute_import
import logging
import os
import tempfile
# TODO: Get this into six.moves.urllib.parse
try:
from urllib import parse as urllib_parse
except ImportError:
import urlparse as urllib_parse
from pip.utils import rmtree, display_path
from pip.vcs import vcs, VersionControl
from pip.download import path_to_url
logger = logging.getLogger(__name__)
class Bazaar(VersionControl):
name = 'bzr'
dirname = '.bzr'
repo_name = 'branch'
schemes = (
'bzr', 'bzr+http', 'bzr+https', 'bzr+ssh', 'bzr+sftp', 'bzr+ftp',
'bzr+lp',
)
def __init__(self, url=None, *args, **kwargs):
super(Bazaar, self).__init__(url, *args, **kwargs)
        # Python >= 2.7.4 and 3.3 do not have uses_fragment or non_hierarchical
# Register lp but do not expose as a scheme to support bzr+lp.
if getattr(urllib_parse, 'uses_fragment', None):
urllib_parse.uses_fragment.extend(['lp'])
urllib_parse.non_hierarchical.extend(['lp'])
def export(self, location):
"""
Export the Bazaar repository at the url to the destination location
"""
temp_dir = tempfile.mkdtemp('-export', 'pip-')
self.unpack(temp_dir)
if os.path.exists(location):
# Remove the location to make sure Bazaar can export it correctly
rmtree(location)
try:
self.run_command(['export', location], cwd=temp_dir,
show_stdout=False)
finally:
rmtree(temp_dir)
def switch(self, dest, url, rev_options):
self.run_command(['switch', url], cwd=dest)
def update(self, dest, rev_options):
self.run_command(['pull', '-q'] + rev_options, cwd=dest)
def obtain(self, dest):
url, rev = self.get_url_rev()
if rev:
rev_options = ['-r', rev]
rev_display = ' (to revision %s)' % rev
else:
rev_options = []
rev_display = ''
if self.check_destination(dest, url, rev_options, rev_display):
logger.info(
'Checking out %s%s to %s',
url,
rev_display,
display_path(dest),
)
self.run_command(['branch', '-q'] + rev_options + [url, dest])
def get_url_rev(self):
        # hotfix the URL scheme after removing bzr+ from bzr+ssh://; re-add it
url, rev = super(Bazaar, self).get_url_rev()
if url.startswith('ssh://'):
url = 'bzr+' + url
return url, rev
def get_url(self, location):
urls = self.run_command(['info'], show_stdout=False, cwd=location)
for line in urls.splitlines():
line = line.strip()
for x in ('checkout of branch: ',
'parent branch: '):
if line.startswith(x):
repo = line.split(x)[1]
if self._is_local_repository(repo):
return path_to_url(repo)
return repo
return None
def get_revision(self, location):
revision = self.run_command(
['revno'], show_stdout=False, cwd=location)
return revision.splitlines()[-1]
def get_src_requirement(self, dist, location):
repo = self.get_url(location)
if not repo:
return None
if not repo.lower().startswith('bzr:'):
repo = 'bzr+' + repo
egg_project_name = dist.egg_name().split('-', 1)[0]
current_rev = self.get_revision(location)
return '%s@%s#egg=%s' % (repo, current_rev, egg_project_name)
def check_version(self, dest, rev_options):
"""Always assume the versions don't match"""
return False
vcs.register(Bazaar)
| gpl-3.0 | 5,999,897,277,662,634,000 | 31.784483 | 77 | 0.55614 | false | 3.825956 | false | false | false |
noroutine/ansible | lib/ansible/playbook/role_include.py | 23 | 5507 |
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from os.path import basename
from ansible.errors import AnsibleParserError
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.task_include import TaskInclude
from ansible.playbook.role import Role
from ansible.playbook.role.include import RoleInclude
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['IncludeRole']
class IncludeRole(TaskInclude):
"""
A Role include is derived from a regular role to handle the special
circumstances related to the `- include_role: ...`
"""
BASE = ('name', 'role') # directly assigned
FROM_ARGS = ('tasks_from', 'vars_from', 'defaults_from') # used to populate from dict in role
OTHER_ARGS = ('private', 'allow_duplicates') # assigned to matching property
VALID_ARGS = tuple(frozenset(BASE + FROM_ARGS + OTHER_ARGS)) # all valid args
_inheritable = False
# =================================================================================
# ATTRIBUTES
# private as this is a 'module options' vs a task property
_allow_duplicates = FieldAttribute(isa='bool', default=True, private=True)
_private = FieldAttribute(isa='bool', default=None, private=True)
def __init__(self, block=None, role=None, task_include=None):
super(IncludeRole, self).__init__(block=block, role=role, task_include=task_include)
self._from_files = {}
self._parent_role = role
self._role_name = None
self._role_path = None
def get_block_list(self, play=None, variable_manager=None, loader=None):
# only need play passed in when dynamic
if play is None:
myplay = self._parent._play
else:
myplay = play
ri = RoleInclude.load(self._role_name, play=myplay, variable_manager=variable_manager, loader=loader)
ri.vars.update(self.vars)
# build role
actual_role = Role.load(ri, myplay, parent_role=self._parent_role, from_files=self._from_files)
actual_role._metadata.allow_duplicates = self.allow_duplicates
# save this for later use
self._role_path = actual_role._role_path
# compile role with parent roles as dependencies to ensure they inherit
# variables
if not self._parent_role:
dep_chain = []
else:
dep_chain = list(self._parent_role._parents)
dep_chain.append(self._parent_role)
blocks = actual_role.compile(play=myplay, dep_chain=dep_chain)
for b in blocks:
b._parent = self
        # update the available handlers in the play
handlers = actual_role.get_handler_blocks(play=myplay)
myplay.handlers = myplay.handlers + handlers
return blocks, handlers
@staticmethod
def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
ir = IncludeRole(block, role, task_include=task_include).load_data(data, variable_manager=variable_manager, loader=loader)
# Validate options
my_arg_names = frozenset(ir.args.keys())
# name is needed, or use role as alias
ir._role_name = ir.args.get('name', ir.args.get('role'))
if ir._role_name is None:
raise AnsibleParserError("'name' is a required field for include_role.")
        # validate bad args, which would otherwise be silently ignored
bad_opts = my_arg_names.difference(IncludeRole.VALID_ARGS)
if bad_opts:
raise AnsibleParserError('Invalid options for include_role: %s' % ','.join(list(bad_opts)))
# build options for role includes
for key in my_arg_names.intersection(IncludeRole.FROM_ARGS):
from_key = key.replace('_from', '')
ir._from_files[from_key] = basename(ir.args.get(key))
# manual list as otherwise the options would set other task parameters we don't want.
for option in my_arg_names.intersection(IncludeRole.OTHER_ARGS):
setattr(ir, option, ir.args.get(option))
return ir
def copy(self, exclude_parent=False, exclude_tasks=False):
new_me = super(IncludeRole, self).copy(exclude_parent=exclude_parent, exclude_tasks=exclude_tasks)
new_me.statically_loaded = self.statically_loaded
new_me._from_files = self._from_files.copy()
new_me._parent_role = self._parent_role
new_me._role_name = self._role_name
new_me._role_path = self._role_path
return new_me
def get_include_params(self):
v = super(IncludeRole, self).get_include_params()
if self._parent_role:
v.update(self._parent_role.get_role_params())
return v
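# Illustrative sketch (not part of the original module): the kind of playbook
# task this class handles. The role, file and option values below are made-up
# examples, limited to options listed in IncludeRole.VALID_ARGS.
#
#   - include_role:
#       name: myrole
#       tasks_from: install
#       vars_from: main
#       allow_duplicates: false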
| gpl-3.0 | -1,847,802,811,969,058,000 | 36.209459 | 130 | 0.654803 | false | 3.976173 | false | false | false |
hnousiainen/pghoard | pghoard/pgutil.py | 2 | 3543 | # Copied from https://github.com/ohmu/ohmu_common_py ohmu_common_py/pgutil.py version 0.0.1-0-unknown-fa54b44
"""
pghoard - postgresql utility functions
Copyright (c) 2015 Ohmu Ltd
See LICENSE for details
"""
try:
from urllib.parse import urlparse, parse_qs # pylint: disable=no-name-in-module, import-error
except ImportError:
from urlparse import urlparse, parse_qs # pylint: disable=no-name-in-module, import-error
def create_connection_string(connection_info):
return " ".join("{}='{}'".format(k, str(v).replace("'", "\\'"))
for k, v in sorted(connection_info.items()))
def mask_connection_info(info):
masked_info = get_connection_info(info)
password = masked_info.pop("password", None)
return "{0}; {1} password".format(
create_connection_string(masked_info),
"no" if password is None else "hidden")
def get_connection_info_from_config_line(line):
_, value = line.split("=", 1)
value = value.strip()[1:-1].replace("''", "'")
return get_connection_info(value)
def get_connection_info(info):
"""turn a connection info object into a dict or return it if it was a
dict already. supports both the traditional libpq format and the new
url format"""
if isinstance(info, dict):
return info.copy()
elif info.startswith("postgres://") or info.startswith("postgresql://"):
return parse_connection_string_url(info)
else:
return parse_connection_string_libpq(info)
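# Illustrative sketch (not part of the original module): both supported formats
# parse to the same kind of dict. Host, credentials and database are made up.
def _get_connection_info_examples():
    url_info = get_connection_info(
        "postgres://alice:secret@db.example.com:5432/mydb?sslmode=require")
    libpq_info = get_connection_info(
        "host='db.example.com' port='5432' user='alice' password='secret' "
        "dbname='mydb' sslmode='require'")
    # Both return dicts such as {"host": "db.example.com", "port": "5432", ...}
    return url_info, libpq_info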
def parse_connection_string_url(url):
# drop scheme from the url as some versions of urlparse don't handle
# query and path properly for urls with a non-http scheme
schemeless_url = url.split(":", 1)[1]
p = urlparse(schemeless_url)
fields = {}
if p.hostname:
fields["host"] = p.hostname
if p.port:
fields["port"] = str(p.port)
if p.username:
fields["user"] = p.username
if p.password is not None:
fields["password"] = p.password
if p.path and p.path != "/":
fields["dbname"] = p.path[1:]
for k, v in parse_qs(p.query).items():
fields[k] = v[-1]
return fields
def parse_connection_string_libpq(connection_string):
"""parse a postgresql connection string as defined in
http://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING"""
fields = {}
while True:
connection_string = connection_string.strip()
if not connection_string:
break
if "=" not in connection_string:
raise ValueError("expecting key=value format in connection_string fragment {!r}".format(connection_string))
key, rem = connection_string.split("=", 1)
if rem.startswith("'"):
asis, value = False, ""
for i in range(1, len(rem)):
if asis:
value += rem[i]
asis = False
elif rem[i] == "'":
break # end of entry
elif rem[i] == "\\":
asis = True
else:
value += rem[i]
else:
raise ValueError("invalid connection_string fragment {!r}".format(rem))
connection_string = rem[i + 1:] # pylint: disable=undefined-loop-variable
else:
res = rem.split(None, 1)
if len(res) > 1:
value, connection_string = res
else:
value, connection_string = rem, ""
fields[key] = value
return fields
| apache-2.0 | 4,727,380,614,604,684,000 | 34.43 | 119 | 0.594129 | false | 3.851087 | false | false | false |
vgkholla/ambry | ambry-tools/scripts/layout-analyzer.py | 7 | 7025 | #!/usr/bin/python2.7
import argparse
import json
import sys
from collections import defaultdict
class Node(object):
def __init__(self, node, datacenter):
self.node = node
self.datacenter = datacenter
self.partitions = set()
@property
def hostname(self):
return self.node["hostname"]
@property
def port(self):
return self.node["port"]
@property
def rack_id(self):
if "rackId" in self.node:
return self.node["rackId"]
return -1
@property
def datacenter_name(self):
return self.datacenter["name"]
def add_partition(self, partition):
self.partitions.add(partition)
def __repr__(self):
return "[hostname: {}, port: {}, dc: {}]".format(
self.hostname, self.port, self.datacenter_name)
__str__ = __repr__
class Partition(object):
def __init__(self, partition):
self.partition = partition
self.nodes_by_datacenter = defaultdict(set)
@property
def id(self):
return self.partition["id"]
def add_node(self, node):
self.nodes_by_datacenter[node.datacenter_name].add(node)
node.add_partition(self)
def racks_used(self, datacenter_name):
return {node.rack_id for node in self.nodes_by_datacenter[datacenter_name]}
def __repr__(self):
return "[id: {}]".format(self.id)
__str__ = __repr__
class Layout(object):
BALANCE_THRESHOLD = 4.0
def __init__(self, hardware_layout_filename, partition_layout_filename):
with open(hardware_layout_filename) as f:
self.hardware_layout = json.load(f)
with open(partition_layout_filename) as f:
self.partition_layout = json.load(f)
self.setup()
def setup(self):
self.node_map = {}
self.partition_map = {}
self.dc_node_combo_map = defaultdict(lambda: defaultdict(set))
for datacenter_struct in self.hardware_layout["datacenters"]:
for node_struct in datacenter_struct["dataNodes"]:
k = (node_struct["hostname"], node_struct["port"])
self.node_map[k] = Node(node_struct, datacenter_struct)
for partition_struct in self.partition_layout["partitions"]:
partition = Partition(partition_struct)
if len(partition_struct["replicas"]) == 0:
raise Exception("No replicas assigned to partition {}".format(partition.id))
for replica_struct in partition_struct["replicas"]:
k = (replica_struct["hostname"], replica_struct["port"])
node = self.node_map[k]
partition.add_node(node)
for dc, nodes in partition.nodes_by_datacenter.items():
self.dc_node_combo_map[dc][frozenset(nodes)].add(partition)
self.partition_map[partition_struct["id"]] = partition
def rack_id(self, node_host, node_port):
k = (node_host, node_port)
if k in self.node_map:
return self.node_map[k].rack_id
raise Exception("Node {}:{} not found".format(node_host, node_port))
def racks_used(self, partition_id, datacenter_name):
return self.partition_map[partition_id].racks_used(datacenter_name)
def shared_partitions(self, *nodes):
return set.intersection(
*(self.node_map[node].partitions for node in nodes)
)
def print_report(self):
for dc, node_combo_map in self.dc_node_combo_map.items():
print("In datacenter: {}".format(dc))
max_combo = max(node_combo_map,
key=lambda k: len(node_combo_map[k]))
avg_per_combo = sum(len(partitions) for partitions in node_combo_map.values()) / float(len(node_combo_map))
max_per_combo = len(node_combo_map[max_combo])
print("Num node combos used: {}".format(len(node_combo_map)))
print("Average partitions sharing a node combo: {}".format(avg_per_combo))
print("Max partitions sharing a node combo: {} on the following nodes:".format(max_per_combo))
for node in max_combo:
print(node)
if (float(max_per_combo) / avg_per_combo) > self.BALANCE_THRESHOLD:
print("The ratio of max to average number of partitions sharing a node combo "
+ "exceeds the threshold: {} on this datacenter".format(self.BALANCE_THRESHOLD))
sum_racks, n_partitions, min_racks = 0, 0, sys.maxsize
for partition in self.partition_map.values():
num_racks = len(partition.racks_used(dc))
n_partitions += 1
sum_racks += num_racks
if num_racks < min_racks:
min_racks = num_racks
print("Min racks used: {}".format(min_racks))
print("Average racks used: {}".format(
float(sum_racks) / n_partitions))
partitions_per_node = [len(node.partitions) for node in self.node_map.values()
if node.datacenter_name == dc]
print("")
def interactive(self):
while True:
cmd = raw_input(">> ").split()
try:
if len(cmd) == 0:
continue
elif cmd[0] == "report":
self.print_report()
elif cmd[0] == "rack_id":
print("Node {}:{} is on rack {}".format(
cmd[1], cmd[2], self.rack_id(cmd[1], int(cmd[2]))))
elif cmd[0] == "racks_used":
print("Partition {} in datacenter {} uses the following racks: {}".format(
cmd[1], cmd[2], self.racks_used(int(cmd[1]), cmd[2])))
elif cmd[0] == "shared_partitions":
args = [(cmd[i + 1], int(cmd[i + 2])) for i in range(0, len(cmd) - 1, 2)]
print("The following nodes:")
for hostname, port in args:
print(" {}:{}".format(hostname, port))
print("share the following partitions:")
print(self.shared_partitions(*args))
else:
print("Command not recognized")
except Exception:
print("Invalid input")
print("")
def main():
parser = argparse.ArgumentParser(
description='Analyze node distribution in a partition layout')
parser.add_argument("--interactive", "-i", action="store_true")
parser.add_argument('hardware_layout',
help='the path to the hardware layout file')
parser.add_argument('partition_layout',
help='the path to the partition layout file')
args = parser.parse_args()
layout = Layout(args.hardware_layout, args.partition_layout)
if args.interactive:
layout.interactive()
else:
layout.print_report()
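# Illustrative sketch (not part of the original script): programmatic use with
# made-up layout file names; equivalent to running the script without -i.
def _print_report_example():
    layout = Layout('hardware-layout.json', 'partition-layout.json')
    layout.print_report()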
if __name__ == "__main__":
main()
| apache-2.0 | -2,986,022,109,325,995,000 | 36.768817 | 119 | 0.560142 | false | 4.081929 | false | false | false |
mlperf/training_results_v0.7 | Fujitsu/benchmarks/resnet/implementations/implementation_open/mxnet/3rdparty/tvm/topi/python/topi/testing/roi_align_python.py | 2 | 3126 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-nested-blocks
"Roi align in python"
import math
import numpy as np
def roi_align_nchw_python(a_np, rois_np, pooled_size, spatial_scale, sample_ratio):
"""Roi align in python"""
_, channel, height, width = a_np.shape
num_roi = rois_np.shape[0]
b_np = np.zeros((num_roi, channel, pooled_size, pooled_size), dtype=a_np.dtype)
if isinstance(pooled_size, int):
pooled_size_h = pooled_size_w = pooled_size
else:
pooled_size_h, pooled_size_w = pooled_size
def _bilinear(b, c, y, x):
if y < -1 or y > height or x < -1 or x > width:
return 0
y = max(y, 0.0)
x = max(x, 0.0)
y_low = int(y)
x_low = int(x)
y_high = min(y_low + 1, height - 1)
x_high = min(x_low + 1, width - 1)
ly = y - y_low
lx = x - x_low
return (1 - ly) * (1 - lx) * a_np[b, c, y_low, x_low] + \
(1 - ly) * lx * a_np[b, c, y_low, x_high] + \
ly * (1 - lx) * a_np[b, c, y_high, x_low] + \
ly * lx * a_np[b, c, y_high, x_high]
for i in range(num_roi):
roi = rois_np[i]
batch_index = int(roi[0])
roi_start_w, roi_start_h, roi_end_w, roi_end_h = roi[1:] * spatial_scale
roi_h = max(roi_end_h - roi_start_h, 1.0)
roi_w = max(roi_end_w - roi_start_w, 1.0)
bin_h = roi_h / pooled_size_h
bin_w = roi_w / pooled_size_w
if sample_ratio > 0:
roi_bin_grid_h = roi_bin_grid_w = int(sample_ratio)
else:
            roi_bin_grid_h = int(math.ceil(roi_h / pooled_size_h))
            roi_bin_grid_w = int(math.ceil(roi_w / pooled_size_w))
count = roi_bin_grid_h * roi_bin_grid_w
for c in range(channel):
for ph in range(pooled_size_h):
for pw in range(pooled_size_w):
total = 0.
for iy in range(roi_bin_grid_h):
for ix in range(roi_bin_grid_w):
y = roi_start_h + ph * bin_h + (iy + 0.5) * bin_h / roi_bin_grid_h
x = roi_start_w + pw * bin_w + (ix + 0.5) * bin_w / roi_bin_grid_w
total += _bilinear(batch_index, c, y, x)
b_np[i, c, ph, pw] = total / count
return b_np
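# Illustrative sketch (not part of the original module): calling the reference
# implementation on a tiny made-up feature map with one region of interest.
def _roi_align_example():
    feature_map = np.arange(64, dtype="float32").reshape(1, 1, 8, 8)
    # One ROI per row: [batch_index, x1, y1, x2, y2] in input coordinates.
    rois = np.array([[0, 0.0, 0.0, 4.0, 4.0]], dtype="float32")
    # Returns an array of shape (num_rois, channels, 2, 2).
    return roi_align_nchw_python(feature_map, rois, pooled_size=2,
                                 spatial_scale=1.0, sample_ratio=2)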
| apache-2.0 | 9,151,436,490,313,603,000 | 38.56962 | 94 | 0.559181 | false | 3.043817 | false | false | false |
zhouxiao-coder/Paddle | demo/introduction/trainer_config.py | 3 | 1344 | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.trainer_config_helpers import *
# 1. read data. Suppose you saved above python code as dataprovider.py
data_file = 'empty.list'
with open(data_file, 'w') as f:
f.writelines(' ')
define_py_data_sources2(
train_list=data_file,
test_list=None,
module='dataprovider',
obj='process',
args={})
# 2. learning algorithm
settings(batch_size=12, learning_rate=1e-3, learning_method=MomentumOptimizer())
# 3. Network configuration
x = data_layer(name='x', size=1)
y = data_layer(name='y', size=1)
y_predict = fc_layer(
input=x,
param_attr=ParamAttr(name='w'),
size=1,
act=LinearActivation(),
bias_attr=ParamAttr(name='b'))
cost = regression_cost(input=y_predict, label=y)
outputs(cost)
| apache-2.0 | 7,014,155,936,608,804,000 | 31.780488 | 80 | 0.720238 | false | 3.428571 | false | false | false |
caesar2164/edx-platform | common/djangoapps/track/transformers.py | 51 | 16677 | """
EventTransformers are data structures that represents events, and modify those
events to match the format desired for the tracking logs. They are registered
by name (or name prefix) in the EventTransformerRegistry, which is used to
apply them to the appropriate events.
"""
import json
import logging
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import UsageKey
log = logging.getLogger(__name__)
class DottedPathMapping(object):
"""
    Dictionary-like object keyed on dotted paths.
    If a key is created that ends with a dot, it will be treated as a path
    prefix. Any lookup key that starts with that dotted prefix will map to
    the stored value, but only the most specific (longest) matching prefix
    will be used.
"""
# TODO: The current implementation of the prefix registry requires
# O(number of prefix event transformers) to access an event. If we get a
# large number of EventTransformers, it may be worth writing a tree-based
# map structure where each node is a segment of the match key, which would
# reduce access time to O(len(match.key.split('.'))), or essentially constant
# time.
def __init__(self, registry=None):
self._match_registry = {}
self._prefix_registry = {}
self.update(registry or {})
def __contains__(self, key):
try:
_ = self[key]
return True
except KeyError:
return False
def __getitem__(self, key):
if key in self._match_registry:
return self._match_registry[key]
if isinstance(key, basestring):
# Reverse-sort the keys to find the longest matching prefix.
for prefix in sorted(self._prefix_registry, reverse=True):
if key.startswith(prefix):
return self._prefix_registry[prefix]
raise KeyError('Key {} not found in {}'.format(key, type(self)))
def __setitem__(self, key, value):
if key.endswith('.'):
self._prefix_registry[key] = value
else:
self._match_registry[key] = value
def __delitem__(self, key):
if key.endswith('.'):
del self._prefix_registry[key]
else:
del self._match_registry[key]
def get(self, key, default=None):
"""
        Return `self[key]` if it exists; otherwise return `default`, which
        defaults to `None`.
"""
try:
            return self[key]
except KeyError:
return default
def update(self, dict_):
"""
Update the mapping with the values in the supplied `dict`.
"""
        for key, value in dict_.items():
self[key] = value
def keys(self):
"""
Return the keys of the mapping, including both exact matches and
prefix matches.
"""
return self._match_registry.keys() + self._prefix_registry.keys()
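# Illustrative sketch (not part of the original module): exact keys win over
# prefix keys, and the longest matching prefix is preferred. The event names
# here mirror the transformers registered further below.
def _dotted_path_mapping_example():
    mapping = DottedPathMapping()
    mapping['edx.ui.lms.'] = 'lms-prefix'
    mapping['edx.ui.lms.sequence.'] = 'sequence-prefix'
    mapping['edx.ui.lms.sequence.tab_selected'] = 'exact'
    assert mapping['edx.ui.lms.sequence.tab_selected'] == 'exact'
    assert mapping['edx.ui.lms.sequence.next_selected'] == 'sequence-prefix'
    assert mapping['edx.ui.lms.outline.expanded'] == 'lms-prefix'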
class EventTransformerRegistry(object):
"""
Registry to track which EventTransformers handle which events. The
EventTransformer must define a `match_key` attribute which contains the
name or prefix of the event names it tracks. Any `match_key` that ends
with a `.` will match all events that share its prefix. A transformer name
without a trailing dot only processes exact matches.
"""
mapping = DottedPathMapping()
@classmethod
def register(cls, transformer):
"""
Decorator to register an EventTransformer. It must have a `match_key`
class attribute defined.
"""
cls.mapping[transformer.match_key] = transformer
return transformer
@classmethod
def create_transformer(cls, event):
"""
Create an EventTransformer of the given event.
If no transformer is registered to handle the event, this raises a
KeyError.
"""
name = event.get(u'name')
return cls.mapping[name](event)
class EventTransformer(dict):
"""
Creates a transformer to modify analytics events based on event type.
To use the transformer, instantiate it using the
`EventTransformer.create_transformer()` classmethod with the event
dictionary as the sole argument, and then call `transformer.transform()` on
the created object to modify the event to the format required for output.
Custom transformers will want to define some or all of the following values
Attributes:
match_key:
This is the name of the event you want to transform. If the name
ends with a `'.'`, it will be treated as a *prefix transformer*.
All other names denote *exact transformers*.
A *prefix transformer* will handle any event whose name begins with
the name of the prefix transformer. Only the most specific match
will be used, so if a transformer exists with a name of
`'edx.ui.lms.'` and another transformer has the name
`'edx.ui.lms.sequence.'` then an event called
`'edx.ui.lms.sequence.tab_selected'` will be handled by the
`'edx.ui.lms.sequence.'` transformer.
An *exact transformer* will only handle events whose name matches
name of the transformer exactly.
Exact transformers always take precedence over prefix transformers.
Transformers without a name will not be added to the registry, and
cannot be accessed via the `EventTransformer.create_transformer()`
classmethod.
is_legacy_event:
If an event is a legacy event, it needs to set event_type to the
legacy name for the event, and may need to set certain event fields
            to maintain backward compatibility. If an event needs to provide
legacy support in some contexts, `is_legacy_event` can be defined
as a property to add dynamic behavior.
Default: False
legacy_event_type:
If the event is or can be a legacy event, it should define
the legacy value for the event_type field here.
Processing methods. Override these to provide the behavior needed for your
particular EventTransformer:
self.process_legacy_fields():
This method should modify the event payload in any way necessary to
support legacy event types. It will only be run if
`is_legacy_event` returns a True value.
self.process_event()
This method modifies the event payload unconditionally. It will
always be run.
"""
def __init__(self, *args, **kwargs):
super(EventTransformer, self).__init__(*args, **kwargs)
self.load_payload()
# Properties to be overridden
is_legacy_event = False
@property
def legacy_event_type(self):
"""
Override this as an attribute or property to provide the value for
the event's `event_type`, if it does not match the event's `name`.
"""
raise NotImplementedError
# Convenience properties
@property
def name(self):
"""
Returns the event's name.
"""
return self[u'name']
@property
def context(self):
"""
Returns the event's context dict.
"""
return self.get(u'context', {})
# Transform methods
def load_payload(self):
"""
Create a data version of self[u'event'] at self.event
"""
if u'event' in self:
if isinstance(self[u'event'], basestring):
self.event = json.loads(self[u'event'])
else:
self.event = self[u'event']
def dump_payload(self):
"""
Write self.event back to self[u'event'].
Keep the same format we were originally given.
"""
if isinstance(self.get(u'event'), basestring):
self[u'event'] = json.dumps(self.event)
else:
self[u'event'] = self.event
def transform(self):
"""
Transform the event with legacy fields and other necessary
modifications.
"""
if self.is_legacy_event:
self._set_legacy_event_type()
self.process_legacy_fields()
self.process_event()
self.dump_payload()
def _set_legacy_event_type(self):
"""
Update the event's `event_type` to the value specified by
`self.legacy_event_type`.
"""
self['event_type'] = self.legacy_event_type
def process_legacy_fields(self):
"""
Override this method to specify how to update event fields to maintain
compatibility with legacy events.
"""
pass
def process_event(self):
"""
Override this method to make unconditional modifications to event
fields.
"""
pass
@EventTransformerRegistry.register
class SequenceTabSelectedEventTransformer(EventTransformer):
"""
Transformer to maintain backward compatibility with seq_goto events.
"""
match_key = u'edx.ui.lms.sequence.tab_selected'
is_legacy_event = True
legacy_event_type = u'seq_goto'
def process_legacy_fields(self):
self.event[u'old'] = self.event[u'current_tab']
self.event[u'new'] = self.event[u'target_tab']
class _BaseLinearSequenceEventTransformer(EventTransformer):
"""
Common functionality for transforming
`edx.ui.lms.sequence.{next,previous}_selected` events.
"""
offset = None
@property
def is_legacy_event(self):
"""
Linear sequence events are legacy events if the origin and target lie
within the same sequence.
"""
return not self.crosses_boundary()
def process_legacy_fields(self):
"""
Set legacy payload fields:
old: equal to the new current_tab field
new: the tab to which the user is navigating
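For example (illustrative values): with current_tab == 3 and an offset of 1
(next_selected), the legacy payload gets old == 3 and new == 4.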
"""
self.event[u'old'] = self.event[u'current_tab']
self.event[u'new'] = self.event[u'current_tab'] + self.offset
def crosses_boundary(self):
"""
Returns true if the navigation takes the focus out of the current
sequence.
"""
raise NotImplementedError
@EventTransformerRegistry.register
class NextSelectedEventTransformer(_BaseLinearSequenceEventTransformer):
"""
Transformer to maintain backward compatibility with seq_next events.
"""
match_key = u'edx.ui.lms.sequence.next_selected'
offset = 1
legacy_event_type = u'seq_next'
def crosses_boundary(self):
"""
Returns true if the navigation moves the focus to the next sequence.
"""
return self.event[u'current_tab'] == self.event[u'tab_count']
@EventTransformerRegistry.register
class PreviousSelectedEventTransformer(_BaseLinearSequenceEventTransformer):
"""
Transformer to maintain backward compatibility with seq_prev events.
"""
match_key = u'edx.ui.lms.sequence.previous_selected'
offset = -1
legacy_event_type = u'seq_prev'
def crosses_boundary(self):
"""
Returns true if the navigation moves the focus to the previous
sequence.
"""
return self.event[u'current_tab'] == 1
@EventTransformerRegistry.register
class VideoEventTransformer(EventTransformer):
"""
Converts new format video events into the legacy video event format.
Mobile devices cannot actually emit events that exactly match their
counterparts emitted by the LMS javascript video player. Instead of
attempting to get them to do that, we insert a transformer here that takes
the events they *can* easily emit and converts them into the legacy
format.
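For example (illustrative): a mobile-emitted `edx.video.played` event is
re-emitted with the legacy event_type `play_video` (see
name_to_event_type_map below), with its payload adjusted by the process_*
methods to match what the browser player would have sent.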
"""
match_key = u'edx.video.'
name_to_event_type_map = {
u'edx.video.played': u'play_video',
u'edx.video.paused': u'pause_video',
u'edx.video.stopped': u'stop_video',
u'edx.video.loaded': u'load_video',
u'edx.video.position.changed': u'seek_video',
u'edx.video.seeked': u'seek_video',
u'edx.video.transcript.shown': u'show_transcript',
u'edx.video.transcript.hidden': u'hide_transcript',
u'edx.video.language_menu.shown': u'video_show_cc_menu',
u'edx.video.language_menu.hidden': u'video_hide_cc_menu',
}
is_legacy_event = True
@property
def legacy_event_type(self):
"""
Return the legacy event_type of the current event
"""
return self.name_to_event_type_map[self.name]
def transform(self):
"""
Transform the event with necessary modifications if it is one of the
expected types of events.
"""
if self.name in self.name_to_event_type_map:
super(VideoEventTransformer, self).transform()
def process_event(self):
"""
Modify event fields.
"""
# Convert edx.video.seeked to edx.video.position.changed because edx.video.seeked was not intended to actually
# ever be emitted.
if self.name == "edx.video.seeked":
self['name'] = "edx.video.position.changed"
if not self.event:
return
self.set_id_to_usage_key()
self.capcase_current_time()
self.convert_seek_type()
self.disambiguate_skip_and_seek()
self.set_page_to_browser_url()
self.handle_ios_seek_bug()
def set_id_to_usage_key(self):
"""
Validate that the module_id is a valid usage key, and set the id field
accordingly.
"""
if 'module_id' in self.event:
module_id = self.event['module_id']
try:
usage_key = UsageKey.from_string(module_id)
except InvalidKeyError:
log.warning('Unable to parse module_id "%s"', module_id, exc_info=True)
else:
self.event['id'] = usage_key.html_id()
del self.event['module_id']
def capcase_current_time(self):
"""
Convert the current_time field to currentTime.
"""
if 'current_time' in self.event:
self.event['currentTime'] = self.event.pop('current_time')
def convert_seek_type(self):
"""
Converts seek_type to seek and converts skip|slide to
onSlideSeek|onSkipSeek.
"""
if 'seek_type' in self.event:
seek_type = self.event['seek_type']
if seek_type == 'slide':
self.event['type'] = "onSlideSeek"
elif seek_type == 'skip':
self.event['type'] = "onSkipSeek"
del self.event['seek_type']
def disambiguate_skip_and_seek(self):
"""
Disambiguate skip vs. seek for Android builds that don't distinguish
between them: a requested_skip_interval other than +/-30 seconds is
treated as a slide seek.
"""
if 'requested_skip_interval' in self.event:
if abs(self.event['requested_skip_interval']) != 30:
if 'type' in self.event:
self.event['type'] = 'onSlideSeek'
def set_page_to_browser_url(self):
"""
If `open_in_browser_url` is specified, set the page to the base of the
specified url.
"""
if 'open_in_browser_url' in self.context:
self['page'] = self.context.pop('open_in_browser_url').rpartition('/')[0]
def handle_ios_seek_bug(self):
"""
Handle seek bug in iOS.
iOS build 1.0.02 has a bug where it returns a +30 second skip when
it should be returning -30.
"""
if self._build_requests_plus_30_for_minus_30():
if self._user_requested_plus_30_skip():
self.event[u'requested_skip_interval'] = -30
def _build_requests_plus_30_for_minus_30(self):
"""
Returns True if this build contains the seek bug
"""
if u'application' in self.context:
if all(key in self.context[u'application'] for key in (u'version', u'name')):
app_version = self.context[u'application'][u'version']
app_name = self.context[u'application'][u'name']
return app_version == u'1.0.02' and app_name == u'edx.mobileapp.iOS'
return False
def _user_requested_plus_30_skip(self):
"""
If the user requested a +30 second skip, return True.
"""
if u'requested_skip_interval' in self.event and u'type' in self.event:
interval = self.event[u'requested_skip_interval']
action = self.event[u'type']
return interval == 30 and action == u'onSkipSeek'
else:
return False
| agpl-3.0 | -4,993,183,082,950,215,000 | 32.089286 | 118 | 0.611261 | false | 4.273962 | false | false | false |
luci/luci-py | appengine/swarming/server/task_request_test.py | 2 | 86434 | #!/usr/bin/env vpython
# Copyright 2014 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
import datetime
import logging
import random
import string
import sys
import unittest
import test_env
test_env.setup_test_env()
from google.protobuf import duration_pb2
from google.protobuf import timestamp_pb2
from google.appengine.api import datastore_errors
from google.appengine.ext import ndb
from components import auth_testing
from components import utils
from test_support import test_case
from proto.api import swarming_pb2
from server import bq_state
from server import config
from server import pools_config
from server import task_pack
from server import task_request
# pylint: disable=W0212
def _gen_cipd_input(**kwargs):
"""Creates a CipdInput."""
args = {
u'client_package':
task_request.CipdPackage(
package_name=u'infra/tools/cipd/${platform}',
version=u'git_revision:deadbeef'),
u'packages': [
task_request.CipdPackage(
package_name=u'rm', path=u'bin',
version=u'git_revision:deadbeef'),
],
u'server':
u'https://chrome-infra-packages.appspot.com'
}
args.update(kwargs)
return task_request.CipdInput(**args)
def _gen_properties(**kwargs):
"""Creates a TaskProperties."""
args = {
u'cipd_input':
_gen_cipd_input(),
u'command': [u'command1', u'arg1'],
u'containment': {
u'lower_priority': False,
u'containment_type': None,
u'limit_processes': None,
u'limit_total_committed_memory': None,
},
u'dimensions': {
u'OS': [u'Windows-3.1.1'],
u'hostname': [u'localhost'],
u'pool': [u'default'],
},
u'env': {
u'foo': u'bar',
u'joe': u'2'
},
u'env_prefixes': {
u'PATH': [u'local/path']
},
u'execution_timeout_secs':
30,
u'grace_period_secs':
30,
u'idempotent':
False,
u'inputs_ref':
task_request.FilesRef(
isolatedserver=u'https://isolateserver.appspot.com',
namespace=u'default-gzip'),
u'io_timeout_secs':
None,
u'has_secret_bytes':
False,
}
args.update(kwargs)
args[u'dimensions_data'] = args.pop(u'dimensions')
return task_request.TaskProperties(**args)
def _gen_request_slices(**kwargs):
"""Creates a TaskRequest."""
template_apply = kwargs.pop('_template_apply', task_request.TEMPLATE_AUTO)
now = utils.utcnow()
args = {
u'created_ts': now,
u'manual_tags': [u'tag:1'],
u'name': u'Request name',
u'priority': 50,
u'task_slices': [
task_request.TaskSlice(
expiration_secs=30, properties=_gen_properties()),
],
u'user': u'Jesus',
u'bot_ping_tolerance_secs': 120,
}
args.update(kwargs)
# Note that ndb model constructor accepts dicts for structured properties.
req = task_request.TaskRequest(**args)
task_request.init_new_request(req, True, template_apply)
return req
def _gen_request(properties=None, **kwargs):
"""Creates a TaskRequest with a single TaskSlice."""
return _gen_request_slices(
task_slices=[
task_request.TaskSlice(
expiration_secs=30, properties=properties or _gen_properties()),
],
**kwargs)
def _gen_secret(req, secret_bytes):
assert req.key
sb = task_request.SecretBytes(secret_bytes=secret_bytes)
sb.key = req.secret_bytes_key
return sb
def _gen_task_template(cache=None, cipd_package=None, env=None):
"""Builds an unverified pools_config.TaskTemplate for use with
_set_pool_config_with_templates.
Args:
cache (None|dict{name: path}) - cache entries to set.
cipd_package (None|dict{(path, pkg): version}) - cipd packages to set.
env (None|dict{var: value|(value, prefix)|(value, prefix, soft)}) -
envvars to set. The key is always the envvar to set, and the value may
be:
* the envvar value as a string (prefix=() and soft=False)
* A (value, prefix) tuple (soft=False)
* A (value, prefix, soft) tuple
Returns constructed pools_config.TaskTemplate.
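Example (illustrative arguments, mirroring the values used in the tests
below; each env value shows one of the accepted forms):

    _gen_task_template(
        cache={'git': 'git_cache'},
        cipd_package={('tool', 'infra/tools/foo'): 'latest'},
        env={
            'VAR_A': '1',                # plain value
            'VAR_B': ('1', ['a']),       # (value, prefix)
            'VAR_C': ('1', ['a'], True), # (value, prefix, soft)
        })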
"""
def env_value(var, combo_value):
prefix, soft = (), False
if isinstance(combo_value, tuple):
assert len(combo_value) in (2, 3), ('unexpected tuple length: %r' %
combo_value)
if len(combo_value) == 2:
value, prefix = combo_value
else:
value, prefix, soft = combo_value
else:
value = unicode(combo_value)
return pools_config.Env(var, value, tuple(map(unicode, prefix)), soft)
return pools_config.TaskTemplate(
cache=sorted(
pools_config.CacheEntry(unicode(name), unicode(path))
for name, path in (cache or {}).items()),
cipd_package=sorted(
pools_config.CipdPackage(
unicode(path), unicode(pkg), unicode(version))
for (path, pkg), version in (cipd_package or {}).items()),
env=sorted(
env_value(unicode(var), value) for var, value in (env or {}).items()),
inclusions=(),
)
class Prop(object):
_name = 'foo'
class TestCase(test_case.TestCase):
def setUp(self):
super(TestCase, self).setUp()
auth_testing.mock_get_current_identity(self)
class TaskRequestPrivateTest(TestCase):
def test_validate_task_run_id(self):
self.assertEqual(
'1d69b9f088008811',
task_request._validate_task_run_id(Prop(), '1d69b9f088008811'))
self.assertEqual(None, task_request._validate_task_run_id(Prop(), ''))
with self.assertRaises(ValueError):
task_request._validate_task_run_id(Prop(), '1')
def test_validate_cas_instance(self):
valid_cas_instance = 'projects/chromium-swarm/instances/default_instance'
self.assertEqual(
valid_cas_instance,
task_request._validate_cas_instance(Prop(), valid_cas_instance))
self.assertEqual(None, task_request._validate_cas_instance(Prop(), ''))
with self.assertRaises(datastore_errors.BadValueError):
task_request._validate_cas_instance(Prop(), 'invalid')
def test_apply_template_simple(self):
tt = _gen_task_template(
cache={'cache': 'c'},
cipd_package={('cipd', 'some/pkg'): 'latest'},
env={'ENV': ('1', ['a'])},
)
p = task_request.TaskProperties()
task_request._apply_task_template(tt, p)
self.assertEqual(
p,
task_request.TaskProperties(
env={u'ENV': u'1'},
env_prefixes={u'ENV': [u'a']},
caches=[task_request.CacheEntry(name=u'cache', path=u'c')],
cipd_input=task_request.CipdInput(packages=[
task_request.CipdPackage(
package_name=u'some/pkg', path=u'cipd', version=u'latest')
])))
def test_apply_template_env_set_error(self):
tt = _gen_task_template(env={'ENV': ('1', ['a'])})
p = task_request.TaskProperties(env={u'ENV': u'10'})
with self.assertRaises(ValueError) as ex:
task_request._apply_task_template(tt, p)
self.assertEqual(ex.exception.message,
"request.env[u'ENV'] conflicts with pool's template")
def test_apply_template_env_prefix_set_error(self):
tt = _gen_task_template(env={'ENV': ('1', ['a'])})
p = task_request.TaskProperties(env_prefixes={u'ENV': [u'b']})
with self.assertRaises(ValueError) as ex:
task_request._apply_task_template(tt, p)
self.assertEqual(
ex.exception.message,
"request.env_prefixes[u'ENV'] conflicts with pool's template")
def test_apply_template_env_override_soft(self):
tt = _gen_task_template(env={'ENV': ('1', ['a'], True)})
p = task_request.TaskProperties(env={u'ENV': u'2'})
task_request._apply_task_template(tt, p)
self.assertEqual(
p,
task_request.TaskProperties(
env={u'ENV': u'2'},
env_prefixes={u'ENV': [u'a']},
))
def test_apply_template_env_prefixes_append_soft(self):
tt = _gen_task_template(env={'ENV': ('1', ['a'], True)})
p = task_request.TaskProperties(env_prefixes={u'ENV': [u'b']})
task_request._apply_task_template(tt, p)
self.assertEqual(
p,
task_request.TaskProperties(
env={u'ENV': u'1'},
env_prefixes={u'ENV': [u'a', u'b']},
))
def test_apply_template_conflicting_cache(self):
tt = _gen_task_template(cache={'c': 'C'})
p = task_request.TaskProperties(
caches=[task_request.CacheEntry(name='c', path='B')])
with self.assertRaises(ValueError) as ex:
task_request._apply_task_template(tt, p)
self.assertEqual(ex.exception.message,
"request.cache['c'] conflicts with pool's template")
def test_apply_template_conflicting_cache_path(self):
tt = _gen_task_template(cache={'c': 'C'})
p = task_request.TaskProperties(
caches=[task_request.CacheEntry(name='other', path='C')])
with self.assertRaises(ValueError) as ex:
task_request._apply_task_template(tt, p)
self.assertEqual(
ex.exception.message,
"u'C': directory has conflicting owners: task cache 'other' "
"and task template cache u'c'")
def test_apply_template_conflicting_cache_cipd_path(self):
tt = _gen_task_template(cache={'c': 'C'})
p = task_request.TaskProperties(
cipd_input=task_request.CipdInput(packages=[
task_request.CipdPackage(
path='C', package_name='pkg', version='latest')
]))
with self.assertRaises(ValueError) as ex:
task_request._apply_task_template(tt, p)
self.assertEqual(
ex.exception.message,
"u'C': directory has conflicting owners: task cipd['pkg:latest'] "
"and task template cache u'c'")
def test_apply_template_conflicting_cipd_package(self):
tt = _gen_task_template(cipd_package={('C', 'pkg'): 'latest'})
p = task_request.TaskProperties(
cipd_input=task_request.CipdInput(packages=[
task_request.CipdPackage(
path='C', package_name='other', version='latest')
]))
with self.assertRaises(ValueError) as ex:
task_request._apply_task_template(tt, p)
self.assertEqual(
ex.exception.message,
"u'C': directory has conflicting owners: task cipd['other:latest'] "
"and task template cipd[u'pkg:latest']")
def test_apply_template_conflicting_cipd_cache_path(self):
tt = _gen_task_template(cipd_package={('C', 'pkg'): 'latest'})
p = task_request.TaskProperties(
caches=[task_request.CacheEntry(name='other', path='C')])
with self.assertRaises(ValueError) as ex:
task_request._apply_task_template(tt, p)
self.assertEqual(
ex.exception.message,
"u'C': directory has conflicting owners: task cache 'other' "
"and task template cipd[u'pkg:latest']")
class TaskRequestApiTest(TestCase):
def setUp(self):
super(TaskRequestApiTest, self).setUp()
# pool_configs is a mapping of pool name -> pools_config.PoolConfig. Tests
# can modify this to have pools_config.get_pool_config return the
# appropriate data.
self._pool_configs = {}
self.mock(pools_config, 'get_pool_config', self._pool_configs.get)
self._enqueue_calls = []
self._enqueue_orig = self.mock(utils, 'enqueue_task', self._enqueue)
def tearDown(self):
try:
self.assertFalse(self._enqueue_calls)
finally:
super(TaskRequestApiTest, self).tearDown()
def _enqueue(self, *args, **kwargs):
self._enqueue_calls.append((args, kwargs))
return self._enqueue_orig(*args, use_dedicated_module=False, **kwargs)
def test_all_apis_are_tested(self):
# Ensures there's a test for each public API.
module = task_request
expected = frozenset(
i for i in dir(module)
if i[0] != '_' and hasattr(getattr(module, i), 'func_name'))
missing = expected - frozenset(
i[5:] for i in dir(self) if i.startswith('test_'))
self.assertFalse(missing)
def test_get_automatic_tags(self):
req = _gen_request()
expected = set((u'hostname:localhost', u'OS:Windows-3.1.1', u'pool:default',
u'priority:50', u'service_account:none', u'user:Jesus',
u'use_isolate_1143123:1', u'use_cas_1143123:0'))
self.assertEqual(expected, task_request.get_automatic_tags(req, 0))
with self.assertRaises(IndexError):
task_request.get_automatic_tags(req, 1)
def test_get_automatic_tags_slices(self):
# Repeated TaskSlice.
slices = [
task_request.TaskSlice(
expiration_secs=60,
properties=_gen_properties(dimensions={
u'gpu': [u'1234:5678'],
u'pool': [u'GPU']
})),
task_request.TaskSlice(
expiration_secs=60,
properties=_gen_properties(dimensions={
u'gpu': [u'none'],
u'pool': [u'GPU']
})),
]
req = _gen_request_slices(task_slices=slices)
expected = set(
(u'gpu:1234:5678', u'pool:GPU', u'priority:50', u'service_account:none',
u'user:Jesus', u'use_isolate_1143123:1', u'use_cas_1143123:0'))
self.assertEqual(expected, task_request.get_automatic_tags(req, 0))
expected = set(
(u'gpu:none', u'pool:GPU', u'priority:50', u'service_account:none',
u'user:Jesus', u'use_isolate_1143123:1', u'use_cas_1143123:0'))
self.assertEqual(expected, task_request.get_automatic_tags(req, 1))
with self.assertRaises(IndexError):
task_request.get_automatic_tags(req, 2)
def test_get_automatic_tags_or_dim(self):
slices = [
task_request.TaskSlice(
expiration_secs=60,
properties=_gen_properties(dimensions={
u'gpu': [u'nv|amd'],
u'pool': [u'foo']
})),
task_request.TaskSlice(
expiration_secs=60,
properties=_gen_properties(dimensions={
u'os': [u'linux|mac|win'],
u'pool': [u'bar']
})),
]
req = _gen_request_slices(task_slices=slices)
expected = set((u'gpu:nv', u'gpu:amd', u'pool:foo', u'priority:50',
u'service_account:none', u'user:Jesus',
u'use_isolate_1143123:1', u'use_cas_1143123:0'))
self.assertEqual(expected, task_request.get_automatic_tags(req, 0))
expected = set((u'os:linux', u'os:mac', u'os:win', u'pool:bar',
u'priority:50', u'service_account:none', u'user:Jesus',
u'use_isolate_1143123:1', u'use_cas_1143123:0'))
self.assertEqual(expected, task_request.get_automatic_tags(req, 1))
def test_create_termination_task(self):
request = task_request.create_termination_task(
u'some-bot', wait_for_capacity=True)
self.assertTrue(request.task_slice(0).properties.is_terminate)
def test_new_request_key(self):
for _ in range(3):
delta = utils.utcnow() - task_request._BEGINING_OF_THE_WORLD
now = int(round(delta.total_seconds() * 1000.))
key = task_request.new_request_key()
# Remove the XOR.
key_id = key.integer_id() ^ task_pack.TASK_REQUEST_KEY_ID_MASK
timestamp = key_id >> 20
randomness = (key_id >> 4) & 0xFFFF
version = key_id & 0xF
self.assertLess(abs(timestamp - now), 1000)
self.assertEqual(1, version)
if randomness:
break
else:
self.fail('Failed to find randomness')
def test_new_request_key_zero(self):
def getrandbits(i):
self.assertEqual(i, 16)
return 0x7766
self.mock(random, 'getrandbits', getrandbits)
self.mock_now(task_request._BEGINING_OF_THE_WORLD)
key = task_request.new_request_key()
# Remove the XOR.
key_id = key.integer_id() ^ task_pack.TASK_REQUEST_KEY_ID_MASK
# 00000000000 7766 1
# ^ ^ ^
# | | |
# since 2010 | schema version
# |
# rand
self.assertEqual('0x0000000000077661', '0x%016x' % key_id)
def test_new_request_key_end(self):
def getrandbits(i):
self.assertEqual(i, 16)
return 0x7766
self.mock(random, 'getrandbits', getrandbits)
days_until_end_of_the_world = 2**43 / 24. / 60. / 60. / 1000.
num_days = int(days_until_end_of_the_world)
# Remove 1ms to not overflow.
num_seconds = ((days_until_end_of_the_world - num_days) * 24. * 60. * 60. -
0.001)
self.assertEqual(101806, num_days)
self.assertEqual(278, int(num_days / 365.3))
now = (
task_request._BEGINING_OF_THE_WORLD +
datetime.timedelta(days=num_days, seconds=num_seconds))
self.mock_now(now)
key = task_request.new_request_key()
# Remove the XOR.
key_id = key.integer_id() ^ task_pack.TASK_REQUEST_KEY_ID_MASK
# 7ffffffffff 7766 1
# ^ ^ ^
# | | |
# since 2010 | schema version
# |
# rand
self.assertEqual('0x7ffffffffff77661', '0x%016x' % key_id)
def test_validate_request_key(self):
task_request.validate_request_key(task_pack.unpack_request_key('11'))
with self.assertRaises(ValueError):
task_request.validate_request_key(ndb.Key('TaskRequest', 1))
def test_init_new_request(self):
parent = _gen_request()
# Parent entity must have a valid key id and be stored.
parent.key = task_request.new_request_key()
parent.put()
# The reference is to the TaskRunResult.
parent_id = task_pack.pack_request_key(parent.key) + u'1'
req = _gen_request(
properties=_gen_properties(
idempotent=True, relative_cwd=u'deeep', has_secret_bytes=True),
parent_task_id=parent_id)
# TaskRequest with secret must have a valid key.
req.key = task_request.new_request_key()
# Needed for the get() call below.
req.put()
sb = _gen_secret(req, 'I am a banana')
# Needed for properties_hash() call.
sb.put()
expected_properties = {
'caches': [],
'cipd_input': {
'client_package': {
'package_name': u'infra/tools/cipd/${platform}',
'path': None,
'version': u'git_revision:deadbeef',
},
'packages': [{
'package_name': u'rm',
'path': u'bin',
'version': u'git_revision:deadbeef',
}],
'server': u'https://chrome-infra-packages.appspot.com'
},
'command': [u'command1', u'arg1'],
'containment': {
u'lower_priority': False,
u'containment_type': None,
u'limit_processes': None,
u'limit_total_committed_memory': None,
},
'relative_cwd': u'deeep',
'dimensions': {
u'OS': [u'Windows-3.1.1'],
u'hostname': [u'localhost'],
u'pool': [u'default'],
},
'env': {
u'foo': u'bar',
u'joe': u'2'
},
'env_prefixes': {
u'PATH': [u'local/path']
},
'extra_args': [],
'execution_timeout_secs': 30,
'grace_period_secs': 30,
'has_secret_bytes': True,
'idempotent': True,
'inputs_ref': {
'isolated': None,
'isolatedserver': u'https://isolateserver.appspot.com',
'namespace': u'default-gzip',
},
'cas_input_root': None,
'io_timeout_secs': None,
'outputs': [],
}
expected_request = {
'authenticated': auth_testing.DEFAULT_MOCKED_IDENTITY,
'name': u'Request name',
'parent_task_id': unicode(parent_id),
'priority': 50,
'pubsub_topic': None,
'pubsub_userdata': None,
'service_account': u'none',
'tags': [
u'OS:Windows-3.1.1',
u'authenticated:user:[email protected]',
u'hostname:localhost',
u'parent_task_id:%s' % parent_id,
u'pool:default',
u'priority:50',
u'realm:none',
u'service_account:none',
u'swarming.pool.template:no_config',
u'tag:1',
u'use_cas_1143123:0',
u'use_isolate_1143123:1',
u'user:Jesus',
],
'task_slices': [{
'expiration_secs': 30,
'properties': expected_properties,
'wait_for_capacity': False,
},],
'user': u'Jesus',
'realm': None,
'realms_enabled': False,
'bot_ping_tolerance_secs': 120,
'resultdb': None,
}
actual = req.to_dict()
actual.pop('created_ts')
actual.pop('expiration_ts')
self.assertEqual(expected_request, actual)
self.assertEqual(30, req.expiration_secs)
# Intentionally hard code the hash value since it has to be deterministic.
# Other unit tests should use the calculated value.
self.assertEqual(
'c262bae20e9b1a265fa5937d67aa36f690612b0e28c8af7e38b347dd6746da65',
req.task_slice(0).properties_hash(req).encode('hex'))
# TODO(crbug.com/1115778): remove after RBE-CAS migration.
def test_init_new_request_isolated(self):
parent = _gen_request(
properties=_gen_properties(
command=[u'command1', u'arg1'],
inputs_ref={
'isolated': '0123456789012345678901234567890123456789',
'isolatedserver': 'http://localhost:1',
'namespace': 'default-gzip',
}))
# Parent entity must have a valid key id and be stored.
parent.key = task_request.new_request_key()
parent.put()
# The reference is to the TaskRunResult.
parent_id = task_pack.pack_request_key(parent.key) + u'1'
req = _gen_request(
properties=_gen_properties(idempotent=True, has_secret_bytes=True),
parent_task_id=parent_id)
# TaskRequest with secret must have a valid key.
req.key = task_request.new_request_key()
# Needed for the get() call below.
req.put()
sb = _gen_secret(req, 'I am not a banana')
# Needed for properties_hash() call.
sb.put()
expected_properties = {
'caches': [],
'cipd_input': {
'client_package': {
'package_name': u'infra/tools/cipd/${platform}',
'path': None,
'version': u'git_revision:deadbeef',
},
'packages': [{
'package_name': u'rm',
'path': u'bin',
'version': u'git_revision:deadbeef',
}],
'server': u'https://chrome-infra-packages.appspot.com'
},
'command': [u'command1', u'arg1'],
'containment': {
u'lower_priority': False,
u'containment_type': None,
u'limit_processes': None,
u'limit_total_committed_memory': None,
},
'relative_cwd': None,
'dimensions': {
u'OS': [u'Windows-3.1.1'],
u'hostname': [u'localhost'],
u'pool': [u'default'],
},
'env': {
u'foo': u'bar',
u'joe': u'2'
},
'env_prefixes': {
u'PATH': [u'local/path']
},
'extra_args': [],
'execution_timeout_secs': 30,
'grace_period_secs': 30,
'idempotent': True,
'inputs_ref': {
'isolated': None,
'isolatedserver': u'https://isolateserver.appspot.com',
'namespace': u'default-gzip',
},
'cas_input_root': None,
'io_timeout_secs': None,
'outputs': [],
'has_secret_bytes': True,
}
expected_request = {
'authenticated': auth_testing.DEFAULT_MOCKED_IDENTITY,
'name': u'Request name',
'parent_task_id': unicode(parent_id),
'priority': 50,
'pubsub_topic': None,
'pubsub_userdata': None,
'service_account': u'none',
'tags': [
u'OS:Windows-3.1.1',
u'authenticated:user:[email protected]',
u'hostname:localhost',
u'parent_task_id:%s' % parent_id,
u'pool:default',
u'priority:50',
u'realm:none',
u'service_account:none',
u'swarming.pool.template:no_config',
u'tag:1',
u'use_cas_1143123:0',
u'use_isolate_1143123:1',
u'user:Jesus',
],
'task_slices': [{
'expiration_secs': 30,
'properties': expected_properties,
'wait_for_capacity': False,
},],
'user': u'Jesus',
'realm': None,
'realms_enabled': False,
'bot_ping_tolerance_secs': 120,
'resultdb': None,
}
actual = req.to_dict()
# expiration_ts - created_ts == scheduling_expiration_secs.
actual.pop('created_ts')
actual.pop('expiration_ts')
self.assertEqual(expected_request, actual)
self.assertEqual(30, req.expiration_secs)
# Intentionally hard code the hash value since it has to be deterministic.
# Other unit tests should use the calculated value.
self.assertEqual(
'f9254eae480e442121919c503c685319ab3a903c2d7b76eac79a947afd09d425',
req.task_slice(0).properties_hash(req).encode('hex'))
def test_init_new_request_cas_input(self):
parent = _gen_request()
# Parent entity must have a valid key id and be stored.
parent.key = task_request.new_request_key()
parent.put()
# The reference is to the TaskRunResult.
parent_id = task_pack.pack_request_key(parent.key) + u'1'
cas_input_root = {
'cas_instance': u'projects/test/instances/default',
'digest': {
'hash': u'12345',
'size_bytes': 1,
}
}
req = _gen_request(
parent_task_id=parent_id,
properties=_gen_properties(
idempotent=True,
has_secret_bytes=True,
inputs_ref=None,
cas_input_root=cas_input_root,
))
# TaskRequest with secret must have a valid key.
req.key = task_request.new_request_key()
# Needed for the get() call below.
req.put()
sb = _gen_secret(req, 'I am not a banana')
# Needed for properties_hash() call.
sb.put()
expected_properties = {
'caches': [],
'cipd_input': {
'client_package': {
'package_name': u'infra/tools/cipd/${platform}',
'path': None,
'version': u'git_revision:deadbeef',
},
'packages': [{
'package_name': u'rm',
'path': u'bin',
'version': u'git_revision:deadbeef',
}],
'server': u'https://chrome-infra-packages.appspot.com'
},
'command': [u'command1', u'arg1'],
'containment': {
u'lower_priority': False,
u'containment_type': None,
u'limit_processes': None,
u'limit_total_committed_memory': None,
},
'relative_cwd': None,
'dimensions': {
u'OS': [u'Windows-3.1.1'],
u'hostname': [u'localhost'],
u'pool': [u'default'],
},
'env': {
u'foo': u'bar',
u'joe': u'2'
},
'env_prefixes': {
u'PATH': [u'local/path']
},
'extra_args': [],
'execution_timeout_secs': 30,
'grace_period_secs': 30,
'idempotent': True,
'inputs_ref': None,
'cas_input_root': cas_input_root,
'io_timeout_secs': None,
'outputs': [],
'has_secret_bytes': True,
}
expected_request = {
'authenticated': auth_testing.DEFAULT_MOCKED_IDENTITY,
'name': u'Request name',
'parent_task_id': parent_id,
'priority': 50,
'pubsub_topic': None,
'pubsub_userdata': None,
'service_account': u'none',
'tags': [
u'OS:Windows-3.1.1',
u'authenticated:user:[email protected]',
u'hostname:localhost',
u'parent_task_id:%s' % parent_id,
u'pool:default',
u'priority:50',
u'realm:none',
u'service_account:none',
u'swarming.pool.template:no_config',
u'tag:1',
u'use_cas_1143123:1',
u'use_isolate_1143123:0',
u'user:Jesus',
],
'task_slices': [{
'expiration_secs': 30,
'properties': expected_properties,
'wait_for_capacity': False,
},],
'user': u'Jesus',
'realm': None,
'realms_enabled': False,
'bot_ping_tolerance_secs': 120,
'resultdb': None,
}
actual = req.to_dict()
# expiration_ts - created_ts == scheduling_expiration_secs.
actual.pop('created_ts')
actual.pop('expiration_ts')
self.assertEqual(expected_request, actual)
self.assertEqual(30, req.expiration_secs)
# Intentionally hard code the hash value since it has to be deterministic.
# Other unit tests should use the calculated value.
self.assertEqual(
'9e1b99c20a5c523ea1ade51276230781f9ddfd3ae396e66c810612a1c5c8062a',
req.task_slice(0).properties_hash(req).encode('hex'))
def test_init_new_request_parent(self):
parent = _gen_request()
# Parent entity must have a valid key id and be stored.
parent.key = task_request.new_request_key()
parent.put()
# The reference is to the TaskRunResult.
parent_id = task_pack.pack_request_key(parent.key) + '1'
child = _gen_request(parent_task_id=parent_id)
self.assertEqual(parent_id, child.parent_task_id)
def test_init_new_request_invalid_parent_id(self):
# Must end with '1' or '2', not '0'
with self.assertRaises(ValueError):
_gen_request(parent_task_id='1d69b9f088008810')
def test_init_new_request_missing_name(self):
req = _gen_request(name=None)
with self.assertRaisesRegexp(datastore_errors.BadValueError,
'^name is missing$'):
req.put()
def test_init_new_request_idempotent(self):
request = _gen_request(properties=_gen_properties(idempotent=True))
as_dict = request.to_dict()
self.assertEqual(True,
as_dict['task_slices'][0]['properties']['idempotent'])
# Intentionally hard code the hash value since it has to be deterministic.
# Other unit tests should use the calculated value.
# Ensure the algorithm is deterministic.
self.assertEqual(
'b1230281cc4bcc8d9458dab0810c86fcfaf8e4124351f4d39517833eb9541465',
request.task_slice(0).properties_hash(request).encode('hex'))
def test_init_new_request_bot_service_account(self):
request = _gen_request(service_account='bot')
request.put()
as_dict = request.to_dict()
self.assertEqual('bot', as_dict['service_account'])
self.assertIn(u'service_account:bot', as_dict['tags'])
def test_init_new_request_RBE_CAS(self):
request = _gen_request(
properties=_gen_properties(
inputs_ref=task_request.FilesRef(
isolated='dead' * (64 / 4),
isolatedserver='astuce-service',
namespace='sha256-GCP')))
request.put()
as_dict = request.to_dict()
expected = {
'isolated':
u'deaddeaddeaddeaddeaddeaddeaddeaddeaddeaddeaddeaddeaddeaddeaddead',
'isolatedserver':
u'astuce-service',
'namespace':
u'sha256-GCP',
}
self.assertEqual(expected,
as_dict['task_slices'][0]['properties']['inputs_ref'])
def _set_pool_config_with_templates(self,
prod=None,
canary=None,
canary_chance=None,
pool_name=u'default'):
"""Builds a new pools_config.PoolConfig populated with the given
pools_config.TaskTemplate objects and assigns it into the mocked
`pools_config.get_pool_config()` method.
If prod is None, this omits the TaskTemplateDeployment entirely.
canary_chance may be supplied as >9999 (normally illegal) in order to force
the selection of canary."""
deployment = None
if prod is not None:
deployment = pools_config.TaskTemplateDeployment(
prod=prod, canary=canary, canary_chance=canary_chance)
self._pool_configs[pool_name] = pools_config.init_pool_config(
name=pool_name,
rev=u'testVersion1',
task_template_deployment=deployment,
)
def test_init_new_request_skip_template(self):
self._set_pool_config_with_templates(_gen_task_template(env={'hi': 'prod'}))
request = _gen_request(_template_apply=task_request.TEMPLATE_SKIP)
as_dict = request.to_dict()
self.assertIn(u'swarming.pool.version:testVersion1', as_dict['tags'])
self.assertIn(u'swarming.pool.template:skip', as_dict['tags'])
def test_init_new_request_missing_template(self):
self._set_pool_config_with_templates()
request = _gen_request()
as_dict = request.to_dict()
self.assertIn(u'swarming.pool.version:testVersion1', as_dict['tags'])
self.assertIn(u'swarming.pool.template:none', as_dict['tags'])
def test_init_new_request_prod_template(self):
self._set_pool_config_with_templates(
_gen_task_template(env={'hi': 'prod'}),
canary=None,
canary_chance=0, # always prefer prod serverside
)
request = _gen_request()
as_dict = request.to_dict()
self.assertIn(u'swarming.pool.version:testVersion1', as_dict['tags'])
self.assertIn(u'swarming.pool.template:prod', as_dict['tags'])
self.assertEqual(as_dict['task_slices'][0]['properties']['env']['hi'],
'prod')
def test_init_new_request_canary_template(self):
self._set_pool_config_with_templates(
_gen_task_template(env={'hi': 'prod'}),
_gen_task_template(env={'hi': 'canary'}),
canary_chance=10000, # always prefer canary serverside
)
request = _gen_request()
as_dict = request.to_dict()
self.assertIn(u'swarming.pool.version:testVersion1', as_dict['tags'])
self.assertIn(u'swarming.pool.template:canary', as_dict['tags'])
self.assertEqual(as_dict['task_slices'][0]['properties']['env']['hi'],
'canary')
def test_init_new_request_canary_never_template(self):
self._set_pool_config_with_templates(
_gen_task_template(env={'hi': 'prod'}),
_gen_task_template(env={'hi': 'canary'}),
canary_chance=10000, # always prefer canary serverside
)
request = _gen_request(_template_apply=task_request.TEMPLATE_CANARY_NEVER)
as_dict = request.to_dict()
self.assertIn(u'swarming.pool.version:testVersion1', as_dict['tags'])
self.assertIn(u'swarming.pool.template:prod', as_dict['tags'])
self.assertEqual(as_dict['task_slices'][0]['properties']['env']['hi'],
'prod')
def test_init_new_request_canary_prefer_template(self):
self._set_pool_config_with_templates(
_gen_task_template(env={'hi': 'prod'}),
_gen_task_template(env={'hi': 'canary'}),
canary_chance=0, # always prefer prod serverside
)
request = _gen_request(_template_apply=task_request.TEMPLATE_CANARY_PREFER)
as_dict = request.to_dict()
self.assertIn(u'swarming.pool.version:testVersion1', as_dict['tags'])
self.assertIn(u'swarming.pool.template:canary', as_dict['tags'])
self.assertEqual(as_dict['task_slices'][0]['properties']['env']['hi'],
'canary')
def test_init_new_request_canary_prefer_prod_template(self):
self._set_pool_config_with_templates(
_gen_task_template(env={'hi': 'prod'}),
# No canary defined, even though caller would prefer it, if available.
)
request = _gen_request(_template_apply=task_request.TEMPLATE_CANARY_PREFER)
as_dict = request.to_dict()
self.assertIn(u'swarming.pool.version:testVersion1', as_dict['tags'])
self.assertIn(u'swarming.pool.template:prod', as_dict['tags'])
self.assertEqual(as_dict['task_slices'][0]['properties']['env']['hi'],
'prod')
def test_duped(self):
# Two TestRequest with the same properties.
request_1 = _gen_request(properties=_gen_properties(idempotent=True))
now = utils.utcnow()
request_2 = _gen_request_slices(
name='Other',
user='Other',
priority=201,
created_ts=now,
manual_tags=['tag:2'],
task_slices=[
task_request.TaskSlice(
expiration_secs=129,
properties=_gen_properties(idempotent=True)),
])
self.assertEqual(
request_1.task_slice(0).properties_hash(request_1),
request_2.task_slice(0).properties_hash(request_2))
self.assertTrue(request_1.task_slice(0).properties_hash(request_1))
def test_different(self):
# Two TestRequest with different properties.
request_1 = _gen_request(
properties=_gen_properties(execution_timeout_secs=30, idempotent=True))
request_2 = _gen_request(
properties=_gen_properties(execution_timeout_secs=129, idempotent=True))
self.assertNotEqual(
request_1.task_slice(0).properties_hash(request_1),
request_2.task_slice(0).properties_hash(request_2))
# TODO(crbug.com/1115778): remove after RBE-CAS migration.
def test_TaskRequest_to_proto_isolated(self):
# Try to set as much things as possible to exercise most code paths.
def getrandbits(i):
self.assertEqual(i, 16)
return 0x7766
self.mock(random, 'getrandbits', getrandbits)
self.mock_now(task_request._BEGINING_OF_THE_WORLD)
# Parent entity must have a valid key id and be stored.
# This task uses user:Jesus, which will be inherited automatically.
parent = _gen_request()
parent.key = task_request.new_request_key()
parent.put()
# The reference is to the TaskRunResult.
parent_id = task_pack.pack_request_key(parent.key) + u'0'
parent_run_id = task_pack.pack_request_key(parent.key) + u'1'
self.mock_now(task_request._BEGINING_OF_THE_WORLD, 2)
request_props = _gen_properties(
inputs_ref={
'isolated': '0123456789012345678901234567890123456789',
'isolatedserver': 'http://localhost:1',
'namespace': 'default-gzip',
},
relative_cwd=u'subdir',
caches=[
task_request.CacheEntry(name=u'git_chromium', path=u'git_cache'),
],
cipd_input=_gen_cipd_input(
packages=[
task_request.CipdPackage(
package_name=u'foo', path=u'tool', version=u'git:12345'),
],),
idempotent=True,
outputs=[u'foo'],
has_secret_bytes=True,
containment=task_request.Containment(
lower_priority=True,
containment_type=task_request.ContainmentType.JOB_OBJECT,
limit_processes=1000,
limit_total_committed_memory=1024**3,
),
)
request = _gen_request_slices(
task_slices=[
task_request.TaskSlice(
expiration_secs=30,
properties=request_props,
wait_for_capacity=True,
),
],
# The user is ignored; the value is overridden by the parent task's
# user.
user=u'Joe',
parent_task_id=parent_run_id,
service_account=u'[email protected]',
pubsub_topic=u'projects/a/topics/abc',
pubsub_auth_token=u'sekret',
pubsub_userdata=u'obscure_reference',
)
# Necessary to have a valid task_id:
request.key = task_request.new_request_key()
# Necessary to attach a secret to the request:
request.put()
_gen_secret(request, 'I am a banana').put()
expected_props = swarming_pb2.TaskProperties(
cas_inputs=swarming_pb2.CASTree(
digest=u'0123456789012345678901234567890123456789',
server=u'http://localhost:1',
namespace=u'default-gzip',
),
cipd_inputs=[
swarming_pb2.CIPDPackage(
package_name=u'foo', version=u'git:12345', dest_path=u'tool'),
],
named_caches=[
swarming_pb2.NamedCacheEntry(
name=u'git_chromium', dest_path=u'git_cache'),
],
containment=swarming_pb2.Containment(
lower_priority=True,
containment_type=swarming_pb2.Containment.JOB_OBJECT,
limit_processes=1000,
limit_total_committed_memory=1024**3,
),
command=[u'command1', u'arg1'],
relative_cwd=u'subdir',
# extra_args cannot be specified with command.
# secret_bytes cannot be retrieved, but is included in properties_hash.
has_secret_bytes=True,
dimensions=[
swarming_pb2.StringListPair(key=u'OS', values=[u'Windows-3.1.1']),
swarming_pb2.StringListPair(key=u'hostname', values=[u'localhost']),
swarming_pb2.StringListPair(key=u'pool', values=[u'default']),
],
env=[
swarming_pb2.StringPair(key=u'foo', value=u'bar'),
swarming_pb2.StringPair(key=u'joe', value=u'2'),
],
env_paths=[
swarming_pb2.StringListPair(key=u'PATH', values=[u'local/path']),
],
execution_timeout=duration_pb2.Duration(seconds=30),
grace_period=duration_pb2.Duration(seconds=30),
idempotent=True,
outputs=[u'foo'],
)
# To be updated every time the schema changes.
props_h = 'e8718f59959d2c17d9ab1084b6fc9b3ee63e998a704de579543dd84bc1ef603a'
expected = swarming_pb2.TaskRequest(
# Scheduling.
task_slices=[
swarming_pb2.TaskSlice(
properties=expected_props,
expiration=duration_pb2.Duration(seconds=30),
wait_for_capacity=True,
properties_hash=props_h,
),
],
priority=50,
service_account=u'[email protected]',
# Information.
create_time=timestamp_pb2.Timestamp(seconds=1262304002),
name=u'Request name',
authenticated='user:[email protected]',
tags=[
u'OS:Windows-3.1.1',
u"authenticated:user:[email protected]",
u'hostname:localhost',
u'parent_task_id:%s' % parent_run_id,
u'pool:default',
u'priority:50',
u'realm:none',
u'service_account:[email protected]',
u'swarming.pool.template:no_config',
u'tag:1',
u"use_cas_1143123:0",
u"use_isolate_1143123:1",
u'user:Jesus',
],
user=u'Jesus',
# Hierarchy.
task_id=u'7d0776610',
parent_task_id=parent_id,
parent_run_id=parent_run_id,
# Notification. auth_token cannot be retrieved.
pubsub_notification=swarming_pb2.PubSub(
topic=u'projects/a/topics/abc', userdata=u'obscure_reference'),
)
actual = swarming_pb2.TaskRequest()
request.to_proto(actual)
self.assertEqual(unicode(expected), unicode(actual))
# with append_root_ids=True.
actual = swarming_pb2.TaskRequest()
request.to_proto(actual, append_root_ids=True)
expected.root_task_id = parent_id
expected.root_run_id = parent_run_id
self.assertEqual(unicode(expected), unicode(actual))
def test_TaskRequest_to_proto(self):
# Try to set as much things as possible to exercise most code paths.
def getrandbits(i):
self.assertEqual(i, 16)
return 0x7766
self.mock(random, 'getrandbits', getrandbits)
self.mock_now(task_request._BEGINING_OF_THE_WORLD)
# Grand parent entity must have a valid key id and be stored.
# This task uses user:Jesus, which will be inherited automatically.
grand_parent = _gen_request()
grand_parent.key = task_request.new_request_key()
grand_parent.put()
# Parent entity must have a valid key id and be stored.
self.mock_now(task_request._BEGINING_OF_THE_WORLD, 1)
parent = _gen_request(parent_task_id=grand_parent.task_id[:-1] + u'1')
parent.key = task_request.new_request_key()
parent.put()
parent_run_id = parent.task_id[:-1] + u'1'
self.mock_now(task_request._BEGINING_OF_THE_WORLD, 2)
request_props = _gen_properties(
inputs_ref=None,
cas_input_root={
'cas_instance': u'projects/test/instances/default',
'digest': {
'hash': u'12345',
'size_bytes': 1,
}
},
relative_cwd=u'subdir',
caches=[
task_request.CacheEntry(name=u'git_chromium', path=u'git_cache'),
],
cipd_input=_gen_cipd_input(
packages=[
task_request.CipdPackage(
package_name=u'foo', path=u'tool', version=u'git:12345'),
],),
idempotent=True,
outputs=[u'foo'],
has_secret_bytes=True,
containment=task_request.Containment(
lower_priority=True,
containment_type=task_request.ContainmentType.JOB_OBJECT,
limit_processes=1000,
limit_total_committed_memory=1024**3,
),
)
request = _gen_request_slices(
task_slices=[
task_request.TaskSlice(
expiration_secs=30,
properties=request_props,
wait_for_capacity=True,
),
],
# The user is ignored; the value is overridden by the parent task's
# user.
user=u'Joe',
parent_task_id=parent.task_id[:-1] + u'1',
service_account=u'[email protected]',
pubsub_topic=u'projects/a/topics/abc',
pubsub_auth_token=u'sekret',
pubsub_userdata=u'obscure_reference',
)
# Necessary to have a valid task_id:
request.key = task_request.new_request_key()
# Necessary to attach a secret to the request:
request.put()
_gen_secret(request, 'I am a banana').put()
expected_props = swarming_pb2.TaskProperties(
cas_input_root=swarming_pb2.CASReference(
cas_instance='projects/test/instances/default',
digest=swarming_pb2.Digest(
hash='12345', size_bytes=1),
),
cipd_inputs=[
swarming_pb2.CIPDPackage(
package_name=u'foo', version=u'git:12345', dest_path=u'tool'),
],
named_caches=[
swarming_pb2.NamedCacheEntry(
name=u'git_chromium', dest_path=u'git_cache'),
],
containment=swarming_pb2.Containment(
lower_priority=True,
containment_type=swarming_pb2.Containment.JOB_OBJECT,
limit_processes=1000,
limit_total_committed_memory=1024**3,
),
command=[u'command1', u'arg1'],
relative_cwd=u'subdir',
# extra_args cannot be specified with command.
# secret_bytes cannot be retrieved, but is included in properties_hash.
has_secret_bytes=True,
dimensions=[
swarming_pb2.StringListPair(key=u'OS', values=[u'Windows-3.1.1']),
swarming_pb2.StringListPair(key=u'hostname', values=[u'localhost']),
swarming_pb2.StringListPair(key=u'pool', values=[u'default']),
],
env=[
swarming_pb2.StringPair(key=u'foo', value=u'bar'),
swarming_pb2.StringPair(key=u'joe', value=u'2'),
],
env_paths=[
swarming_pb2.StringListPair(key=u'PATH', values=[u'local/path']),
],
execution_timeout=duration_pb2.Duration(seconds=30),
grace_period=duration_pb2.Duration(seconds=30),
idempotent=True,
outputs=[u'foo'],
)
# To be updated every time the schema changes.
props_h = '516b5f86592b0e5e3bdd9fbf715305ee6f7ddad36320775d5a945e60df67c360'
expected = swarming_pb2.TaskRequest(
# Scheduling.
task_slices=[
swarming_pb2.TaskSlice(
properties=expected_props,
expiration=duration_pb2.Duration(seconds=30),
wait_for_capacity=True,
properties_hash=props_h,
),
],
priority=50,
service_account=u'[email protected]',
# Information.
create_time=timestamp_pb2.Timestamp(seconds=1262304002),
name=u'Request name',
authenticated='user:[email protected]',
tags=[
u'OS:Windows-3.1.1',
u"authenticated:user:[email protected]",
u'hostname:localhost',
u'parent_task_id:%s' % parent_run_id,
u'pool:default',
u'priority:50',
u'realm:none',
u'service_account:[email protected]',
u'swarming.pool.template:no_config',
u'tag:1',
u"use_cas_1143123:1",
u"use_isolate_1143123:0",
u'user:Jesus',
],
user=u'Jesus',
# Hierarchy.
task_id=u'7d0776610',
parent_task_id=parent.task_id,
parent_run_id=parent_run_id,
# Notification. auth_token cannot be retrieved.
pubsub_notification=swarming_pb2.PubSub(
topic=u'projects/a/topics/abc', userdata=u'obscure_reference'),
)
actual = swarming_pb2.TaskRequest()
request.to_proto(actual)
self.assertEqual(unicode(expected), unicode(actual))
actual = swarming_pb2.TaskRequest()
expected.root_task_id = grand_parent.task_id
expected.root_run_id = grand_parent.task_id[:-1] + u'1'
request.to_proto(actual, append_root_ids=True)
self.assertEqual(unicode(expected), unicode(actual))
# With append_root_ids=True.
actual = swarming_pb2.TaskRequest()
request.to_proto(actual, append_root_ids=True)
expected.root_task_id = grand_parent.task_id
expected.root_run_id = grand_parent.task_id[:-1] + u'1'
self.assertEqual(unicode(expected), unicode(actual))
def test_TaskRequest_to_proto_empty(self):
# Assert that it doesn't throw on empty entity.
actual = swarming_pb2.TaskRequest()
task_request.TaskRequest().to_proto(actual)
self.assertEqual(swarming_pb2.TaskRequest(), actual)
def test_TaskSlice_to_proto_empty(self):
# Assert that it doesn't throw on empty entity.
request = task_request.TaskRequest()
actual = swarming_pb2.TaskSlice()
task_request.TaskSlice().to_proto(actual, request)
self.assertEqual(swarming_pb2.TaskSlice(), actual)
def test_TaskProperties_to_proto_empty(self):
# Assert that it doesn't throw on empty entity.
actual = swarming_pb2.TaskProperties()
task_request.TaskProperties().to_proto(actual)
expected = swarming_pb2.TaskProperties()
expected.grace_period.seconds = 30
self.assertEqual(expected, actual)
def test_request_bad_values(self):
with self.assertRaises(AttributeError):
_gen_request(properties=_gen_properties(foo='bar'))
def test_request_bad_values_stale_style(self):
# Old TaskRequest.properties style.
# Hack a bit the call to force the incorrect call.
req = _gen_request_slices(
task_slices=[],
expiration_ts=utils.utcnow() + datetime.timedelta(hours=1),
properties_old=_gen_properties())
with self.assertRaises(datastore_errors.BadValueError):
req.put()
def test_request_bad_values_task_slices(self):
with self.assertRaises(ValueError):
# No TaskSlice
_gen_request_slices(task_slices=[])
def _gen_slice(**props):
return task_request.TaskSlice(
expiration_secs=60, properties=_gen_properties(**props))
slices = [_gen_slice(dimensions={u'pool': [u'GPU']})]
_gen_request_slices(task_slices=slices).put()
# Limit on the maximum number of TaskSlice in a TaskRequest.
slices = [
_gen_slice(dimensions={
u'pool': [u'GPU'],
u'v': [unicode(i)]
}) for i in range(8)
]
_gen_request_slices(task_slices=slices).put()
slices = [
_gen_slice(dimensions={
u'pool': [u'GPU'],
u'v': [unicode(i)]
}) for i in range(9)
]
req = _gen_request_slices(task_slices=slices)
with self.assertRaises(datastore_errors.BadValueError):
req.put()
# Different pools.
slices = [
task_request.TaskSlice(
expiration_secs=60,
properties=_gen_properties(dimensions={u'pool': [u'GPU']})),
task_request.TaskSlice(
expiration_secs=60,
properties=_gen_properties(dimensions={u'pool': [u'other']})),
]
req = _gen_request_slices(task_slices=slices)
with self.assertRaises(datastore_errors.BadValueError):
req.put()
def test_request_bad_command(self):
req = _gen_request(properties=_gen_properties(command=[], inputs_ref=None))
with self.assertRaises(datastore_errors.BadValueError):
req.put()
with self.assertRaises(datastore_errors.BadValueError):
_gen_request(properties=_gen_properties(command={'a': 'b'}))
with self.assertRaises(datastore_errors.BadValueError):
_gen_request(properties=_gen_properties(command='python'))
_gen_request(properties=_gen_properties(command=['python'])).put()
_gen_request(properties=_gen_properties(command=[u'python'])).put()
_gen_request(properties=_gen_properties(command=[u'python'] * 128)).put()
with self.assertRaises(datastore_errors.BadValueError):
_gen_request(properties=_gen_properties(command=[u'python'] * 129)).put()
def test_request_extra_args(self):
with self.assertRaises(datastore_errors.BadValueError):
_gen_request(
properties=_gen_properties(
command=[],
extra_args=[u'python'],
inputs_ref=task_request.FilesRef(
isolated='deadbeefdeadbeefdeadbeefdeadbeefdeadbeef',
isolatedserver='http://localhost:1',
namespace='default-gzip'))).put()
with self.assertRaises(datastore_errors.BadValueError):
_gen_request(
properties=_gen_properties(
command=[u'python'],
extra_args=[u'python'],
inputs_ref=task_request.FilesRef(
isolated='deadbeefdeadbeefdeadbeefdeadbeefdeadbeef',
isolatedserver='http://localhost:1',
namespace='default-gzip'))).put()
def test_request_bad_cipd_input(self):
def mkcipdreq(idempotent=False, **cipd_input):
return _gen_request(
properties=_gen_properties(
idempotent=idempotent, cipd_input=_gen_cipd_input(**cipd_input)))
req = mkcipdreq(packages=[{}])
with self.assertRaises(datastore_errors.BadValueError):
req.put()
with self.assertRaises(datastore_errors.BadValueError):
mkcipdreq(packages=[
task_request.CipdPackage(
package_name='infra|rm', path='.', version='latest'),
])
req = mkcipdreq(
packages=[task_request.CipdPackage(package_name='rm', path='.')])
with self.assertRaises(datastore_errors.BadValueError):
req.put()
req = mkcipdreq(packages=[
task_request.CipdPackage(package_name='rm', version='latest'),
])
with self.assertRaises(datastore_errors.BadValueError):
req.put()
with self.assertRaises(datastore_errors.BadValueError):
mkcipdreq(packages=[
task_request.CipdPackage(
package_name='rm', path='/', version='latest'),
])
with self.assertRaises(datastore_errors.BadValueError):
mkcipdreq(packages=[
task_request.CipdPackage(
package_name='rm', path='/a', version='latest'),
])
with self.assertRaises(datastore_errors.BadValueError):
mkcipdreq(packages=[
task_request.CipdPackage(
package_name='rm', path='a/..', version='latest'),
])
with self.assertRaises(datastore_errors.BadValueError):
mkcipdreq(packages=[
task_request.CipdPackage(
package_name='rm', path='a/./b', version='latest'),
])
req = mkcipdreq(packages=[
task_request.CipdPackage(package_name='rm', path='.', version='latest'),
task_request.CipdPackage(package_name='rm', path='.', version='canary'),
])
with self.assertRaises(datastore_errors.BadValueError):
req.put()
req = mkcipdreq(
idempotent=True,
packages=[
task_request.CipdPackage(
package_name='rm', path='.', version='latest'),
])
with self.assertRaises(datastore_errors.BadValueError):
req.put()
with self.assertRaises(datastore_errors.BadValueError):
mkcipdreq(server='abc')
with self.assertRaises(datastore_errors.BadValueError):
mkcipdreq(
client_package=task_request.CipdPackage(
package_name='--bad package--'))
mkcipdreq().put()
mkcipdreq(packages=[
task_request.CipdPackage(package_name='rm', path='.', version='latest'),
]).put()
mkcipdreq(
client_package=task_request.CipdPackage(
package_name='infra/tools/cipd/${platform}',
version='git_revision:daedbeef'),
packages=[
task_request.CipdPackage(
package_name='rm', path='.', version='latest'),
],
server='https://chrome-infra-packages.appspot.com').put()
def test_request_bad_named_cache(self):
mkcachereq = lambda *c: _gen_request(properties=_gen_properties(caches=c)
).put()
with self.assertRaises(datastore_errors.BadValueError):
mkcachereq(task_request.CacheEntry(name='', path='git_cache'))
with self.assertRaises(datastore_errors.BadValueError):
mkcachereq(task_request.CacheEntry(name='git_chromium', path=''))
with self.assertRaises(datastore_errors.BadValueError):
mkcachereq(
task_request.CacheEntry(name='git_chromium', path='git_cache'),
task_request.CacheEntry(name='git_v8', path='git_cache'))
with self.assertRaises(datastore_errors.BadValueError):
mkcachereq(
task_request.CacheEntry(name='git_chromium', path='git_cache'),
task_request.CacheEntry(name='git_chromium', path='git_cache2'))
with self.assertRaises(datastore_errors.BadValueError):
mkcachereq(
task_request.CacheEntry(name='git_chromium', path='/git_cache'))
with self.assertRaises(datastore_errors.BadValueError):
mkcachereq(
task_request.CacheEntry(name='git_chromium', path='../git_cache'))
with self.assertRaises(datastore_errors.BadValueError):
mkcachereq(
task_request.CacheEntry(
name='git_chromium', path='git_cache/../../a'))
with self.assertRaises(datastore_errors.BadValueError):
mkcachereq(
task_request.CacheEntry(name='git_chromium', path='../git_cache'))
with self.assertRaises(datastore_errors.BadValueError):
mkcachereq(
task_request.CacheEntry(name='git_chromium', path='git_cache//a'))
with self.assertRaises(datastore_errors.BadValueError):
mkcachereq(
task_request.CacheEntry(name='git_chromium', path='a/./git_cache'))
with self.assertRaises(datastore_errors.BadValueError):
mkcachereq(task_request.CacheEntry(name='has space', path='git_cache'))
with self.assertRaises(datastore_errors.BadValueError):
mkcachereq(task_request.CacheEntry(name='CAPITAL', path='git_cache'))
mkcachereq()
mkcachereq(task_request.CacheEntry(name='git_chromium', path='git_cache'))
mkcachereq(
task_request.CacheEntry(name='git_chromium', path='git_cache'),
task_request.CacheEntry(name='build_chromium', path='out'))
mkcachereq(task_request.CacheEntry(name=u'g' * 128, path=u'git_cache'))
with self.assertRaises(datastore_errors.BadValueError):
mkcachereq(task_request.CacheEntry(name=u'g' * 129, path=u'git_cache'))
mkcachereq(task_request.CacheEntry(name=u'g', path=u'p' * 256))
with self.assertRaises(datastore_errors.BadValueError):
mkcachereq(task_request.CacheEntry(name=u'g', path=u'p' * 257))
# Too many.
c = [
task_request.CacheEntry(name=unicode(i), path=unicode(i))
for i in range(32)
]
_gen_request(properties=_gen_properties(caches=c)).put()
with self.assertRaises(datastore_errors.BadValueError):
c = [
task_request.CacheEntry(name=unicode(i), path=unicode(i))
for i in range(33)
]
_gen_request(properties=_gen_properties(caches=c)).put()
def test_request_bad_named_cache_and_cipd_input(self):
# A CIPD package and named caches cannot be mapped to the same path.
req = _gen_request(
properties=_gen_properties(
caches=[
task_request.CacheEntry(name='git_chromium', path='git_cache'),
],
cipd_input=_gen_cipd_input(packages=[
task_request.CipdPackage(
package_name='foo', path='git_cache', version='latest'),
])))
with self.assertRaises(datastore_errors.BadValueError):
req.put()
req = _gen_request(
properties=_gen_properties(
caches=[
task_request.CacheEntry(name='git_chromium', path='git_cache1'),
],
cipd_input=_gen_cipd_input(packages=[
task_request.CipdPackage(
package_name='foo', path='git_cache2', version='latest'),
]))).put()
def test_request_bad_dimensions(self):
# Type error.
with self.assertRaises(TypeError):
_gen_request(properties=_gen_properties(dimensions=[]))
with self.assertRaises(datastore_errors.BadValueError):
_gen_request(properties=_gen_properties(dimensions={}))
with self.assertRaises(datastore_errors.BadValueError):
_gen_request(
properties=_gen_properties(dimensions={
u'id': u'b',
u'a:': u'b'
}))
with self.assertRaises(datastore_errors.BadValueError):
_gen_request(
properties=_gen_properties(dimensions={
u'id': u'b',
u'a.': u'b'
}))
with self.assertRaises(datastore_errors.BadValueError):
_gen_request(
properties=_gen_properties(dimensions={
u'id': u'b',
u'a': [u'b']
}))
# >1 value for id.
with self.assertRaises(datastore_errors.BadValueError):
_gen_request(properties=_gen_properties(dimensions={u'id': [u'a', u'b']}))
# >1 value for pool.
with self.assertRaises(datastore_errors.BadValueError):
_gen_request(
properties=_gen_properties(dimensions={u'pool': [u'b', u'b']}))
_gen_request(
properties=_gen_properties(dimensions={
u'id': [u'b'],
u'pool': [u'b']
})).put()
_gen_request(
properties=_gen_properties(dimensions={
u'id': [u'b'],
u'pool': [u'b'],
u'a.': [u'c']
})).put()
_gen_request(
properties=_gen_properties(dimensions={
u'pool': [u'b'],
u'a.': [u'b', u'c']
})).put()
def test_request_bad_dimensions_key(self):
# Max # keys.
d = {u'a%s' % string.ascii_letters[i]: [unicode(i)] for i in range(31)}
d[u'pool'] = [u'a']
_gen_request(properties=_gen_properties(dimensions=d)).put()
with self.assertRaises(datastore_errors.BadValueError):
d = {u'a%s' % string.ascii_letters[i]: [unicode(i)] for i in range(32)}
d[u'pool'] = [u'a']
_gen_request(properties=_gen_properties(dimensions=d)).put()
with self.assertRaises(datastore_errors.BadValueError):
# Key regexp.
d = {u'pool': [u'default'], u'1': [u'value']}
_gen_request(properties=_gen_properties(dimensions=d)).put()
# Key length.
d = {
u'pool': [u'default'],
u'v' * config.DIMENSION_KEY_LENGTH: [u'v'],
}
_gen_request(properties=_gen_properties(dimensions=d)).put()
with self.assertRaises(datastore_errors.BadValueError):
d = {
u'pool': [u'default'],
u'v' * (config.DIMENSION_KEY_LENGTH + 1): [u'value'],
}
_gen_request(properties=_gen_properties(dimensions=d)).put()
def test_request_bad_dimensions_value(self):
# Max # values.
d = {u'pool': [u'b'], u'a.': [unicode(i) for i in range(16)]}
_gen_request(properties=_gen_properties(dimensions=d)).put()
with self.assertRaises(datastore_errors.BadValueError):
d = {u'pool': [u'b'], u'a.': [unicode(i) for i in range(17)]}
_gen_request(properties=_gen_properties(dimensions=d)).put()
# Value length.
d = {
u'pool': [u'default'],
u'v': [u'v' * config.DIMENSION_VALUE_LENGTH],
}
_gen_request(properties=_gen_properties(dimensions=d)).put()
with self.assertRaises(datastore_errors.BadValueError):
d = {
u'pool': [u'default'],
u'v': [u'v' * (config.DIMENSION_VALUE_LENGTH + 1)],
}
_gen_request(properties=_gen_properties(dimensions=d)).put()
with self.assertRaises(datastore_errors.BadValueError):
# Value with space.
d = {u'pool': [u'default'], u'v': [u'v ']}
_gen_request(properties=_gen_properties(dimensions=d)).put()
with self.assertRaises(datastore_errors.BadValueError):
# Duplicate value.
d = {u'pool': [u'default'], u'v': [u'v', u'v']}
_gen_request(properties=_gen_properties(dimensions=d)).put()
with self.assertRaisesRegexp(
datastore_errors.BadValueError,
"^dimension key u'v' has invalid value u'v||c'$"):
# Empty 'or' dimension value.
d = {u'pool': [u'default'], u'v': [u'v||c']}
_gen_request(properties=_gen_properties(dimensions=d)).put()
with self.assertRaisesRegexp(
datastore_errors.BadValueError,
"^'pool' cannot be specified more than once in dimensions "
"\[u'default|non-default'\]$"):
# Use 'or' dimension in pool.
d = {u'pool': [u'default|non-default'], u'v': [u'v']}
_gen_request(properties=_gen_properties(dimensions=d)).put()
with self.assertRaisesRegexp(
datastore_errors.BadValueError,
'possible dimension subset for \'or\' dimensions '
'should not be more than 8, but 9'):
# Too many combinations for 'or'
d = {
u'pool': [u'default'],
u'x': [u'1|2|3'],
u'y': [u'1|2|3'],
}
_gen_request(properties=_gen_properties(dimensions=d)).put()
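    # 2 * 2 * 2 = 8 possible combinations stays within the limit of 8, so the
    # request below is accepted.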
d = {
u'pool': [u'default'],
u'x': [u'1|2'],
u'y': [u'1|2'],
u'z': [u'1|2'],
}
_gen_request(properties=_gen_properties(dimensions=d)).put()
def test_request_bad_env(self):
# Type error.
with self.assertRaises(TypeError):
_gen_request(properties=_gen_properties(env=[]))
with self.assertRaises(TypeError):
_gen_request(properties=_gen_properties(env={u'a': 1}))
_gen_request(properties=_gen_properties(env={})).put()
e = {u'k': u'v'}
_gen_request(properties=_gen_properties(env=e)).put()
# Key length.
e = {u'k' * 64: u'v'}
_gen_request(properties=_gen_properties(env=e)).put()
with self.assertRaises(datastore_errors.BadValueError):
e = {u'k' * 65: u'v'}
_gen_request(properties=_gen_properties(env=e)).put()
# # keys.
e = {u'k%s' % i: u'v' for i in range(64)}
_gen_request(properties=_gen_properties(env=e)).put()
with self.assertRaises(datastore_errors.BadValueError):
e = {u'k%s' % i: u'v' for i in range(65)}
_gen_request(properties=_gen_properties(env=e)).put()
# Value length.
e = {u'k': u'v' * 1024}
_gen_request(properties=_gen_properties(env=e)).put()
with self.assertRaises(datastore_errors.BadValueError):
e = {u'k': u'v' * 1025}
_gen_request(properties=_gen_properties(env=e)).put()
def test_request_bad_env_prefixes(self):
# Type error.
with self.assertRaises(TypeError):
_gen_request(properties=_gen_properties(env_prefixes=[]))
with self.assertRaises(TypeError):
_gen_request(properties=_gen_properties(env_prefixes={u'a': 1}))
_gen_request(properties=_gen_properties(env_prefixes={})).put()
e = {u'k': [u'v']}
_gen_request(properties=_gen_properties(env_prefixes=e)).put()
# Key length.
e = {u'k' * 64: [u'v']}
_gen_request(properties=_gen_properties(env_prefixes=e)).put()
with self.assertRaises(datastore_errors.BadValueError):
e = {u'k' * 65: [u'v']}
_gen_request(properties=_gen_properties(env_prefixes=e)).put()
# # keys.
e = {u'k%s' % i: [u'v'] for i in range(64)}
_gen_request(properties=_gen_properties(env_prefixes=e)).put()
with self.assertRaises(datastore_errors.BadValueError):
e = {u'k%s' % i: [u'v'] for i in range(65)}
_gen_request(properties=_gen_properties(env_prefixes=e)).put()
# Value length.
e = {u'k': [u'v' * 1024]}
_gen_request(properties=_gen_properties(env_prefixes=e)).put()
with self.assertRaises(datastore_errors.BadValueError):
e = {u'k': [u'v' * 1025]}
_gen_request(properties=_gen_properties(env_prefixes=e)).put()
def test_request_bad_priority(self):
with self.assertRaises(datastore_errors.BadValueError):
_gen_request(priority=task_request.MAXIMUM_PRIORITY + 1)
_gen_request(priority=task_request.MAXIMUM_PRIORITY).put()
def test_request_bad_bot_ping_tolerance(self):
with self.assertRaises(datastore_errors.BadValueError):
_gen_request(
bot_ping_tolerance_secs=task_request._MAX_BOT_PING_TOLERANCE_SECS + 1)
_gen_request(
bot_ping_tolerance_secs=task_request._MIN_BOT_PING_TOLERANCE_SECS - 1)
def test_request_bad_execution_timeout(self):
    # When running locally, _MIN_TIMEOUT_SECS is set to 1, which makes it
    # impossible to pick a value below _MIN_TIMEOUT_SECS but above 0, so mock
    # it to 30 for this test.
self.mock(task_request, '_MIN_TIMEOUT_SECS', 30)
p = _gen_request(properties=_gen_properties(execution_timeout_secs=0))
with self.assertRaises(datastore_errors.BadValueError):
# Only termination task may have 0.
p.put()
with self.assertRaises(datastore_errors.BadValueError):
_gen_request(
properties=_gen_properties(
execution_timeout_secs=task_request._MIN_TIMEOUT_SECS - 1))
_gen_request(
properties=_gen_properties(
execution_timeout_secs=task_request._MIN_TIMEOUT_SECS))
with self.assertRaises(datastore_errors.BadValueError):
_gen_request(
properties=_gen_properties(
execution_timeout_secs=task_request.MAX_TIMEOUT_SECS + 1))
_gen_request(
properties=_gen_properties(
execution_timeout_secs=task_request.MAX_TIMEOUT_SECS)).put()
def test_request_bad_expiration(self):
now = utils.utcnow()
with self.assertRaises(ValueError):
_gen_request_slices(
created_ts=now,
task_slices=[
task_request.TaskSlice(
expiration_secs=None, properties=_gen_properties()),
])
with self.assertRaises(datastore_errors.BadValueError):
_gen_request_slices(
created_ts=now,
task_slices=[
task_request.TaskSlice(
expiration_secs=task_request._MIN_TIMEOUT_SECS - 1,
properties=_gen_properties()),
])
with self.assertRaises(datastore_errors.BadValueError):
_gen_request_slices(
created_ts=now,
task_slices=[
task_request.TaskSlice(
expiration_secs=task_request.MAX_EXPIRATION_SECS + 1,
properties=_gen_properties()),
])
_gen_request_slices(
created_ts=now,
task_slices=[
task_request.TaskSlice(
expiration_secs=task_request._MIN_TIMEOUT_SECS,
properties=_gen_properties()),
]).put()
_gen_request_slices(
created_ts=now,
task_slices=[
task_request.TaskSlice(
expiration_secs=task_request.MAX_EXPIRATION_SECS,
properties=_gen_properties()),
]).put()
def test_request_bad_inputs_ref(self):
# Both command and inputs_ref.isolated.
_gen_request(
properties=_gen_properties(
command=['python'],
inputs_ref=task_request.FilesRef(
isolated='deadbeefdeadbeefdeadbeefdeadbeefdeadbeef',
isolatedserver='http://localhost:1',
namespace='default-gzip'))).put()
# Bad digest.
req = _gen_request(
properties=_gen_properties(
command=['see', 'spot', 'run'],
inputs_ref=task_request.FilesRef(
isolated='deadbeef',
isolatedserver='http://localhost:1',
namespace='default-gzip')))
with self.assertRaises(datastore_errors.BadValueError):
req.put()
# inputs_ref without server/namespace.
req = _gen_request(
properties=_gen_properties(inputs_ref=task_request.FilesRef()))
with self.assertRaises(datastore_errors.BadValueError):
req.put()
# Without digest nor command.
req = _gen_request(
properties=_gen_properties(
command=[],
inputs_ref=task_request.FilesRef(
isolatedserver='https://isolateserver.appspot.com',
namespace='default-gzip^^^')))
with self.assertRaises(datastore_errors.BadValueError):
req.put()
# For 'sha256-GCP', the length must be 64.
req = _gen_request(
properties=_gen_properties(
command=[],
inputs_ref=task_request.FilesRef(
isolated='deadbeefdeadbeefdeadbeefdeadbeefdeadbeef',
isolatedserver='foo-bar',
namespace='sha256-GCP')))
with self.assertRaises(datastore_errors.BadValueError):
req.put()
# For 'sha256-GCP', the isolatedserver value must not contain '://'.
req = _gen_request(
properties=_gen_properties(
command=[],
inputs_ref=task_request.FilesRef(
isolated='dead' * (64 / 4),
isolatedserver='foo://bar',
namespace='sha256-GCP')))
with self.assertRaises(datastore_errors.BadValueError):
req.put()
def test_request_bad_cas_input_root(self):
def _gen_request_with_cas_input_root(cas_instance, digest):
return _gen_request(
properties=_gen_properties(
inputs_ref=None, # inputs_ref can't be set with cas_input_root.
cas_input_root=task_request.CASReference(
cas_instance=cas_instance, digest=digest)))
valid_cas_instance = 'projects/test/instances/default'
valid_digest = task_request.Digest(hash='12345', size_bytes=1)
# TaskRequest with a valid cas_input_root.
_gen_request_with_cas_input_root(
cas_instance=valid_cas_instance, digest=valid_digest).put()
# Missing cas_instance.
with self.assertRaises(datastore_errors.BadValueError):
_gen_request_with_cas_input_root(
cas_instance=None, digest=valid_digest).put()
# Invalid cas_instance.
with self.assertRaises(datastore_errors.BadValueError):
_gen_request_with_cas_input_root(
cas_instance='invalid_instance_name', digest=valid_digest).put()
# Missing digest.
with self.assertRaises(datastore_errors.BadValueError):
_gen_request_with_cas_input_root(
cas_instance=valid_cas_instance, digest=None).put()
# Missing digest.hash.
with self.assertRaises(datastore_errors.BadValueError):
_gen_request_with_cas_input_root(
cas_instance=valid_cas_instance,
digest=task_request.Digest(hash=None, size_bytes=1)).put()
# Missing digest.size_bytes.
with self.assertRaises(datastore_errors.BadValueError):
_gen_request_with_cas_input_root(
cas_instance=valid_cas_instance,
digest=task_request.Digest(hash='12345', size_bytes=None)).put()
def test_request_conflict_inputs(self):
req = _gen_request(
properties=_gen_properties(
inputs_ref=task_request.FilesRef(
isolated='0123456789012345678901234567890123456789',
isolatedserver=u'https://isolateserver.appspot.com',
namespace=u'default-gzip'),
cas_input_root=task_request.CASReference(
cas_instance='projects/test/instances/default',
digest=task_request.Digest(hash='12345', size_bytes=1)),
))
with self.assertRaises(datastore_errors.BadValueError) as e:
req.put()
self.assertEqual(e.exception.message,
"can't set both inputs_ref and cas_input_root")
def test_request_bad_pubsub(self):
_gen_request(pubsub_topic=u'projects/a/topics/abc').put()
with self.assertRaises(datastore_errors.BadValueError):
_gen_request(pubsub_topic=u'a')
with self.assertRaises(datastore_errors.BadValueError):
_gen_request(pubsub_topic=u'projects/a/topics/ab').put()
_gen_request(pubsub_topic=u'projects/' + u'a' * 1004 + u'/topics/abc').put()
with self.assertRaises(datastore_errors.BadValueError):
_gen_request(pubsub_topic=u'projects/' + u'a' * 1005 +
u'/topics/abc').put()
def test_request_bad_service_account(self):
_gen_request(service_account=u'none').put()
_gen_request(service_account=u'bot').put()
_gen_request(service_account=u'joe@localhost').put()
with self.assertRaises(datastore_errors.BadValueError):
_gen_request(service_account=u'joe').put()
_gen_request(service_account=u'joe@' + u'l' * 124).put()
with self.assertRaises(datastore_errors.BadValueError):
_gen_request(service_account=u'joe@' + u'l' * 125).put()
def test_request_bad_tags(self):
with self.assertRaises(datastore_errors.BadValueError):
_gen_request(manual_tags=['a']).put()
def test_request_bad_tags_too_many(self):
_gen_request(manual_tags=['a:b'] * 256).put()
with self.assertRaises(datastore_errors.BadValueError):
_gen_request(manual_tags=['a:b'] * 257).put()
def test_request_bad_tags_too_long(self):
# Minus 2 for the 'a:' prefix.
l = task_request._TAG_LENGTH - 2
_gen_request(manual_tags=['a:' + 'b' * l]).put()
with self.assertRaises(datastore_errors.BadValueError):
_gen_request(manual_tags=['a:' + 'a' * (l + 1)]).put()
def test_request_bad_realm(self):
_gen_request(realm=None).put()
_gen_request(realm='test:realm').put()
with self.assertRaises(datastore_errors.BadValueError):
_gen_request(realm='invalid_realm').put()
def test_resultdb_enable(self):
request = _gen_request(resultdb=task_request.ResultDBCfg(enable=True))
actual = swarming_pb2.TaskRequest()
request.to_proto(actual)
self.assertTrue(actual.resultdb.enable)
def test_execution_deadline(self):
self.mock_now(datetime.datetime(2020, 1, 2, 3, 4, 5))
request = _gen_request()
self.assertEqual(request.execution_deadline,
datetime.datetime(2020, 1, 2, 3, 5, 35))
def test_validate_priority(self):
with self.assertRaises(TypeError):
task_request.validate_priority(None)
with self.assertRaises(TypeError):
task_request.validate_priority('1')
with self.assertRaises(datastore_errors.BadValueError):
task_request.validate_priority(-1)
with self.assertRaises(datastore_errors.BadValueError):
task_request.validate_priority(task_request.MAXIMUM_PRIORITY + 1)
task_request.validate_priority(0)
task_request.validate_priority(1)
task_request.validate_priority(task_request.MAXIMUM_PRIORITY)
def test_datetime_to_request_base_id(self):
now = datetime.datetime(2012, 1, 2, 3, 4, 5, 123456)
self.assertEqual(0xeb5313d0300000,
task_request.datetime_to_request_base_id(now))
def test_convert_to_request_key(self):
"""Indirectly tested by API."""
now = datetime.datetime(2012, 1, 2, 3, 4, 5, 123456)
key = task_request.convert_to_request_key(now)
self.assertEqual(9157134072765480958, key.id())
def test_request_key_to_datetime(self):
key = ndb.Key(task_request.TaskRequest, 0x7f14acec2fcfffff)
# Resolution is only kept at millisecond level compared to
# datetime_to_request_base_id() by design.
self.assertEqual(
datetime.datetime(2012, 1, 2, 3, 4, 5, 123000),
task_request.request_key_to_datetime(key))
def test_request_id_to_key(self):
# Simple XOR.
self.assertEqual(
ndb.Key(task_request.TaskRequest, 0x7f14acec2fcfffff),
task_request.request_id_to_key(0xeb5313d0300000))
def test_secret_bytes(self):
task_request.SecretBytes(secret_bytes='a' * (20 * 1024)).put()
with self.assertRaises(datastore_errors.BadValueError):
task_request.SecretBytes(secret_bytes='a' * (20 * 1024 + 1)).put()
def test_cron_delete_old_task_requests(self):
# Creating 1000 tasks would make this test significantly slower.
self.mock(task_request, '_TASKS_DELETE_CHUNK_SIZE', 5)
now = utils.utcnow()
task_ids = []
for i in range(14):
self.mock_now(now, i)
request = _gen_request_slices()
request.key = task_request.new_request_key()
request.put()
task_ids.append(task_pack.pack_request_key(request.key))
    # Use an 11 second offset so that entities 12 and 13 are not old enough to
    # be deleted, while 3 GAE tasks are still created to delete the remaining
    # entities in chunks of at most 5 items.
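    # 12 of the 14 requests are old enough to delete; with the chunk size
    # mocked to 5 this yields payloads of 5, 5 and 2 task ids, as checked in
    # `expected` below.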
self.mock_now(now + task_request._OLD_TASK_REQUEST_CUT_OFF, 11)
self.assertEqual(12, task_request.cron_delete_old_task_requests())
expected = [
(
('/internal/taskqueue/cleanup/tasks/delete', 'delete-tasks'),
{
'payload': utils.encode_to_json({u'task_ids': task_ids[0:5]})
},
),
(
('/internal/taskqueue/cleanup/tasks/delete', 'delete-tasks'),
{
'payload': utils.encode_to_json({u'task_ids': task_ids[5:10]})
},
),
(
('/internal/taskqueue/cleanup/tasks/delete', 'delete-tasks'),
{
'payload': utils.encode_to_json({u'task_ids': task_ids[10:12]})
},
),
]
# task_ids[12:14] are not touched.
self.assertEqual(expected, self._enqueue_calls)
self._enqueue_calls = []
def test_task_delete_tasks(self):
# The data here should be the same as what is passed to the task queue in
# test_cron_delete_old_task_requests.
class Foo(ndb.Model):
pass
task_ids = []
for _ in range(5):
request = _gen_request_slices()
request.key = task_request.new_request_key()
request.put()
# Create a dummy child entity to ensure it's deleted too.
Foo(parent=request.key, id=1).put()
task_ids.append(task_pack.pack_request_key(request.key))
self.assertEqual(5, task_request.task_delete_tasks(task_ids))
self.assertEqual(0, task_request.TaskRequest.query().count())
self.assertEqual(0, Foo.query().count())
def test_task_bq_empty(self):
# Empty, nothing is done.
start = utils.utcnow()
end = start + datetime.timedelta(seconds=60)
self.assertEqual(0, task_request.task_bq(start, end))
def test_task_bq(self):
def getrandbits(i):
self.assertEqual(i, 16)
return 0x7766
self.mock(random, 'getrandbits', getrandbits)
payloads = []
def send_to_bq(table_name, rows):
self.assertEqual('task_requests', table_name)
payloads.append(rows)
self.mock(bq_state, 'send_to_bq', send_to_bq)
    # Generate two task requests.
now = datetime.datetime(2014, 1, 2, 3, 4, 5, 6)
start = self.mock_now(now, 10)
request_1 = _gen_request()
request_1.key = task_request.new_request_key()
run_1_id = request_1.task_id[:-1] + '1'
request_1.put()
self.mock_now(now, 20)
request_2 = _gen_request(parent_task_id=run_1_id)
request_2.key = task_request.new_request_key()
request_2.put()
end = self.mock_now(now, 30)
self.assertEqual(2, task_request.task_bq(start, end))
self.assertEqual(1, len(payloads), payloads)
actual_rows = payloads[0]
self.assertEqual(2, len(actual_rows))
expected_ids = [
# No root IDs on task 1.
(request_1.task_id, '', ''),
# Task 1 is the root of Task 2.
(request_2.task_id, request_1.task_id, run_1_id),
]
self.assertEqual(
expected_ids,
[(t.task_id, t.root_task_id, t.root_run_id) for _, t in actual_rows])
def test_yield_request_keys_by_parent_task_id(self):
parent_request = _gen_request()
parent_request.key = task_request.new_request_key()
parent_request.put()
parent_summary_key = task_pack.request_key_to_result_summary_key(
parent_request.key)
parent_summary_id = task_pack.pack_result_summary_key(parent_summary_key)
parent_run_key = task_pack.result_summary_key_to_run_result_key(
parent_summary_key, 1)
parent_run_id = task_pack.pack_run_result_key(parent_run_key)
child_request_1_key = _gen_request(parent_task_id=parent_run_id).put()
child_request_2_key = _gen_request(parent_task_id=parent_run_id).put()
it = task_request.yield_request_keys_by_parent_task_id(parent_summary_id)
expected = [child_request_1_key, child_request_2_key]
self.assertEqual(sorted(expected), sorted([k for k in it]))
def test_normalize_or_dimensions(self):
dim1 = _gen_request(
properties=_gen_properties(dimensions={u'foo':
[u'a|c|b', u'xyz']})).task_slice(0).properties.dimensions
dim2 = _gen_request(
properties=_gen_properties(dimensions={u'foo':
[u'xyz', u'c|b|a']})).task_slice(0).properties.dimensions
expected = {u'foo': [u'a|b|c', u'xyz']}
self.assertEqual(dim1, expected)
self.assertEqual(dim1, dim2)
if __name__ == '__main__':
if '-v' in sys.argv:
unittest.TestCase.maxDiff = None
logging.basicConfig(
level=logging.DEBUG if '-v' in sys.argv else logging.ERROR)
unittest.main()
| apache-2.0 | -3,309,384,925,261,021,000 | 36.843257 | 80 | 0.60541 | false | 3.539186 | true | false | false |
Clemson-DPA/dpa-pipe | dpa/app/session.py | 1 | 9786 |
from abc import ABCMeta, abstractmethod, abstractproperty
import importlib
import os
import shlex
import subprocess
import socket
import time
import rpyc
from dpa.app.entity import EntityRegistry
from dpa.env.vars import DpaVars
from dpa.ptask.area import PTaskArea
from dpa.ptask import PTaskError, PTask
from dpa.singleton import Singleton
# -----------------------------------------------------------------------------
class SessionRegistry(Singleton):
# -------------------------------------------------------------------------
def init(self):
self._registry = {}
# -------------------------------------------------------------------------
def current(self):
for registered_cls in self._registry.values():
if registered_cls.current():
return registered_cls()
return None
# -------------------------------------------------------------------------
def register(self, cls):
self._registry[cls.app_name] = cls
# -----------------------------------------------------------------------------
class Session(object):
__metaclass__ = ABCMeta
app_name = None
# -------------------------------------------------------------------------
@classmethod
def current(cls):
return None
# -------------------------------------------------------------------------
def __init__(self):
pass
# -------------------------------------------------------------------------
@abstractmethod
def close(self):
"""Close the current file."""
# -------------------------------------------------------------------------
def list_entities(self, categories=None):
"""List entities in the session."""
entities = []
entity_classes = EntityRegistry().get_entity_classes(
self.__class__.app_name)
for entity_class in entity_classes:
entities.extend(entity_class.list(self))
if categories:
filtered = [e for e in entities if e.category in categories]
else:
filtered = entities
return filtered
# -------------------------------------------------------------------------
@classmethod
def open_file(self, filepath):
"""Open a new session with the supplied file."""
# -------------------------------------------------------------------------
@abstractmethod
def save(self, filepath=None):
"""Save the current session. Save to the file path if provided."""
# -------------------------------------------------------------------------
@abstractproperty
def in_session(self):
"""Returns True if inside a current app session."""
# -------------------------------------------------------------------------
def init_module(self, module_path):
_module = None
if self.in_session:
try:
_module = importlib.import_module(module_path)
except ImportError:
pass # will raise below
if not _module:
raise SessionError(
"Failed to initialize session. " + \
"'{mod}' module could not be imported.".format(mod=module_path)
)
return _module
# -------------------------------------------------------------------------
def require_executable(self, executable):
"""Returns the full path for the supplied executable name."""
(path, file_name) = os.path.split(executable)
# path already included
if path:
if not os.path.isfile(executable):
raise SessionError("Unable to locate executable: " + executable)
elif not os.access(executable, os.X_OK):
raise SessionError("File is not executable: " + executable)
else:
return executable
else:
bin_paths = DpaVars.path()
bin_paths.get()
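            # No directory was supplied, so look for the executable in each
            # directory of the configured path list.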
for path in bin_paths.list:
executable_path = os.path.join(path, executable)
if (os.path.isfile(executable_path) and
os.access(executable_path, os.X_OK)):
return executable_path
raise SessionError("Unable to locate executable: " + executable)
# -------------------------------------------------------------------------
@property
def app_name(self):
return self.__class__.app_name
# -------------------------------------------------------------------------
@property
def ptask_area(self):
"""Return the current ptask area for this session."""
if not hasattr(self, '_ptask_area'):
self._ptask_area = PTaskArea.current()
return self._ptask_area
# -------------------------------------------------------------------------
@property
def ptask(self):
if not hasattr(self, '_ptask'):
ptask_area = self.ptask_area
if not ptask_area.spec:
self._ptask = None
else:
try:
self._ptask = PTask.get(ptask_area.spec)
except PTaskError as e:
raise SessionError("Unable to determine ptask.")
return self._ptask
# -------------------------------------------------------------------------
@property
def ptask_version(self):
"""Return the current ptask version for this session."""
if not hasattr(self, '_ptask_version'):
ptask = self.ptask
if not ptask:
self._ptask_version = None
else:
self._ptask_version = ptask.latest_version
return self._ptask_version
# -----------------------------------------------------------------------------
class RemoteMixin(object):
__metaclass__ = ABCMeta
# -------------------------------------------------------------------------
def __init__(self, remote=False):
self._remote = remote
# -------------------------------------------------------------------------
def __del__(self):
self.shutdown()
# -------------------------------------------------------------------------
def __enter__(self):
return self
# -------------------------------------------------------------------------
def __exit__(self, exc_type, exc_value, traceback):
self.shutdown()
# -------------------------------------------------------------------------
@staticmethod
def _get_port():
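        # Bind to port 0 so the OS assigns a free ephemeral port, note the
        # port number, then release the socket for the server to reuse.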
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("",0))
port = sock.getsockname()[1]
sock.close()
return port
# -------------------------------------------------------------------------
@property
def remote(self):
"""Returns True if in a session, False otherwise."""
return self._remote
# -------------------------------------------------------------------------
@property
def remote_connection(self):
if not hasattr(self, '_remote_connection'):
self._remote_connection = self._connect_remote()
return self._remote_connection
# -------------------------------------------------------------------------
@abstractproperty
def server_executable(self):
"""The executable for starting the remote app server."""
# -------------------------------------------------------------------------
def shutdown(self):
if hasattr(self, '_remote_connection'):
try:
self._remote_connection.root.shutdown()
except EOFError:
# this is the expected error on shutdown
pass
else:
self._remote_connection = None
# -------------------------------------------------------------------------
def init_module(self, module_path):
_module = None
if self.remote:
# need to give time for standalone app to import properly
tries = 0
            while not _module and tries < 30:
try:
self.remote_connection.execute("import " + module_path)
_module = getattr(
self.remote_connection.modules, module_path)
break
except ImportError:
tries += 1
time.sleep(1)
if not _module:
self.shutdown()
elif self.in_session:
try:
_module = importlib.import_module(module_path)
except ImportError:
pass # will raise below
if not _module:
raise SessionError(
"Failed to initialize session. " + \
"'{mod}' module could not be imported.".format(mod=module_path)
)
return _module
# -------------------------------------------------------------------------
def _connect_remote(self):
port = self._get_port()
cmd = "{cmd} {port}".format(cmd=self.server_executable, port=port)
args = shlex.split(cmd)
subprocess.Popen(args)
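        # The app-specific server executable is launched with the chosen port;
        # the loop below polls until it accepts an rpyc connection.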
connection = None
tries = 0
        while not connection and tries < 30:
try:
connection = rpyc.classic.connect("localhost", port)
break
except socket.error:
tries += 1
time.sleep(1)
if not connection:
raise SessionError("Unable connect to remote session.")
return connection
# -----------------------------------------------------------------------------
class SessionError(Exception):
pass
| mit | 835,993,178,283,954,700 | 30.365385 | 80 | 0.41263 | false | 5.856373 | false | false | false |
h2non/paco | paco/wait.py | 1 | 3118 | # -*- coding: utf-8 -*-
import asyncio
from .assertions import isiter
from .concurrent import ConcurrentExecutor
@asyncio.coroutine
def wait(*coros_or_futures, limit=0, timeout=None, loop=None,
return_exceptions=False, return_when='ALL_COMPLETED'):
"""
Wait for the Futures and coroutine objects given by the sequence
futures to complete, with optional concurrency limit.
Coroutines will be wrapped in Tasks.
``timeout`` can be used to control the maximum number of seconds to
wait before returning. timeout can be an int or float.
If timeout is not specified or None, there is no limit to the wait time.
If ``return_exceptions`` is True, exceptions in the tasks are treated the
same as successful results, and gathered in the result list; otherwise,
the first raised exception will be immediately propagated to the
returned future.
``return_when`` indicates when this function should return.
It must be one of the following constants of the concurrent.futures module.
All futures must share the same event loop.
This functions is mostly compatible with Python standard
``asyncio.wait()``.
Arguments:
*coros_or_futures (iter|list):
an iterable collection yielding coroutines functions.
limit (int):
optional concurrency execution limit. Use ``0`` for no limit.
timeout (int/float):
maximum number of seconds to wait before returning.
return_exceptions (bool):
exceptions in the tasks are treated the same as successful results,
instead of raising them.
return_when (str):
indicates when this function should return.
loop (asyncio.BaseEventLoop):
optional event loop to use.
*args (mixed):
optional variadic argument to pass to the coroutines function.
Returns:
tuple: Returns two sets of Future: (done, pending).
Raises:
TypeError: in case of invalid coroutine object.
ValueError: in case of empty set of coroutines or futures.
TimeoutError: if execution takes more than expected.
Usage::
async def sum(x, y):
return x + y
done, pending = await paco.wait(
sum(1, 2),
sum(3, 4))
[task.result() for task in done]
# => [3, 7]
"""
# Support iterable as first argument for better interoperability
if len(coros_or_futures) == 1 and isiter(coros_or_futures[0]):
coros_or_futures = coros_or_futures[0]
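    # e.g. wait([sum(1, 2), sum(3, 4)]) behaves the same as
    # wait(sum(1, 2), sum(3, 4)).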
# If no coroutines to schedule, return empty list
# Mimics asyncio behaviour.
if len(coros_or_futures) == 0:
raise ValueError('paco: set of coroutines/futures is empty')
# Create concurrent executor
pool = ConcurrentExecutor(limit=limit, loop=loop,
coros=coros_or_futures)
# Wait until all the tasks finishes
return (yield from pool.run(timeout=timeout,
return_when=return_when,
return_exceptions=return_exceptions))
| mit | 1,045,240,907,255,369,900 | 36.119048 | 79 | 0.650096 | false | 4.479885 | false | false | false |
akopich/spark | python/pyspark/java_gateway.py | 56 | 5851 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import atexit
import os
import sys
import select
import signal
import shlex
import socket
import platform
from subprocess import Popen, PIPE
if sys.version >= '3':
xrange = range
from py4j.java_gateway import java_import, JavaGateway, GatewayClient
from pyspark.find_spark_home import _find_spark_home
from pyspark.serializers import read_int
def launch_gateway(conf=None):
"""
    Launch the JVM gateway used by PySpark.
    :param conf: spark configuration passed to spark-submit
    :return: a JavaGateway connected to the launched JVM gateway
"""
if "PYSPARK_GATEWAY_PORT" in os.environ:
gateway_port = int(os.environ["PYSPARK_GATEWAY_PORT"])
else:
SPARK_HOME = _find_spark_home()
# Launch the Py4j gateway using Spark's run command so that we pick up the
# proper classpath and settings from spark-env.sh
on_windows = platform.system() == "Windows"
script = "./bin/spark-submit.cmd" if on_windows else "./bin/spark-submit"
command = [os.path.join(SPARK_HOME, script)]
if conf:
for k, v in conf.getAll():
command += ['--conf', '%s=%s' % (k, v)]
submit_args = os.environ.get("PYSPARK_SUBMIT_ARGS", "pyspark-shell")
if os.environ.get("SPARK_TESTING"):
submit_args = ' '.join([
"--conf spark.ui.enabled=false",
submit_args
])
command = command + shlex.split(submit_args)
# Start a socket that will be used by PythonGatewayServer to communicate its port to us
callback_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
callback_socket.bind(('127.0.0.1', 0))
callback_socket.listen(1)
callback_host, callback_port = callback_socket.getsockname()
env = dict(os.environ)
env['_PYSPARK_DRIVER_CALLBACK_HOST'] = callback_host
env['_PYSPARK_DRIVER_CALLBACK_PORT'] = str(callback_port)
# Launch the Java gateway.
# We open a pipe to stdin so that the Java gateway can die when the pipe is broken
if not on_windows:
# Don't send ctrl-c / SIGINT to the Java gateway:
def preexec_func():
signal.signal(signal.SIGINT, signal.SIG_IGN)
proc = Popen(command, stdin=PIPE, preexec_fn=preexec_func, env=env)
else:
# preexec_fn not supported on Windows
proc = Popen(command, stdin=PIPE, env=env)
gateway_port = None
# We use select() here in order to avoid blocking indefinitely if the subprocess dies
# before connecting
while gateway_port is None and proc.poll() is None:
timeout = 1 # (seconds)
readable, _, _ = select.select([callback_socket], [], [], timeout)
if callback_socket in readable:
gateway_connection = callback_socket.accept()[0]
# Determine which ephemeral port the server started on:
gateway_port = read_int(gateway_connection.makefile(mode="rb"))
gateway_connection.close()
callback_socket.close()
if gateway_port is None:
raise Exception("Java gateway process exited before sending the driver its port number")
# In Windows, ensure the Java child processes do not linger after Python has exited.
# In UNIX-based systems, the child process can kill itself on broken pipe (i.e. when
# the parent process' stdin sends an EOF). In Windows, however, this is not possible
# because java.lang.Process reads directly from the parent process' stdin, contending
# with any opportunity to read an EOF from the parent. Note that this is only best
# effort and will not take effect if the python process is violently terminated.
if on_windows:
# In Windows, the child process here is "spark-submit.cmd", not the JVM itself
# (because the UNIX "exec" command is not available). This means we cannot simply
# call proc.kill(), which kills only the "spark-submit.cmd" process but not the
# JVMs. Instead, we use "taskkill" with the tree-kill option "/t" to terminate all
# child processes in the tree (http://technet.microsoft.com/en-us/library/bb491009.aspx)
def killChild():
Popen(["cmd", "/c", "taskkill", "/f", "/t", "/pid", str(proc.pid)])
atexit.register(killChild)
# Connect to the gateway
gateway = JavaGateway(GatewayClient(port=gateway_port), auto_convert=True)
# Import the classes used by PySpark
java_import(gateway.jvm, "org.apache.spark.SparkConf")
java_import(gateway.jvm, "org.apache.spark.api.java.*")
java_import(gateway.jvm, "org.apache.spark.api.python.*")
java_import(gateway.jvm, "org.apache.spark.ml.python.*")
java_import(gateway.jvm, "org.apache.spark.mllib.api.python.*")
# TODO(davies): move into sql
java_import(gateway.jvm, "org.apache.spark.sql.*")
java_import(gateway.jvm, "org.apache.spark.sql.hive.*")
java_import(gateway.jvm, "scala.Tuple2")
return gateway
| apache-2.0 | 2,350,691,943,521,753,000 | 45.070866 | 100 | 0.655785 | false | 4.002052 | false | false | false |
MatriX-Coder/wsploit | modules/wjoomlacomponents.py | 1 | 8322 | #!/usr/bin/python
# Wsploit Project
'''
this is simple joomla
components scanner
'''
try:
import urllib2, Queue
except:
    print 'You need the urllib2 and Queue libraries installed.'
try:
from threading import Thread
except:
    print 'You need the threading library installed.'
try:
from time import sleep
except:
    print 'You need the time library installed.'
paths = [
'/components/com_tag',
'/components/com_virtuemart',
'/components/com_jvehicles',
'/components/com_s5clanroster',
'/components/com_fireboard',
'/components/com_fabrik',
'/components/com_jinc',
'/components/com_xcloner-backupandrestore',
'/components/com_dshop',
'/components/com_ponygallery',
'/components/com_bearleague',
'/components/com_obsuggest',
'/components/com_alameda',
'/components/com_estateagent',
'/components/com_collector',
'/components/com_qcontacts',
'/components/com_niceajaxpoll',
'/components/com_xmap',
'/components/com_team',
'/components/com_joomnik',
'/components/com_question',
'/components/com_jmsfileseller',
'/components/com_rsfiles',
'/components/com_versioning',
'/components/com_hello',
'/components/com_calcbuilder',
'/components/com_jmsfileseller',
'/components/com_xmovie',
'/components/com_people',
'/components/com_idoblog',
'/components/com_adsmanager',
'/components/com_xgallery',
'/components/com_alfurqan15x',
'/components/com_alfurqan',
'/components/com_billyportfolio',
'/components/com_jimtawl',
'/components/com_content',
'/components/com_jfuploader',
'/components/com_kunena',
'/components/com_jooproperty',
'/components/com_jsupport',
'/components/com_markt',
'/components/com_img',
'/components/com_clanlist',
'/components/com_clan',
'/components/com_ckforms',
'/components/com_dcnews',
'/components/com_connect',
'/components/com_rsappt_pro2',
'/components/com_techfolio',
'/components/com_zcalendar',
'/components/com_tpjobs',
'/components/com_simpleshop',
'/components/com_sef',
'/components/com_searchlog',
'/components/com_contact',
'/components/com_enmasse',
'/components/com_elite_experts',
'/components/com_ezautos',
'/components/com_jgen',
'/components/com_jphone',
'/components/com_mosets',
'/components/com_jefaqpro',
'/components/com_picsell',
'/components/com_ongallery',
'/components/com_equipment',
'/components/com_zoomportfolio',
'/components/com_amblog',
'/components/com_joltcard',
'/components/com_jp_jobs',
'/components/com_bfquiztrial',
'/components/com_qpersonel',
'/components/com_pandafminigames',
'/components/com_golfcourseguid',
'/components/com_jejob',
'/components/com_jeajaxeventcalendar',
'/components/com_jradio',
'/components/com_spidercatalog',
'/components/com_commedia',
'/components/com_fss',
'/components/com_icagenda',
'/components/com_spidercalendar',
'/components/com_joomgalaxy',
'/components/com_ornekek',
'/components/com_weblinks',
'/components/com_rokmodule',
'/components/com_discussions',
'/components/com_hmcommunity',
'/components/com_eslamiat',
'/components/com_listing',
'/components/com_jeemasms',
'/components/com_yjcontactus',
'/components/com_timereturns',
'/components/com_jce',
'/components/com_joomtouch',
'/components/com_jdirectory',
'/components/com_jesubmit',
'/components/com_sobi2',
'/components/com_acooldebate',
'/components/com_booklibrary',
'/components/com_acymailing',
'/components/com_doqment',
'/components/com_allcinevid',
'/components/com_jotloader',
'/components/com_jeauto',
'/components/com_ccboard',
'/components/com_ccinvoices',
'/components/com_flipwall',
'/components/com_sponsorwall',
'/components/com_cbe',
'/components/com_jscalendar',
'/components/com_restaurantguide',
'/components/com_nkc',
'/components/com_aardvertiser',
'/components/com_clantools',
'/components/com_remository',
'/components/com_dateconverter',
'/components/com_wmtpic',
'/components/com_donateprocess',
'/components/com_gamesbox',
'/components/com_jcafe',
'/components/com_awd_song',
'/components/com_picasa2gallery',
'/components/com_ybggal',
'/components/com_joomdocs',
'/components/com_answers',
'/components/com_galleryxml',
'/components/com_oziogallery2',
'/components/com_listbingo',
'/components/com_easygb',
'/components/com_jtickets',
'/components/com_jesectionfinder',
'/components/com_realtyna',
'/components/com_community',
'/components/com_jomestate',
'/components/com_jtickets',
'/components/com_cinema',
'/components/com_jstore',
'/components/com_annonces',
'/components/com_lead',
'/components/com_sar_news',
'/components/com_chronocontact',
'/components/com_chronoconnectivity',
'/components/com_djartgallery',
'/components/com_quran',
'/components/com_g2bridge',
'/components/com_reservations',
'/components/com_jepoll',
'/components/com_mycar',
'/components/com_mediqna',
'/components/com_zelig',
'/components/com_bookmarks',
'/components/com_hotproperty',
'/components/com_jombib',
'/components/com_store',
'/components/com_mosforms',
'/components/com_comprofiler',
'/components/com_crowdsource',
'/components/com_camp',
'/components/com_mscomment',
'/components/com_extcalendar',
'/components/com_imoti',
'/components/com_product',
'/components/com_event',
'/components/com_simpledownload',
'/components/com_news',
'/components/com_article',
'/components/com_jequoteform',
'/components/com_konsultasi',
'/components/com_sebercart',
'/components/com_php',
'/components/com_jinc',
'/components/com_mytube',
'/components/com_jbudgetsmagic',
'/components/com_surveymanager',
'/components/com_jreservation',
'/components/com_foobla_suggestions',
'/components/com_djcatalog',
'/components/com_turtushout',
'/components/com_alphauserpoints',
'/components/com_lucygames',
'/components/com_bfsurvey_profree',
'/components/com_tpdugg',
'/components/com_joomloc',
'/components/com_joomlub',
'/components/com_artportal',
'/components/com_agora',
'/components/com_gameserver',
'/components/com_digifolio',
'/components/com_bca-rss-syndicator',
'/components/com_expose',
'/components/com_equotes',
'/components/com_media',
'/components/com_misterestate',
'/components/com_wrapper',
'/components/com_mailto',
'/components/com_autartimonial',
'/components/com_artforms',
'/components/com_redshop',
'/components/com_staticxt',
'/components/com_spa',
'/components/com_jomtube',
'/components/com_golfcourseguide',
'/components/com_huruhelpdesk',
'/components/com_joomdle',
'/components/com_youtube',
'/components/com_joomla-visites',
'/components/com_ttvideo',
'/components/com_appointinator',
'/components/com_photomapgallery',
'/components/com_spielothek',
'/components/com_pbbooking',
'/components/com_beamospetition',
'/components/com_neorecruit',
'/components/com_cgtestimonial',
'/components/com_jgrid',
'/components/com_zina',
'/components/com_pro_desk',
'/components/com_user',
'/components/com_k2',
'/components/com_rsbook_15',
'/components/com_gk3_photoslide',
'/components/com_jvideodirect',
'/components/com_jcalpro',
'/components/com_banners',
'/components/com_datsogallery',
'/components/com_joomradio',
'/components/com_jfbconnect',
'/components/com_myblog',
'/components/com_phocamaps',
'/components/com_contact_enhanced',
'/components/com_aicontactsafe',
'/components/com_poll']
def one():
def test(target,path):
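        # Request the component path on the target site; an HTTP 200 response
        # is taken to mean the component is installed.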
if 'http://' not in target:
target = 'http://'+target
bb = target+path
try:
a = urllib2.urlopen(bb)
c = a.getcode()
if c == 200:
print 'Found ---> '+path[12:]
except urllib2.URLError:
pass
thrdlst = []
target = raw_input('\nEnter site : ')
for path in paths:
t = Thread(target=test , args=(target,path))
t.start()
thrdlst.append(t)
sleep(0.009)
for b in thrdlst:
b.join()
def lista():
path = raw_input('\nEnter List Path : ')
sites = open(path,'r')
sites = sites.readlines()
print '\n'
for site in sites:
if 'http://' not in site:
site = 'http://'+site
site = site.strip()
print '\n[*] Target : %s\n' % site
for path in paths:
bb = site+path
try:
a = urllib2.urlopen(bb)
c = a.getcode()
if c == 200:
print 'Found ---> '+path[12:]
except urllib2.URLError:
pass
def init():
print '\n[1]-Single URL'
print '[2]-List Of URLs\n'
line_1 = "Enter Option : "
choose = raw_input(line_1)
if choose.isdigit():
choose = int(choose)
pass
else :
print "Choose From List Bro"
exit()
if choose == 1:
one()
if choose == 2:
lista()
| artistic-2.0 | 5,440,260,959,330,500,000 | 24.685185 | 55 | 0.727469 | false | 2.923077 | false | false | false |
anentropic/django-oscar | tests/integration/offer/availability_tests.py | 40 | 1143 | from django.test import TestCase
from oscar.apps.offer import models
from oscar.test.factories import (
create_order, OrderDiscountFactory, UserFactory)
class TestAPerUserConditionalOffer(TestCase):
def setUp(self):
self.offer = models.ConditionalOffer(max_user_applications=1)
self.user = UserFactory()
def test_is_available_with_no_applications(self):
self.assertTrue(self.offer.is_available())
def test_max_applications_is_correct_when_no_applications(self):
self.assertEqual(1, self.offer.get_max_applications(self.user))
def test_max_applications_is_correct_when_equal_applications(self):
order = create_order(user=self.user)
OrderDiscountFactory(
order=order, offer_id=self.offer.id, frequency=1)
self.assertEqual(0, self.offer.get_max_applications(self.user))
def test_max_applications_is_correct_when_more_applications(self):
order = create_order(user=self.user)
OrderDiscountFactory(
order=order, offer_id=self.offer.id, frequency=5)
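        # Only one application is allowed per user and five are already
        # recorded, so no further applications remain.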
self.assertEqual(0, self.offer.get_max_applications(self.user))
| bsd-3-clause | -853,793,773,920,993,200 | 37.1 | 71 | 0.712161 | false | 3.675241 | true | false | false |
uskudnik/ggrc-core | src/ggrc/migrations/versions/20130910232450_53ef72c8a867_remove_old_audit_tab.py | 2 | 13296 | """Remove old audit tables
Revision ID: 53ef72c8a867
Revises: 526117e15ce4
Create Date: 2013-09-10 23:24:50.751098
"""
# revision identifiers, used by Alembic.
revision = '53ef72c8a867'
down_revision = '526117e15ce4'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
NOT_NULL_COLS = [
('control_assessments', 'pbc_list_id'),
('control_assessments', 'control_id'),
('system_controls', 'system_id'),
('system_controls', 'control_id'),
('responses', 'request_id'),
('responses', 'system_id'),
]
EXPLICIT_INDEXES = [
('control_assessments', 'control_id', 'controls', 'control_assessments_ibfk_1'),
('control_assessments', 'pbc_list_id', 'pbc_lists', 'control_assessments_ibfk_2'),
('system_controls', 'system_id', 'systems', 'system_controls_ibfk_3'),
('system_controls', 'control_id', 'controls', 'system_controls_ibfk_1'),
('responses', 'request_id', 'requests', 'responses_ibfk_1'),
('responses', 'system_id', 'systems', 'responses_ibfk_2'),
]
UNIQUE_CONSTRAINTS = [('control_assessments', ['pbc_list_id', 'control_id']),
('system_controls', ['system_id', 'control_id']),
('responses', ['request_id', 'system_id']),
]
def create_explicit_index(table, column, referred_table, constraint_name):
" Explicit indexes need to be created to work around http://bugs.mysql.com/bug.php?id=21395 "
op.drop_constraint(constraint_name, table, type_='foreignkey')
op.create_index('ix_' + column, table, [column])
op.create_foreign_key(constraint_name, table, referred_table, [column], ['id'])
def drop_explicit_index(table, column, referred_table, constraint_name):
op.drop_constraint(constraint_name, table, type_='foreignkey')
op.drop_index('ix_' + column, table)
op.create_foreign_key(constraint_name, table, referred_table, [column], ['id'])
def upgrade():
op.drop_table(u'system_controls')
op.drop_table(u'meetings')
op.drop_table(u'population_samples')
op.drop_table(u'responses')
op.drop_table(u'requests')
op.drop_table(u'control_assessments')
op.drop_table(u'pbc_lists')
op.drop_table(u'cycles')
op.drop_table(u'transactions')
def downgrade():
op.create_table(u'transactions',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'modified_by_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column(u'created_at', sa.DATETIME(), nullable=True),
sa.Column(u'updated_at', sa.DATETIME(), nullable=True),
sa.Column(u'description', sa.TEXT(), nullable=True),
sa.Column(u'title', sa.VARCHAR(length=250), nullable=True),
sa.Column(u'system_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column(u'context_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['context_id'], [u'contexts.id'], name=u'fk_transactions_contexts'),
sa.ForeignKeyConstraint(['system_id'], [u'systems.id'], name=u'transactions_ibfk_1'),
sa.PrimaryKeyConstraint(u'id'),
mysql_default_charset=u'utf8',
mysql_engine=u'InnoDB'
)
op.create_table(u'cycles',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'modified_by_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column(u'created_at', sa.DATETIME(), nullable=True),
sa.Column(u'updated_at', sa.DATETIME(), nullable=True),
sa.Column(u'description', sa.TEXT(), nullable=True),
sa.Column(u'start_at', sa.DATE(), nullable=True),
sa.Column(u'complete', sa.BOOLEAN, nullable=False),
sa.Column(u'title', sa.VARCHAR(length=250), nullable=True),
sa.Column(u'audit_firm', sa.VARCHAR(length=250), nullable=True),
sa.Column(u'audit_lead', sa.VARCHAR(length=250), nullable=True),
sa.Column(u'status', sa.VARCHAR(length=250), nullable=True),
sa.Column(u'notes', sa.TEXT(), nullable=True),
sa.Column(u'end_at', sa.DATE(), nullable=True),
sa.Column(u'program_id', sa.INTEGER(), autoincrement=False, nullable=False),
sa.Column(u'report_due_at', sa.DATE(), nullable=True),
sa.Column(u'context_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['context_id'], [u'contexts.id'], name=u'fk_cycles_contexts'),
sa.ForeignKeyConstraint(['program_id'], [u'programs.id'], name=u'cycles_ibfk_1'),
sa.PrimaryKeyConstraint(u'id'),
mysql_default_charset=u'utf8',
mysql_engine=u'InnoDB'
)
op.create_table(u'pbc_lists',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'modified_by_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column(u'created_at', sa.DATETIME(), nullable=True),
sa.Column(u'updated_at', sa.DATETIME(), nullable=True),
sa.Column(u'audit_cycle_id', sa.INTEGER(), autoincrement=False, nullable=False),
sa.Column(u'context_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['audit_cycle_id'], [u'cycles.id'], name=u'pbc_lists_ibfk_1'),
sa.ForeignKeyConstraint(['context_id'], [u'contexts.id'], name=u'fk_pbc_lists_contexts'),
sa.PrimaryKeyConstraint(u'id'),
mysql_default_charset=u'utf8',
mysql_engine=u'InnoDB'
)
op.create_table(u'control_assessments',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'modified_by_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column(u'created_at', sa.DATETIME(), nullable=True),
sa.Column(u'updated_at', sa.DATETIME(), nullable=True),
sa.Column(u'pbc_list_id', sa.INTEGER(), autoincrement=False, nullable=False),
sa.Column(u'control_id', sa.INTEGER(), autoincrement=False, nullable=False),
sa.Column(u'control_version', sa.VARCHAR(length=250), nullable=True),
sa.Column(u'internal_tod', sa.BOOLEAN, nullable=True),
sa.Column(u'internal_toe', sa.BOOLEAN, nullable=True),
sa.Column(u'external_tod', sa.BOOLEAN, nullable=True),
sa.Column(u'external_toe', sa.BOOLEAN, nullable=True),
sa.Column(u'notes', sa.TEXT(), nullable=True),
sa.Column(u'context_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['context_id'], [u'contexts.id'], name=u'fk_control_assessments_contexts'),
sa.ForeignKeyConstraint(['control_id'], [u'controls.id'], name=u'control_assessments_ibfk_1'),
sa.ForeignKeyConstraint(['pbc_list_id'], [u'pbc_lists.id'], name=u'control_assessments_ibfk_2'),
sa.PrimaryKeyConstraint(u'id'),
mysql_default_charset=u'utf8',
mysql_engine=u'InnoDB'
)
op.create_table(u'requests',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'modified_by_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column(u'created_at', sa.DATETIME(), nullable=True),
sa.Column(u'updated_at', sa.DATETIME(), nullable=True),
sa.Column(u'pbc_list_id', sa.INTEGER(), autoincrement=False, nullable=False),
sa.Column(u'type_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column(u'pbc_control_code', sa.VARCHAR(length=250), nullable=True),
sa.Column(u'pbc_control_desc', sa.TEXT(), nullable=True),
sa.Column(u'request', sa.TEXT(), nullable=True),
sa.Column(u'test', sa.TEXT(), nullable=True),
sa.Column(u'notes', sa.TEXT(), nullable=True),
sa.Column(u'company_responsible', sa.VARCHAR(length=250), nullable=True),
sa.Column(u'auditor_responsible', sa.VARCHAR(length=250), nullable=True),
sa.Column(u'date_requested', sa.DATETIME(), nullable=True),
sa.Column(u'status', sa.VARCHAR(length=250), nullable=True),
sa.Column(u'control_assessment_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column(u'response_due_at', sa.DATE(), nullable=True),
sa.Column(u'context_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['context_id'], [u'contexts.id'], name=u'fk_requests_contexts'),
sa.ForeignKeyConstraint(['control_assessment_id'], [u'control_assessments.id'], name=u'requests_ibfk_1'),
sa.ForeignKeyConstraint(['pbc_list_id'], [u'pbc_lists.id'], name=u'requests_ibfk_2'),
sa.PrimaryKeyConstraint(u'id'),
mysql_default_charset=u'utf8',
mysql_engine=u'InnoDB'
)
op.create_table(u'responses',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'modified_by_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column(u'created_at', sa.DATETIME(), nullable=True),
sa.Column(u'updated_at', sa.DATETIME(), nullable=True),
sa.Column(u'request_id', sa.INTEGER(), autoincrement=False, nullable=False),
sa.Column(u'system_id', sa.INTEGER(), autoincrement=False, nullable=False),
sa.Column(u'status', sa.VARCHAR(length=250), nullable=True),
sa.Column(u'context_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['context_id'], [u'contexts.id'], name=u'fk_responses_contexts'),
sa.ForeignKeyConstraint(['request_id'], [u'requests.id'], name=u'responses_ibfk_1'),
sa.ForeignKeyConstraint(['system_id'], [u'systems.id'], name=u'responses_ibfk_2'),
sa.PrimaryKeyConstraint(u'id'),
mysql_default_charset=u'utf8',
mysql_engine=u'InnoDB'
)
op.create_table(u'population_samples',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'modified_by_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column(u'created_at', sa.DATETIME(), nullable=True),
sa.Column(u'updated_at', sa.DATETIME(), nullable=True),
sa.Column(u'response_id', sa.INTEGER(), autoincrement=False, nullable=False),
sa.Column(u'population_document_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column(u'population', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column(u'sample_worksheet_document_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column(u'samples', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column(u'sample_evidence_document_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column(u'context_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['context_id'], [u'contexts.id'], name=u'fk_population_samples_contexts'),
sa.ForeignKeyConstraint(['population_document_id'], [u'documents.id'], name=u'population_samples_ibfk_1'),
sa.ForeignKeyConstraint(['response_id'], [u'responses.id'], name=u'population_samples_ibfk_2'),
sa.ForeignKeyConstraint(['sample_evidence_document_id'], [u'documents.id'], name=u'population_samples_ibfk_3'),
sa.ForeignKeyConstraint(['sample_worksheet_document_id'], [u'documents.id'], name=u'population_samples_ibfk_4'),
sa.PrimaryKeyConstraint(u'id'),
mysql_default_charset=u'utf8',
mysql_engine=u'InnoDB'
)
op.create_table(u'meetings',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'modified_by_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column(u'created_at', sa.DATETIME(), nullable=True),
sa.Column(u'updated_at', sa.DATETIME(), nullable=True),
sa.Column(u'response_id', sa.INTEGER(), autoincrement=False, nullable=False),
sa.Column(u'start_at', sa.DATETIME(), nullable=True),
sa.Column(u'calendar_url', sa.VARCHAR(length=250), nullable=True),
sa.Column(u'context_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['context_id'], [u'contexts.id'], name=u'fk_meetings_contexts'),
sa.ForeignKeyConstraint(['response_id'], [u'responses.id'], name=u'meetings_ibfk_1'),
sa.PrimaryKeyConstraint(u'id'),
mysql_default_charset=u'utf8',
mysql_engine=u'InnoDB'
)
op.create_table(u'system_controls',
sa.Column(u'id', sa.INTEGER(), nullable=False),
sa.Column(u'modified_by_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column(u'created_at', sa.DATETIME(), nullable=True),
sa.Column(u'updated_at', sa.DATETIME(), nullable=True),
sa.Column(u'system_id', sa.INTEGER(), autoincrement=False, nullable=False),
sa.Column(u'control_id', sa.INTEGER(), autoincrement=False, nullable=False),
sa.Column(u'state', sa.INTEGER(), autoincrement=False, nullable=False),
sa.Column(u'cycle_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column(u'context_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['context_id'], [u'contexts.id'], name=u'fk_system_controls_contexts'),
sa.ForeignKeyConstraint(['control_id'], [u'controls.id'], name=u'system_controls_ibfk_1'),
sa.ForeignKeyConstraint(['cycle_id'], [u'cycles.id'], name=u'system_controls_ibfk_2'),
sa.ForeignKeyConstraint(['system_id'], [u'systems.id'], name=u'system_controls_ibfk_3'),
sa.PrimaryKeyConstraint(u'id'),
mysql_default_charset=u'utf8',
mysql_engine=u'InnoDB'
)
for table, column in NOT_NULL_COLS:
        op.alter_column(table, column, nullable=False, existing_type=sa.INTEGER)
for table, column, referred_table, constraint_name in EXPLICIT_INDEXES:
create_explicit_index(table, column, referred_table, constraint_name)
for table, columns in UNIQUE_CONSTRAINTS:
op.create_unique_constraint('uq_' + table, table, columns)
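# Editor's note: `create_explicit_index` is a project helper defined elsewhere in
# this migration module (only its call site is shown above). The sketch below is
# an assumption about its shape, included purely to illustrate the loop over
# EXPLICIT_INDEXES; the real helper may differ.
def _create_explicit_index_sketch(table, column, referred_table, constraint_name):
    # drop the FK, add an explicit index on the column, then restore the FK
    op.drop_constraint(constraint_name, table, type_='foreignkey')
    op.create_index('ix_' + table + '_' + column, table, [column])
    op.create_foreign_key(constraint_name, table, referred_table, [column], ['id'])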
| apache-2.0 | -4,135,967,233,158,390,000 | 54.865546 | 116 | 0.675842 | false | 3.203084 | false | false | false |
ryokochang/Slab-GCS | ExtLibs/Mavlink/pymavlink/generator/lib/genxmlif/xmlifApi.py | 12 | 54395 | #
# genxmlif, Release 0.9.0
# file: xmlifapi.py
#
# API (interface) classes for generic interface package
#
# history:
# 2007-06-29 rl created, classes extracted from xmlifbase.py
#
# Copyright (c) 2005-2008 by Roland Leuthe. All rights reserved.
#
# --------------------------------------------------------------------
# The generic XML interface is
#
# Copyright (c) 2005-2008 by Roland Leuthe
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
__author__ = "Roland Leuthe <[email protected]>"
__date__ = "08. August 2008"
__version__ = "0.9.0"
import string
import os
import re
import copy
from types import TupleType, StringTypes
from xml.dom import EMPTY_PREFIX, EMPTY_NAMESPACE
from xmlifUtils import processWhitespaceAction, NsNameTupleFactory, splitQName, nsNameToQName, escapeCdata, escapeAttribute
########################################
# XML interface base class
# All not implemented methods have to be overloaded by the derived class!!
#
class XmlInterfaceBase:
"""XML interface base class.
All not implemented methods have to be overloaded by the derived class!!
"""
def __init__(self, verbose, useCaching, processXInclude):
"""Constructor of class XmlInterfaceBase.
Input parameter:
'verbose': 0 or 1: controls verbose print output for module genxmlif
'useCaching': 0 or 1: controls usage of caching for module genxmlif
'processXInclude': 0 or 1: controls XInclude processing during parsing
"""
self.verbose = verbose
self.useCaching = useCaching
self.processXInclude = processXInclude
# set default wrapper classes
self.setTreeWrapperClass (XmlTreeWrapper)
self.setElementWrapperClass (XmlElementWrapper)
def createXmlTree (self, namespace, xmlRootTagName, attributeDict={}, publicId=None, systemId=None):
"""Create a new XML TreeWrapper object (wrapper for DOM document or elementtree).
Input parameter:
'namespace': not yet handled (for future use)
'xmlRootTagName': specifies the tag name of the root element
'attributeDict': contains the attributes of the root node (optional)
'publicId': forwarded to contained DOM tree (unused for elementtree)
'systemId': forwarded to contained DOM tree (unused for elementtree)
Returns the created XML tree wrapper object.
Method has to be implemented by derived classes!
"""
raise NotImplementedError
def parse (self, filePath, baseUrl="", ownerDoc=None):
"""Call the XML parser for 'file'.
Input parameter:
'filePath': a file path or an URI
'baseUrl': if specified, it is used e.g. as base path for schema files referenced inside the XML file.
'ownerDoc': only used in case of 4DOM (forwarded to 4DOM parser).
Returns the respective XML tree wrapper object for the parsed XML file.
Method has to be implemented by derived classes!
"""
raise NotImplementedError
def parseString (self, text, baseUrl="", ownerDoc=None):
"""Call the XML parser for 'text'.
Input parameter:
'text': contains the XML string to be parsed
'baseUrl': if specified, it is used e.g. as base path for schema files referenced inside the XML string.
'ownerDoc': only used in case of 4DOM (forwarded to 4DOM parser).
Returns the respective XML tree wrapper object for the parsed XML 'text' string.
Method has to be implemented by derived classes!
"""
raise NotImplementedError
def setTreeWrapperClass (self, treeWrapperClass):
"""Set the tree wrapper class which shall be used by this interface.
Input parameter:
treeWrapperClass: tree wrapper class
"""
self.treeWrapperClass = treeWrapperClass
def setElementWrapperClass (self, elementWrapperClass):
"""Set the element wrapper classes which shall be used by this interface.
Input parameter:
elementWrapperClass: element wrapper class
"""
self.elementWrapperClass = elementWrapperClass
def getXmlIfType (self):
"""Retrieve the type of the XML interface."""
return self.xmlIfType
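#++++++++++++ usage sketch (editor's addition, not part of the original module) ++++++++++++
# A minimal illustration of how a concrete XML interface object (a subclass of
# XmlInterfaceBase provided elsewhere in the genxmlif package) might be used.
# The file name "example.xml" is purely illustrative.
def _xmlInterfaceUsageSketch (xmlIf):
    # parse an existing XML file into a tree wrapper
    treeWrapper = xmlIf.parse ("example.xml")
    rootTagName = treeWrapper.getRootNode().getTagName()
    # create a new tree with root tag "config" and one attribute
    newTreeWrapper = xmlIf.createXmlTree (None, "config", {"version": "1.0"})
    return rootTagName, newTreeWrapper.printTree (prettyPrint=1)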
########################################
# Tree wrapper API (interface class)
#
class XmlTreeWrapper:
"""XML tree wrapper API.
Contains a DOM tree or an elementtree (depending on used XML parser)
"""
def __init__(self, xmlIf, tree, useCaching):
"""Constructor of wrapper class XmlTreeWrapper.
Input parameter:
'xmlIf': used XML interface class
'tree': DOM tree or elementtree which is wrapped by this object
'useCaching': 1 if caching shall be used inside genxmlif, otherwise 0
"""
self.xmlIf = xmlIf
self.__tree = tree
self.__useCaching = useCaching
def createElement (self, tupleOrLocalName, attributeDict=None, curNs=[]):
"""Create an ElementWrapper object.
Input parameter:
tupleOrLocalName: tag name of element node to be created
(tuple of namespace and localName or only localName if no namespace is used)
attributeDict: attributes for this elements
curNs: namespaces for scope of this element
Returns an ElementWrapper object containing the created element node.
"""
nsName = NsNameTupleFactory(tupleOrLocalName)
elementNode = self.__tree.xmlIfExtCreateElement(nsName, attributeDict, curNs)
return self.xmlIf.elementWrapperClass(elementNode, self, curNs)
def cloneTree (self):
"""Creates a copy of a whole XML DOM tree."""
rootElementWrapperCopy = self.getRootNode().cloneNode(deep=1)
treeWrapperCopy = self.__class__(self.xmlIf,
self.__tree.xmlIfExtCloneTree(rootElementWrapperCopy.element),
self.__useCaching)
for elementWrapper in rootElementWrapperCopy.getIterator():
elementWrapper.treeWrapper = treeWrapperCopy
return treeWrapperCopy
def getRootNode (self):
"""Retrieve the wrapper object of the root element of the contained XML tree.
Returns the ElementWrapper object of the root element.
"""
return self.__tree.xmlIfExtGetRootNode().xmlIfExtElementWrapper
def getTree (self):
"""Retrieve the contained XML tree.
Returns the contained XML tree object (internal DOM tree wrapper or elementtree).
"""
return self.__tree
def printTree (self, prettyPrint=0, printElementValue=1, encoding=None):
"""Return the string representation of the contained XML tree.
Input parameter:
'prettyPrint': aligns the columns of the attributes of childNodes
            'printElementValue': controls if the element values are printed or not.
Returns a string with the string representation of the whole XML tree.
"""
if not encoding:
encoding = "utf-8"
if encoding != "utf-8" and encoding != "us-ascii":
text = "<?xml version='1.0' encoding='%s'?>\n" % encoding
else:
text = ""
return text + self.getRootNode().printNode(deep=1, prettyPrint=prettyPrint, printElementValue=printElementValue, encoding=encoding)
def useCaching (self):
"""Return 1 if caching should be used for the contained XML tree."""
return self.__useCaching
def setExternalCacheUsage (self, used):
"""Set external cache usage for the whole tree
unlink commands are ignored if used by an external cache
Input parameter:
used: 0 or 1 (used by external cache)
"""
self.getRootNode().setExternalCacheUsage (used, deep=1)
def unlink (self):
"""Break circular references of the complete XML tree.
To be called if the XML tree is not longer used => garbage collection!
"""
self.getRootNode().unlink()
def __str__ (self):
"""Return the string representation of the contained XML tree."""
return self.printTree()
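#++++++++++++ usage sketch (editor's addition) ++++++++++++
# Building and printing a small tree via the wrapper API. 'treeWrapper' is
# assumed to be an XmlTreeWrapper obtained from a concrete interface class
# (see createXmlTree above); tag and attribute names are illustrative.
def _treeWrapperUsageSketch (treeWrapper):
    root = treeWrapper.getRootNode()
    # append a child element with one attribute and a text value
    child = root.appendChild ("entry", {"name": "timeout"})
    child.setElementValue ("30")
    xmlText = treeWrapper.printTree (prettyPrint=1)
    # break circular references when the tree is no longer needed
    treeWrapper.unlink()
    return xmlText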
########################################
# Element wrapper API (interface class)
#
class XmlElementWrapper:
"""XML element wrapper API.
Contains a XML element node
All not implemented methods have to be overloaded by the derived class!!
"""
def __init__(self, element, treeWrapper, curNs=[], initAttrSeq=1):
"""Constructor of wrapper class XmlElementWrapper.
Input parameter:
element: XML element node which is wrapped by this object
treeWrapper: XML tree wrapper class the current element belongs to
curNs: namespaces for scope of this element
"""
self.element = element
self.element.xmlIfExtElementWrapper = self
self.treeWrapper = treeWrapper
self.nodeUsedByExternalCache = 0
if self.__useCaching():
self.__childrenCache = {}
self.__firstChildCache = {}
self.__qNameAttrCache = {}
self.baseUrl = None
self.absUrl = None
self.filePath = None
self.startLineNumber = None
self.endLineNumber = None
self.curNs = curNs[:]
self.attributeSequence = []
if initAttrSeq:
self.attributeSequence = self.getAttributeDict().keys()
def unlink (self):
"""Break circular references of this element and its children."""
for childWrapper in self.getChildren():
childWrapper.unlink()
if not self.isUsedByExternalCache():
self.element.xmlIfExtUnlink()
def cloneNode (self, deep, cloneCallback=None):
"""Create a copy of the current element wrapper.
The reference to the parent node is set to None!"""
elementCopy = self.element.xmlIfExtCloneNode()
elementWrapperCopy = self.__class__(elementCopy, self.treeWrapper, initAttrSeq=0)
elementWrapperCopy.treeWrapper = None
elementWrapperCopy.baseUrl = self.baseUrl
elementWrapperCopy.absUrl = self.absUrl
elementWrapperCopy.filePath = self.filePath
elementWrapperCopy.startLineNumber = self.startLineNumber
elementWrapperCopy.endLineNumber = self.endLineNumber
elementWrapperCopy.curNs = self.curNs[:]
elementWrapperCopy.attributeSequence = self.attributeSequence[:]
if cloneCallback: cloneCallback(elementWrapperCopy)
if deep:
for childElement in self.element.xmlIfExtGetChildren():
childWrapperElementCopy = childElement.xmlIfExtElementWrapper.cloneNode(deep, cloneCallback)
childWrapperElementCopy.element.xmlIfExtSetParentNode(elementWrapperCopy.element)
elementWrapperCopy.element.xmlIfExtAppendChild(childWrapperElementCopy.element)
return elementWrapperCopy
def clearNodeCache (self):
"""Clear all caches used by this element wrapper which contains element wrapper references."""
self.__clearChildrenCache()
def isUsedByExternalCache (self):
"""Check if this node is used by an external cache.
unlink commands are ignored if used by an external cache"""
return self.nodeUsedByExternalCache
def setExternalCacheUsage (self, used, deep=1):
"""Set external cache usage for this node and its children
unlink commands are ignored if used by an external cache
Input parameter:
used: 0 or 1 (used by external cache)
deep: 0 or 1: controls if the child elements are also marked as used by external cache
"""
self.nodeUsedByExternalCache = used
if deep:
for childWrapper in self.getChildren():
childWrapper.setExternalCacheUsage (used, deep)
##########################################################
# attributes of the current node can be accessed via key operator
def __getitem__(self, tupleOrAttrName):
"""Attributes of the contained element node can be accessed via key operator.
Input parameter:
tupleOrAttrName: name of the attribute (tuple of namespace and attributeName or only attributeName)
Returns the attribute value.
"""
attrValue = self.getAttribute (tupleOrAttrName)
if attrValue != None:
return attrValue
else:
raise AttributeError, "Attribute %s not found!" %(repr(tupleOrAttrName))
def __setitem__(self, tupleOrAttrName, attributeValue):
"""Attributes of the contained element node can be accessed via key operator.
Input parameter:
tupleOrAttrName: name of the attribute (tuple of namespace and attributeName or only attributeName)
attributeValue: attribute value to be set
"""
self.setAttribute (tupleOrAttrName, attributeValue)
#++++++++++++ methods concerning the tag name ++++++++++++++++++++++++
def getTagName (self):
"""Retrieve the (complete) tag name of the contained element node
Returns the (complete) tag name of the contained element node
"""
return self.element.xmlIfExtGetTagName()
def getLocalName (self):
"""Retrieve the local name (without namespace) of the contained element node
Returns the local name (without namespace) of the contained element node
"""
try:
return self.__localNameCache
except:
prefix, localName = splitQName (self.getTagName())
if self.__useCaching():
self.__localNameCache = localName
return localName
def getNamespaceURI (self):
"""Retrieve the namespace URI of the contained element node
Returns the namespace URI of the contained element node (None if no namespace is used).
"""
try:
return self.__nsUriCache
except:
prefix = self.element.xmlIfExtGetNamespaceURI()
if self.__useCaching():
self.__nsUriCache = prefix
return prefix
def getNsName (self):
"""Retrieve a tuple (namespace, localName) of the contained element node
Returns a tuple (namespace, localName) of the contained element node (namespace is None if no namespace is used).
"""
try:
return self.__nsNameCache
except:
nsName = NsNameTupleFactory( (self.getNamespaceURI(), self.getLocalName()) )
if self.__useCaching():
self.__nsNameCache = nsName
return nsName
def getQName (self):
"""Retrieve a string prefix and localName of the contained element node
Returns a string "prefix:localName" of the contained element node
"""
return self.nsName2QName(self.getNsName())
def getPrefix (self):
"""Retrieve the namespace prefix of the contained element node
Returns the namespace prefix of the contained element node (None if no namespace is used).
"""
return self.getNsPrefix(self.getNsName())
#++++++++++++ methods concerning print support ++++++++++++++++++++++++
def __str__ (self):
"""Retrieve the textual representation of the contained element node."""
return self.printNode()
def printNode (self, indent="", deep=0, prettyPrint=0, attrMaxLengthDict={}, printElementValue=1, encoding=None):
"""Retrieve the textual representation of the contained element node.
Input parameter:
indent: indentation to be used for string representation
deep: 0 or 1: controls if the child element nodes are also printed
prettyPrint: aligns the columns of the attributes of childNodes
attrMaxLengthDict: dictionary containing the length of the attribute values (used for prettyprint)
printElementValue: 0 or 1: controls if the element value is printed
Returns the string representation
"""
patternXmlTagShort = '''\
%(indent)s<%(qName)s%(attributeString)s/>%(tailText)s%(lf)s'''
patternXmlTagLong = '''\
%(indent)s<%(qName)s%(attributeString)s>%(elementValueString)s\
%(lf)s%(subTreeString)s\
%(indent)s</%(qName)s>%(tailText)s%(lf)s'''
subTreeStringList = []
tailText = ""
addIndent = ""
lf = ""
if deep:
childAttrMaxLengthDict = {}
if prettyPrint:
for childNode in self.getChildren():
childNode.__updateAttrMaxLengthDict(childAttrMaxLengthDict)
lf = "\n"
addIndent = " "
for childNode in self.getChildren():
subTreeStringList.append (childNode.printNode(indent + addIndent, deep, prettyPrint, childAttrMaxLengthDict, printElementValue))
tailText = escapeCdata(self.element.xmlIfExtGetElementTailText(), encoding)
attributeStringList = []
for attrName in self.getAttributeList():
attrValue = escapeAttribute(self.getAttribute(attrName), encoding)
if prettyPrint:
try:
align = attrMaxLengthDict[attrName]
except:
align = len(attrValue)
else:
align = len(attrValue)
qName = self.nsName2QName(attrName)
attributeStringList.append (' %s="%s"%*s' %(qName, attrValue, align - len(attrValue), ""))
attributeString = string.join (attributeStringList, "")
qName = self.getQName()
if printElementValue:
if deep:
elementValueString = escapeCdata(self.element.xmlIfExtGetElementText(), encoding)
else:
elementValueString = escapeCdata(self.getElementValue(ignoreEmtpyStringFragments=1), encoding)
else:
elementValueString = ""
if subTreeStringList == [] and elementValueString == "":
printPattern = patternXmlTagShort
else:
if subTreeStringList != []:
subTreeString = string.join (subTreeStringList, "")
else:
subTreeString = ""
printPattern = patternXmlTagLong
return printPattern % vars()
#++++++++++++ methods concerning the parent of the current node ++++++++++++++++++++++++
def getParentNode (self):
"""Retrieve the ElementWrapper object of the parent element node.
Returns the ElementWrapper object of the parent element node.
"""
parent = self.element.xmlIfExtGetParentNode()
if parent != None:
return parent.xmlIfExtElementWrapper
else:
return None
#++++++++++++ methods concerning the children of the current node ++++++++++++++++++++++++
def getChildren (self, tagFilter=None):
"""Retrieve the ElementWrapper objects of the children element nodes.
Input parameter:
tagFilter: retrieve only the children with this tag name ('*' or None returns all children)
Returns all children of this element node which match 'tagFilter' (list)
"""
if tagFilter in (None, '*', (None, '*')):
children = self.element.xmlIfExtGetChildren()
elif tagFilter[1] == '*':
# handle (namespace, '*')
children = filter(lambda child:child.xmlIfExtElementWrapper.getNamespaceURI() == tagFilter[0],
self.element.xmlIfExtGetChildren())
else:
nsNameFilter = NsNameTupleFactory(tagFilter)
try:
children = self.__childrenCache[nsNameFilter]
except:
children = self.element.xmlIfExtGetChildren(nsNameFilter)
if self.__useCaching():
self.__childrenCache[nsNameFilter] = children
return map(lambda child: child.xmlIfExtElementWrapper, children)
def getChildrenNS (self, namespaceURI, tagFilter=None):
"""Retrieve the ElementWrapper objects of the children element nodes using a namespace.
Input parameter:
namespaceURI: the namespace URI of the children or None
tagFilter: retrieve only the children with this localName ('*' or None returns all children)
Returns all children of this element node which match 'namespaceURI' and 'tagFilter' (list)
"""
return self.getChildren((namespaceURI, tagFilter))
def getChildrenWithKey (self, tagFilter=None, keyAttr=None, keyValue=None):
"""Retrieve the ElementWrapper objects of the children element nodes.
Input parameter:
tagFilter: retrieve only the children with this tag name ('*' or None returns all children)
keyAttr: name of the key attribute
keyValue: value of the key
        Returns all children of this element node which match 'tagFilter' and whose 'keyAttr' attribute equals 'keyValue' (list)
"""
children = self.getChildren(tagFilter)
return filter(lambda child:child[keyAttr]==keyValue, children)
    def getFirstChild (self, tagFilter=None):
        """Retrieve the ElementWrapper object of the first child element node.
Input parameter:
tagFilter: retrieve only the first child with this tag name ('*' or None: no filter)
Returns the first child of this element node which match 'tagFilter'
or None if no suitable child element was found
"""
if tagFilter in (None, '*', (None, '*')):
element = self.element.xmlIfExtGetFirstChild()
elif tagFilter[1] == '*':
# handle (namespace, '*')
children = filter(lambda child:child.xmlIfExtElementWrapper.getNamespaceURI() == tagFilter[0],
self.element.xmlIfExtGetChildren())
try:
element = children[0]
except:
element = None
else:
nsNameFilter = NsNameTupleFactory(tagFilter)
try:
element = self.__firstChildCache[nsNameFilter]
except:
element = self.element.xmlIfExtGetFirstChild(nsNameFilter)
if self.__useCaching():
self.__firstChildCache[nsNameFilter] = element
if element != None:
return element.xmlIfExtElementWrapper
else:
return None
    def getFirstChildNS (self, namespaceURI, tagFilter=None):
        """Retrieve the ElementWrapper object of the first child element node using a namespace.
Input parameter:
namespaceURI: the namespace URI of the children or None
tagFilter: retrieve only the first child with this localName ('*' or None: no filter)
Returns the first child of this element node which match 'namespaceURI' and 'tagFilter'
or None if no suitable child element was found
"""
return self.getFirstChild ((namespaceURI, tagFilter))
    def getFirstChildWithKey (self, tagFilter=None, keyAttr=None, keyValue=None):
        """Retrieve the ElementWrapper object of the first child element node matching a key attribute.
Input parameter:
tagFilter: retrieve only the children with this tag name ('*' or None returns all children)
keyAttr: name of the key attribute
keyValue: value of the key
        Returns the first child of this element node which matches 'tagFilter' and whose 'keyAttr' attribute equals 'keyValue', or None if no such child exists
"""
children = self.getChildren(tagFilter)
childrenWithKey = filter(lambda child:child[keyAttr]==keyValue, children)
if childrenWithKey != []:
return childrenWithKey[0]
else:
return None
    def getElementsByTagName (self, tagFilter=None):
        """Retrieve all descendant ElementWrapper objects of the current node whose tag names match 'tagFilter'.
Input parameter:
tagFilter: retrieve only the children with this tag name ('*' or None returns all descendants)
Returns all descendants of this element node which match 'tagFilter' (list)
"""
if tagFilter in (None, '*', (None, '*'), (None, None)):
descendants = self.element.xmlIfExtGetElementsByTagName()
elif tagFilter[1] == '*':
# handle (namespace, '*')
descendants = filter(lambda desc:desc.xmlIfExtElementWrapper.getNamespaceURI() == tagFilter[0],
self.element.xmlIfExtGetElementsByTagName())
else:
nsNameFilter = NsNameTupleFactory(tagFilter)
descendants = self.element.xmlIfExtGetElementsByTagName(nsNameFilter)
return map(lambda descendant: descendant.xmlIfExtElementWrapper, descendants)
    def getElementsByTagNameNS (self, namespaceURI, tagFilter=None):
        """Retrieve all descendant ElementWrapper objects of the current node whose tag names match 'namespaceURI' and 'tagFilter'.
Input parameter:
namespaceURI: the namespace URI of the descendants or None
tagFilter: retrieve only the descendants with this localName ('*' or None returns all descendants)
Returns all descendants of this element node which match 'namespaceURI' and 'tagFilter' (list)
"""
return self.getElementsByTagName((namespaceURI, tagFilter))
def getIterator (self, tagFilter=None):
"""Creates a tree iterator. The iterator loops over this element
and all subelements, in document order, and returns all elements
        whose tag names match 'tagFilter'.
Input parameter:
tagFilter: retrieve only the children with this tag name ('*' or None returns all descendants)
Returns all element nodes which match 'tagFilter' (list)
"""
if tagFilter in (None, '*', (None, '*'), (None, None)):
matchingElements = self.element.xmlIfExtGetIterator()
elif tagFilter[1] == '*':
# handle (namespace, '*')
matchingElements = filter(lambda desc:desc.xmlIfExtElementWrapper.getNamespaceURI() == tagFilter[0],
self.element.xmlIfExtGetIterator())
else:
nsNameFilter = NsNameTupleFactory(tagFilter)
matchingElements = self.element.xmlIfExtGetIterator(nsNameFilter)
return map(lambda e: e.xmlIfExtElementWrapper, matchingElements)
def appendChild (self, tupleOrLocalNameOrElement, attributeDict={}):
"""Append an element node to the children of the current node.
Input parameter:
tupleOrLocalNameOrElement: (namespace, localName) or tagName or ElementWrapper object of the new child
attributeDict: attribute dictionary containing the attributes of the new child (optional)
If not an ElementWrapper object is given, a new ElementWrapper object is created with tupleOrLocalName
Returns the ElementWrapper object of the new child.
"""
if not isinstance(tupleOrLocalNameOrElement, self.__class__):
childElementWrapper = self.__createElement (tupleOrLocalNameOrElement, attributeDict)
else:
childElementWrapper = tupleOrLocalNameOrElement
self.element.xmlIfExtAppendChild (childElementWrapper.element)
self.__clearChildrenCache(childElementWrapper.getNsName())
return childElementWrapper
def insertBefore (self, tupleOrLocalNameOrElement, refChild, attributeDict={}):
"""Insert an child element node before the given reference child of the current node.
Input parameter:
tupleOrLocalNameOrElement: (namespace, localName) or tagName or ElementWrapper object of the new child
refChild: reference child ElementWrapper object
attributeDict: attribute dictionary containing the attributes of the new child (optional)
If not an ElementWrapper object is given, a new ElementWrapper object is created with tupleOrLocalName
Returns the ElementWrapper object of the new child.
"""
if not isinstance(tupleOrLocalNameOrElement, self.__class__):
childElementWrapper = self.__createElement (tupleOrLocalNameOrElement, attributeDict)
else:
childElementWrapper = tupleOrLocalNameOrElement
if refChild == None:
self.appendChild (childElementWrapper)
else:
self.element.xmlIfExtInsertBefore(childElementWrapper.element, refChild.element)
self.__clearChildrenCache(childElementWrapper.getNsName())
return childElementWrapper
def removeChild (self, childElementWrapper):
"""Remove the given child element node from the children of the current node.
Input parameter:
childElementWrapper: ElementWrapper object to be removed
"""
self.element.xmlIfExtRemoveChild(childElementWrapper.element)
self.__clearChildrenCache(childElementWrapper.getNsName())
def insertSubtree (self, refChildWrapper, subTreeWrapper, insertSubTreeRootNode=1):
"""Insert the given subtree before 'refChildWrapper' ('refChildWrapper' is not removed!)
Input parameter:
refChildWrapper: reference child ElementWrapper object
subTreeWrapper: subtree wrapper object which contains the subtree to be inserted
insertSubTreeRootNode: if 1, root node of subtree is inserted into parent tree, otherwise not
"""
if refChildWrapper != None:
self.element.xmlIfExtInsertSubtree (refChildWrapper.element, subTreeWrapper.getTree(), insertSubTreeRootNode)
else:
self.element.xmlIfExtInsertSubtree (None, subTreeWrapper.getTree(), insertSubTreeRootNode)
self.__clearChildrenCache()
def replaceChildBySubtree (self, childElementWrapper, subTreeWrapper, insertSubTreeRootNode=1):
"""Replace child element node by XML subtree (e.g. expanding included XML files)
Input parameter:
childElementWrapper: ElementWrapper object to be replaced
subTreeWrapper: XML subtree wrapper object to be inserted
insertSubTreeRootNode: if 1, root node of subtree is inserted into parent tree, otherwise not
"""
self.insertSubtree (childElementWrapper, subTreeWrapper, insertSubTreeRootNode)
self.removeChild(childElementWrapper)
#++++++++++++ methods concerning the attributes of the current node ++++++++++++++++++++++++
def getAttributeDict (self):
"""Retrieve a dictionary containing all attributes of the current element node.
Returns a dictionary (copy) containing all attributes of the current element node.
"""
return self.element.xmlIfExtGetAttributeDict()
def getAttributeList (self):
"""Retrieve a list containing all attributes of the current element node
in the sequence specified in the input XML file.
Returns a list (copy) containing all attributes of the current element node
        in the sequence specified in the input XML file (TODO: does not currently work for the 4DOM/pyXML interface).
"""
attrList = map(lambda a: NsNameTupleFactory(a), self.attributeSequence)
return attrList
def getAttribute (self, tupleOrAttrName):
"""Retrieve an attribute value of the current element node.
Input parameter:
tupleOrAttrName: tuple '(namespace, attributeName)' or 'attributeName' if no namespace is used
Returns the value of the specified attribute.
"""
nsName = NsNameTupleFactory(tupleOrAttrName)
return self.element.xmlIfExtGetAttribute(nsName)
def getAttributeOrDefault (self, tupleOrAttrName, defaultValue):
"""Retrieve an attribute value of the current element node or the given default value if the attribute doesn't exist.
Input parameter:
tupleOrAttrName: tuple '(namespace, attributeName)' or 'attributeName' if no namespace is used
Returns the value of the specified attribute or the given default value if the attribute doesn't exist.
"""
attributeValue = self.getAttribute (tupleOrAttrName)
if attributeValue == None:
attributeValue = defaultValue
return attributeValue
def getQNameAttribute (self, tupleOrAttrName):
"""Retrieve a QName attribute value of the current element node.
Input parameter:
tupleOrAttrName: tuple '(namespace, attributeName)' or 'attributeName' if no namespace is used
Returns the value of the specified QName attribute as tuple (namespace, localName),
i.e. the prefix is converted into the corresponding namespace value.
"""
nsNameAttrName = NsNameTupleFactory(tupleOrAttrName)
try:
return self.__qNameAttrCache[nsNameAttrName]
except:
qNameValue = self.getAttribute (nsNameAttrName)
nsNameValue = self.qName2NsName(qNameValue, useDefaultNs=1)
if self.__useCaching():
self.__qNameAttrCache[nsNameAttrName] = nsNameValue
return nsNameValue
    def hasAttribute (self, tupleOrAttrName):
        """Checks if the requested attribute exists for the current element node.
Returns 1 if the attribute exists, otherwise 0.
"""
nsName = NsNameTupleFactory(tupleOrAttrName)
attrValue = self.element.xmlIfExtGetAttribute(nsName)
if attrValue != None:
return 1
else:
return 0
def setAttribute (self, tupleOrAttrName, attributeValue):
"""Sets an attribute value of the current element node.
If the attribute does not yet exist, it will be created.
Input parameter:
tupleOrAttrName: tuple '(namespace, attributeName)' or 'attributeName' if no namespace is used
attributeValue: attribute value to be set
"""
if not isinstance(attributeValue, StringTypes):
raise TypeError, "%s (attribute %s) must be a string!" %(repr(attributeValue), repr(tupleOrAttrName))
nsNameAttrName = NsNameTupleFactory(tupleOrAttrName)
if nsNameAttrName not in self.attributeSequence:
self.attributeSequence.append(nsNameAttrName)
if self.__useCaching():
if self.__qNameAttrCache.has_key(nsNameAttrName):
del self.__qNameAttrCache[nsNameAttrName]
self.element.xmlIfExtSetAttribute(nsNameAttrName, attributeValue, self.getCurrentNamespaces())
def setAttributeDefault (self, tupleOrAttrName, defaultValue):
"""Create attribute and set value to default if it does not yet exist for the current element node.
If the attribute is already existing nothing is done.
Input parameter:
tupleOrAttrName: tuple '(namespace, attributeName)' or 'attributeName' if no namespace is used
defaultValue: default attribute value to be set
"""
if not self.hasAttribute(tupleOrAttrName):
self.setAttribute(tupleOrAttrName, defaultValue)
def removeAttribute (self, tupleOrAttrName):
"""Removes an attribute from the current element node.
No exception is raised if there is no matching attribute.
Input parameter:
tupleOrAttrName: tuple '(namespace, attributeName)' or 'attributeName' if no namespace is used
"""
nsNameAttrName = NsNameTupleFactory(tupleOrAttrName)
if self.__useCaching():
if self.__qNameAttrCache.has_key(nsNameAttrName):
del self.__qNameAttrCache[nsNameAttrName]
self.element.xmlIfExtRemoveAttribute(nsNameAttrName)
def processWsAttribute (self, tupleOrAttrName, wsAction):
"""Process white space action for the specified attribute according to requested 'wsAction'.
Input parameter:
tupleOrAttrName: tuple '(namespace, attributeName)' or 'attributeName' if no namespace is used
wsAction: 'collapse': substitute multiple whitespace characters by a single ' '
                      'replace': substitute each whitespace character by a single ' '
"""
attributeValue = self.getAttribute(tupleOrAttrName)
newValue = processWhitespaceAction (attributeValue, wsAction)
if newValue != attributeValue:
self.setAttribute(tupleOrAttrName, newValue)
return newValue
#++++++++++++ methods concerning the content of the current node ++++++++++++++++++++++++
def getElementValue (self, ignoreEmtpyStringFragments=0):
"""Retrieve the content of the current element node.
Returns the content of the current element node as string.
        The contents of multiple text nodes / CDATA nodes are concatenated into one string.
Input parameter:
ignoreEmtpyStringFragments: if 1, text nodes containing only whitespaces are ignored
"""
return "".join (self.getElementValueFragments(ignoreEmtpyStringFragments))
def getElementValueFragments (self, ignoreEmtpyStringFragments=0):
"""Retrieve the content of the current element node as value fragment list.
Returns the content of the current element node as list of string fragments.
        Each list element represents one text node / CDATA node.
Input parameter:
ignoreEmtpyStringFragments: if 1, text nodes containing only whitespaces are ignored
        """
return self.element.xmlIfExtGetElementValueFragments (ignoreEmtpyStringFragments)
def setElementValue (self, elementValue):
"""Set the content of the current element node.
Input parameter:
elementValue: string containing the new element value
        If multiple text nodes / CDATA nodes exist, 'elementValue' is set
        for the first text node / CDATA node. All other text nodes / CDATA nodes are set to ''.
"""
self.element.xmlIfExtSetElementValue(elementValue)
def processWsElementValue (self, wsAction):
"""Process white space action for the content of the current element node according to requested 'wsAction'.
Input parameter:
wsAction: 'collapse': substitute multiple whitespace characters by a single ' '
                      'replace': substitute each whitespace character by a single ' '
"""
self.element.xmlIfExtProcessWsElementValue(wsAction)
return self.getElementValue()
#++++++++++++ methods concerning the info about the current node in the XML file ++++++++++++++++++++
def getStartLineNumber (self):
"""Retrieve the start line number of the current element node.
Returns the start line number of the current element node in the XML file
"""
return self.startLineNumber
def getEndLineNumber (self):
"""Retrieve the end line number of the current element node.
Returns the end line number of the current element node in the XML file
"""
return self.endLineNumber
def getAbsUrl (self):
"""Retrieve the absolute URL of the XML file the current element node belongs to.
Returns the absolute URL of the XML file the current element node belongs to.
"""
return self.absUrl
def getBaseUrl (self):
"""Retrieve the base URL of the XML file the current element node belongs to.
Returns the base URL of the XML file the current element node belongs to.
"""
return self.baseUrl
def getFilePath (self):
"""Retrieve the file path of the XML file the current element node belongs to.
Returns the file path of the XML file the current element node belongs to.
"""
return self.filePath
def getLocation (self, end=0, fullpath=0):
"""Retrieve a string containing file name and line number of the current element node.
Input parameter:
end: 1 if end line number shall be shown, 0 for start line number
fullpath: 1 if the full path of the XML file shall be shown, 0 for only the file name
Returns a string containing file name and line number of the current element node.
(e.g. to be used for traces or error messages)
"""
lineMethod = (self.getStartLineNumber, self.getEndLineNumber)
pathFunc = (os.path.basename, os.path.abspath)
return "%s, %d" % (pathFunc[fullpath](self.getFilePath()), lineMethod[end]())
#++++++++++++ miscellaneous methods concerning namespaces ++++++++++++++++++++
def getCurrentNamespaces (self):
"""Retrieve the namespace prefixes visible for the current element node
Returns a list of the namespace prefixes visible for the current node.
"""
return self.curNs
def qName2NsName (self, qName, useDefaultNs):
"""Convert a qName 'prefix:localName' to a tuple '(namespace, localName)'.
Input parameter:
qName: qName to be converted
useDefaultNs: 1 if default namespace shall be used
Returns the corresponding tuple '(namespace, localName)' for 'qName'.
"""
if qName != None:
qNamePrefix, qNameLocalName = splitQName (qName)
for prefix, namespaceURI in self.getCurrentNamespaces():
if qNamePrefix == prefix:
if prefix != EMPTY_PREFIX or useDefaultNs:
nsName = (namespaceURI, qNameLocalName)
break
else:
if qNamePrefix == None:
nsName = (EMPTY_NAMESPACE, qNameLocalName)
else:
raise ValueError, "Namespace prefix '%s' not bound to a namespace!" % (qNamePrefix)
else:
nsName = (None, None)
return NsNameTupleFactory(nsName)
def nsName2QName (self, nsLocalName):
"""Convert a tuple '(namespace, localName)' to a string 'prefix:localName'
Input parameter:
nsLocalName: tuple '(namespace, localName)' to be converted
Returns the corresponding string 'prefix:localName' for 'nsLocalName'.
"""
qName = nsNameToQName (nsLocalName, self.getCurrentNamespaces())
if qName == "xmlns:None": qName = "xmlns"
return qName
def getNamespace (self, qName):
"""Retrieve namespace for a qName 'prefix:localName'.
Input parameter:
qName: qName 'prefix:localName'
Returns the corresponding namespace for the prefix of 'qName'.
"""
if qName != None:
qNamePrefix, qNameLocalName = splitQName (qName)
for prefix, namespaceURI in self.getCurrentNamespaces():
if qNamePrefix == prefix:
namespace = namespaceURI
break
else:
if qNamePrefix == None:
namespace = EMPTY_NAMESPACE
else:
raise LookupError, "Namespace for QName '%s' not found!" % (qName)
else:
namespace = EMPTY_NAMESPACE
return namespace
def getNsPrefix (self, nsLocalName):
"""Retrieve prefix for a tuple '(namespace, localName)'.
Input parameter:
nsLocalName: tuple '(namespace, localName)'
Returns the corresponding prefix for the namespace of 'nsLocalName'.
"""
ns = nsLocalName[0]
for prefix, namespace in self.getCurrentNamespaces():
if ns == namespace:
return prefix
else:
if ns == None:
return None
else:
raise LookupError, "Prefix for namespaceURI '%s' not found!" % (ns)
#++++++++++++ limited XPath support ++++++++++++++++++++
def getXPath (self, xPath, namespaceRef=None, useDefaultNs=1, attrIgnoreList=[]):
"""Retrieve node list or attribute list for specified XPath
Input parameter:
xPath: string containing xPath specification
namespaceRef: scope for namespaces (default is own element node)
useDefaultNs: 1, if default namespace shall be used if no prefix is available
attrIgnoreList: list of attributes to be ignored if wildcard is specified for attributes
Returns all nodes which match xPath specification or
list of attribute values if xPath specifies an attribute
"""
return self.getXPathList(xPath, namespaceRef, useDefaultNs, attrIgnoreList)[0]
def getXPathList (self, xPath, namespaceRef=None, useDefaultNs=1, attrIgnoreList=[]):
"""Retrieve node list or attribute list for specified XPath
Input parameter:
xPath: string containing xPath specification
namespaceRef: scope for namespaces (default is own element node)
useDefaultNs: 1, if default namespace shall be used if no prefix is available
attrIgnoreList: list of attributes to be ignored if wildcard is specified for attributes
Returns tuple (completeChildList, attrNodeList, attrNsNameFirst).
completeChildList: contains all child node which match xPath specification or
list of attribute values if xPath specifies an attribute
attrNodeList: contains all child nodes where the specified attribute was found
attrNsNameFirst: contains the name of the first attribute which was found
TODO: Re-design namespace and attribute handling of this method
"""
reChild = re.compile('child *::')
reAttribute = re.compile('attribute *::')
if namespaceRef == None: namespaceRef = self
xPath = reChild.sub('./', xPath)
xPath = reAttribute.sub('@', xPath)
xPathList = string.split (xPath, "|")
completeChildDict = {}
completeChildList = []
attrNodeList = []
attrNsNameFirst = None
for xRelPath in xPathList:
xRelPath = string.strip(xRelPath)
descendantOrSelf = 0
if xRelPath[:3] == ".//":
descendantOrSelf = 1
xRelPath = xRelPath[3:]
xPathLocalStepList = string.split (xRelPath, "/")
childList = [self, ]
for localStep in xPathLocalStepList:
localStep = string.strip(localStep)
stepChildList = []
if localStep == "":
raise IOError ("Invalid xPath '%s'!" %(xRelPath))
elif localStep == ".":
continue
elif localStep[0] == '@':
if len(localStep) == 1:
raise ValueError ("Attribute name is missing in xPath!")
if descendantOrSelf:
childList = self.getElementsByTagName()
attrName = localStep[1:]
for childNode in childList:
if attrName == '*':
attrNodeList.append (childNode)
attrDict = childNode.getAttributeDict()
for attrIgnore in attrIgnoreList:
if attrDict.has_key(attrIgnore):
del attrDict[attrIgnore]
stepChildList.extend(attrDict.values())
try:
attrNsNameFirst = attrDict.keys()[0]
except:
pass
else:
attrNsName = namespaceRef.qName2NsName (attrName, useDefaultNs=0)
if attrNsName[1] == '*':
for attr in childNode.getAttributeDict().keys():
if attr[0] == attrNsName[0]:
if attrNodeList == []:
attrNsNameFirst = attrNsName
attrNodeList.append (childNode)
stepChildList.append (childNode.getAttribute(attr))
elif childNode.hasAttribute(attrNsName):
if attrNodeList == []:
attrNsNameFirst = attrNsName
attrNodeList.append (childNode)
stepChildList.append (childNode.getAttribute(attrNsName))
childList = stepChildList
else:
nsLocalName = namespaceRef.qName2NsName (localStep, useDefaultNs=useDefaultNs)
if descendantOrSelf:
descendantOrSelf = 0
if localStep == "*":
stepChildList = self.getElementsByTagName()
else:
stepChildList = self.getElementsByTagName(nsLocalName)
else:
for childNode in childList:
if localStep == "*":
stepChildList.extend (childNode.getChildren())
else:
stepChildList.extend (childNode.getChildrenNS(nsLocalName[0], nsLocalName[1]))
childList = stepChildList
            # filter duplicated children
for child in childList:
try:
childKey = child.element
except:
childKey = child
if not completeChildDict.has_key(childKey):
completeChildList.append(child)
completeChildDict[childKey] = 1
return completeChildList, attrNodeList, attrNsNameFirst
###############################################################
# PRIVATE methods
###############################################################
def __createElement (self, tupleOrLocalName, attributeDict):
"""Create a new ElementWrapper object.
Input parameter:
tupleOrLocalName: tuple '(namespace, localName)' or 'localName' if no namespace is used
attributeDict: dictionary which contains the attributes and their values of the element node to be created
Returns the created ElementWrapper object
"""
childElementWrapper = self.treeWrapper.createElement (tupleOrLocalName, attributeDict, self.curNs[:]) # TODO: when to be adapted???)
childElementWrapper.element.xmlIfExtSetParentNode(self.element)
return childElementWrapper
def __updateAttrMaxLengthDict (self, attrMaxLengthDict):
"""Update dictionary which contains the maximum length of node attributes.
Used for pretty print to align the attributes of child nodes.
attrMaxLengthDict is in/out parameter.
"""
for attrName, attrValue in self.getAttributeDict().items():
attrLength = len(attrValue)
if not attrMaxLengthDict.has_key(attrName):
attrMaxLengthDict[attrName] = attrLength
else:
attrMaxLengthDict[attrName] = max(attrMaxLengthDict[attrName], attrLength)
def __clearChildrenCache (self, childNsName=None):
"""Clear children cache.
"""
if self.__useCaching():
if childNsName != None:
if self.__childrenCache.has_key(childNsName):
del self.__childrenCache[childNsName]
if self.__firstChildCache.has_key(childNsName):
del self.__firstChildCache[childNsName]
else:
self.__childrenCache.clear()
self.__firstChildCache.clear()
def __useCaching(self):
return self.treeWrapper.useCaching()
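#++++++++++++ usage sketch (editor's addition) ++++++++++++
# Navigating an element wrapper and using the limited XPath support.
# 'root' is assumed to be an XmlElementWrapper obtained via
# XmlTreeWrapper.getRootNode(); tag and attribute names are illustrative.
def _elementWrapperUsageSketch (root):
    names = []
    # iterate over all "entry" children (no namespace) and read an attribute
    for child in root.getChildren ("entry"):
        names.append (child.getAttribute ("name"))
    # key operator access raises AttributeError if the attribute is missing
    firstChild = root.getFirstChild ()
    if firstChild is not None and firstChild.hasAttribute ("name"):
        names.append (firstChild["name"])
    # limited XPath: collect the 'name' attributes of all 'entry' descendants
    attrValues = root.getXPath (".//entry/@name")
    return names, attrValues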
| gpl-3.0 | 7,730,973,759,348,299,000 | 41.134005 | 144 | 0.616803 | false | 4.911069 | false | false | false |
stefelisabeth/dplace | dplace_app/api_views.py | 1 | 21716 | import json
import re
import datetime
from itertools import groupby
import logging
from django.db import connection
from django.db.models import Prefetch, Q, Count
from django.shortcuts import get_object_or_404
from django.http import Http404
from rest_framework import viewsets
from rest_framework.pagination import PageNumberPagination
from rest_framework.decorators import api_view, permission_classes, renderer_classes
from rest_framework.permissions import AllowAny
from rest_framework.views import Response
from rest_framework.renderers import JSONRenderer
from dplace_app.filters import GeographicRegionFilter
from dplace_app.renderers import DPLACECSVRenderer
from dplace_app import serializers
from dplace_app import models
from dplace_app.tree import update_newick
log = logging.getLogger('profile')
class VariableViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = serializers.VariableSerializer
filter_fields = ('label', 'name', 'index_categories', 'niche_categories', 'source')
queryset = models.Variable.objects\
.prefetch_related('index_categories', 'niche_categories')
# Override retrieve to use the detail serializer, which includes categories
def retrieve(self, request, *args, **kwargs):
self.object = self.get_object()
serializer = serializers.VariableDetailSerializer(self.object)
return Response(serializer.data)
class CategoryViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = serializers.CategorySerializer
filter_fields = ('name', 'type', 'index_variables', 'niche_variables')
queryset = models.Category.objects.all()
# Override retrieve to use the detail serializer, which includes variables
def retrieve(self, request, *args, **kwargs):
self.object = self.get_object()
serializer = serializers.CategoryDetailSerializer(self.object)
return Response(serializer.data)
class CodeDescriptionViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = serializers.CodeDescriptionSerializer
filter_fields = ('variable',)
queryset = models.CodeDescription.objects.all()
class ValueViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = serializers.ValueSerializer
filter_fields = ('variable', 'coded_value', 'code', 'society',)
# Avoid additional database trips by select_related for foreign keys
queryset = models.Value.objects.filter(variable__type='cultural')\
.select_related('variable', 'code', 'source').all()
class SocietyViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = serializers.SocietySerializer
queryset = models.Society.objects.all().select_related(
'source', 'language__iso_code', 'language__family')
lookup_field = 'ext_id'
def detail(self, request, society_id):
# block spider attacks
if len(request.GET) > 0 and request.path.startswith('/society'):
raise Http404
society = get_object_or_404(models.Society, ext_id=society_id)
# gets the society's location for inset map
location = {}
if society.location:
location = {
'lat': society.location['coordinates'][1],
'lng': society.location['coordinates'][0]
}
# gets other societies in database with the same xd_id
xd_id = models.Society.objects.filter(
xd_id=society.xd_id).exclude(ext_id=society_id)
if society.hraf_link and '(' in society.hraf_link:
hraf_link = society.hraf_link.split('(')[len(society.hraf_link.split('('))-1]
else:
hraf_link = ''
environmentals = society.get_environmental_data()
cultural_traits = society.get_cultural_trait_data()
references = society.get_data_references()
language_classification = None
if society.language:
# just glottolog at the moment
language_classification = models.LanguageFamily.objects\
.filter(name=society.language.family.name)
return Response(
{
'society': society,
'hraf_link': hraf_link[0:len(hraf_link)-1],
'xd_id': xd_id,
'location': location,
'language_classification': language_classification,
'environmentals': dict(environmentals),
'cultural_traits': dict(cultural_traits),
'references': references
},
template_name='society.html'
)
class ISOCodeViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = serializers.ISOCodeSerializer
filter_fields = ('iso_code',)
queryset = models.ISOCode.objects.all()
class LargeResultsSetPagination(PageNumberPagination):
page_size = 1000
page_size_query_param = 'page_size'
max_page_size = 1000
class LanguageViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = serializers.LanguageSerializerWithSocieties
filter_fields = ('name', 'iso_code', 'societies', 'family',)
queryset = models.Language.objects.all()\
.select_related('family', 'iso_code')\
.prefetch_related(Prefetch(
'societies',
queryset=models.Society.objects.exclude(value__isnull=True)
))
pagination_class = LargeResultsSetPagination
class LanguageFamilyViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = serializers.LanguageFamilySerializer
filter_fields = ('name',)
queryset = models.LanguageFamily.objects.all()\
.annotate(language_count=Count('language__societies'))\
.order_by('name')
pagination_class = LargeResultsSetPagination
class TreeResultsSetPagination(PageNumberPagination):
"""
Since trees may have *many* languages, which are serialized as well, we limit the
page size to just 1.
"""
page_size = 1
page_size_query_param = 'page_size'
max_page_size = 10
class LanguageTreeViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = serializers.LanguageTreeSerializer
filter_fields = ('name',)
queryset = models.LanguageTree.objects.all()
pagination_class = TreeResultsSetPagination
class LanguageTreeLabelsViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = serializers.LanguageTreeLabelsSerializer
filter_fields = ('label',)
queryset = models.LanguageTreeLabels.objects.all()
pagination_class = LargeResultsSetPagination
class SourceViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = serializers.SourceSerializer
filter_fields = ('author', 'name')
queryset = models.Source.objects.all()
def get_query_from_json(request):
query_string = request.query_params.get('query')
if query_string is None:
raise Http404('missing query parameter')
try:
query_dict = json.loads(query_string)
except ValueError:
raise Http404('malformed query parameter')
if not isinstance(query_dict, dict):
raise Http404('malformed query parameter')
return query_dict
def result_set_from_query_dict(query_dict):
from time import time
_s = time()
log.info('enter result_set_from_query_dict')
result_set = serializers.SocietyResultSet()
sql_joins, sql_where = [], []
def id_array(l):
return '(%s)' % ','.join('%s' % int(i) for i in l)
if 'l' in query_dict:
sql_joins.append(('language', 'l', 'l.id = s.language_id'))
sql_where.append('l.id IN ' + id_array(query_dict['l']))
for lang in models.Language.objects.filter(id__in=query_dict['l']):
result_set.languages.add(lang)
if 'c' in query_dict:
variables = {
v.id: v for v in models.Variable.objects
.filter(id__in=[int(x.split('-')[0]) for x in query_dict['c']])
.prefetch_related(Prefetch(
'codes',
queryset=models.CodeDescription.objects
.filter(id__in=[int(x.split('-')[1]) for x in query_dict['c'] if len(x.split('-')) == 2])))
}
for variable, codes in groupby(
sorted(query_dict['c'], key=lambda c: int(c.split('-')[0])),
key=lambda x: int(str(x).split('-')[0])
):
variable = variables[variable]
codes = [{
'id': None if (len(c.split('-')) > 2 or len(c.split('-')) == 1) else int(c.split('-')[1]),
'min': None if len(c.split('-')) < 3 else float(c.split('-')[1]),
'max': None if len(c.split('-')) < 3 else float(c.split('-')[2])
} for c in list(codes)]
alias = 'cv%s' % variable.id
sql_joins.append((
"value",
alias,
"{0}.society_id = s.id AND {0}.variable_id = {1}".format(alias, variable.id)
))
if variable.data_type and variable.data_type == 'Continuous':
include_NA = not all((c['min'] is not None) for c in codes)
ors = [
"({0}.coded_value_float >= %(min)f AND {0}.coded_value_float <= %(max)f)".format(alias) % c
for c in codes if ('min' in c and c['min'] is not None)]
if include_NA:
ors.append("%s.coded_value = 'NA'" % alias)
sql_where.append("(%s)" % ' OR '.join(ors))
if not include_NA:
sql_where.append("{0}.coded_value != 'NA'".format(alias))
else:
assert all('id' in c for c in codes)
sql_where.append("{0}.code_id IN %s".format(alias) % id_array([x['id'] for x in codes]))
result_set.variable_descriptions.add(serializers.VariableCode(variable.codes, variable))
if 'e' in query_dict:
# There can be multiple filters, so we must aggregate the results.
for varid, criteria in groupby(
sorted(query_dict['e'], key=lambda c: c[0]),
key=lambda x: x[0]
):
alias = 'ev%s' % varid
sql_joins.append((
"value",
alias,
"{0}.society_id = s.id AND {0}.variable_id = {1}".format(alias, int(varid))))
for varid, operator, params in criteria:
params = map(float, params)
if operator == 'inrange':
sql_where.append("{0}.coded_value_float >= {1:f} AND {0}.coded_value_float <= {2:f}".format(alias, params[0], params[1]))
elif operator == 'outrange':
sql_where.append("{0}.coded_value_float >= {1:f} AND {0}.coded_value_float <= {2:f}".format(alias, params[1], params[0]))
elif operator == 'gt':
sql_where.append("{0}.coded_value_float >= {1:f}".format(alias, params[0]))
elif operator == 'lt':
sql_where.append("{0}.coded_value_float <= {1:f}".format(alias, params[0]))
for variable in models.Variable.objects.filter(id__in=[x[0] for x in query_dict['e']]):
result_set.environmental_variables.add(variable)
if 'p' in query_dict:
sql_joins.append(('geographicregion', 'r', 'r.id = s.region_id'))
sql_where.append('r.id IN %s' % id_array(query_dict['p']))
for region in models.GeographicRegion.objects.filter(id__in=query_dict['p']):
result_set.geographic_regions.add(region)
if sql_where:
cursor = connection.cursor()
sql = "select distinct s.id from dplace_app_society as s %s where %s" % (
' '.join('join dplace_app_%s as %s on %s' % t for t in sql_joins),
' AND '.join(sql_where))
cursor.execute(sql)
soc_ids = [r[0] for r in cursor.fetchall()]
else:
soc_ids = []
soc_query = models.Society.objects.filter(id__in=soc_ids)\
.select_related('source', 'language__family', 'language__iso_code', 'region')
if result_set.geographic_regions:
soc_query = soc_query.select_related('region')
if result_set.variable_descriptions:
soc_query = soc_query.prefetch_related(Prefetch(
'value_set',
to_attr='selected_cvalues',
queryset=models.Value.objects
# FIXME: this selects possibly too many values, in case there are multiple
# values for the same variable, not all of them matching the criteria.
.filter(variable_id__in=[v.variable.id for v in result_set.variable_descriptions])
.select_related('code')
.prefetch_related('references')))
if result_set.environmental_variables:
soc_query = soc_query.prefetch_related(Prefetch(
'value_set',
to_attr='selected_evalues',
queryset=models.Value.objects
.filter(variable_id__in=[v.id for v in result_set.environmental_variables])
.prefetch_related('references')))
for i, soc in enumerate(soc_query):
soc_result = serializers.SocietyResult(soc)
if result_set.variable_descriptions:
for cval in soc.selected_cvalues:
soc_result.variable_coded_values.add(cval)
if result_set.environmental_variables:
for eval in soc.selected_evalues:
soc_result.environmental_values.add(eval)
result_set.societies.add(soc_result)
log.info('mid 1: %s' % (time() - _s,))
# Filter the results to those that matched all criteria
#result_set.finalize(criteria)
log.info('mid 2: %s' % (time() - _s,))
return result_set
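# Editor's note: an illustrative sketch of the query dict shape consumed by
# result_set_from_query_dict, inferred from the parsing above; all ids and
# values below are made up.
EXAMPLE_QUERY_DICT = {
    'l': [1, 2],                       # language ids
    'c': ['42-7', '55-0.5-2.5'],       # "variableId-codeId" or "variableId-min-max"
    'e': [[3, 'gt', [10.0]],           # [environmental variable id, operator, params]
          [4, 'inrange', [0.0, 5.0]]],
    'p': [6],                          # geographic region ids
}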
@api_view(['GET'])
@permission_classes((AllowAny,))
def trees_from_societies(request):
language_trees = []
for k, v in request.query_params.lists():
soc_ids = v
labels = models.LanguageTreeLabels.objects.filter(societies__id__in=soc_ids).all()
global_tree = None
global_newick = []
global_isolates = []
for t in models.LanguageTree.objects\
.filter(taxa__societies__id__in=soc_ids)\
.prefetch_related(
'taxa__languagetreelabelssequence_set__labels',
'taxa__languagetreelabelssequence_set__society',
)\
.distinct():
if 'global' in t.name:
global_tree = t
# TODO ask @Bibiko once the isolates are in the db under global.tree as string: isol1,isol2,isol3,...
# global_isolates.extend(t.newick_string.split(','))
global_isolates.extend(['alse1251','amas1236','bana1292','calu1239','chim1301','chit1248','chon1248','coah1252','coos1249','furr1244','gaga1251','guai1237','guat1253','hadz1240','high1242','kara1289','karo1304','klam1254','kute1249','lara1258','mull1237','natc1249','nort2938','paez1247','pume1238','pura1257','pure1242','sali1253','sand1273','seri1257','shom1245','sius1254','sout1439','take1257','ticu1245','timu1245','tiwi1244','toll1241','trum1247','uruu1244','wara1303','wash1253','yama1264','yuch1247','zuni1245'])
else:
if update_newick(t, labels):
language_trees.append(t)
if 'glotto' in t.name:
            # strip the trailing ';' so the pruned trees can be joined below
global_newick.append(t.newick_string[:-1])
if global_tree:
langs_in_tree = [str(l.label) for l in labels]
        # add isolates if present in the current selection
        for isolate in global_isolates:
            if isolate in langs_in_tree:
                global_newick.append('(' + isolate + ':1)')
        # join all pruned glottolog trees into the global one
global_tree.newick_string = '(' + ','.join(global_newick) + ');'
language_trees.append(global_tree)
return Response(serializers.LanguageTreeSerializer(language_trees, many=True).data)
@api_view(['GET'])
@permission_classes((AllowAny,))
def find_societies(request):
"""
View to find the societies that match an input request. Currently expects
{ language_filters: [{language_ids: [1,2,3]}], variable_codes: [4,5,6...],
environmental_filters: [{id: 1, operator: 'gt', params: [0.0]},
      {id: 3, operator: 'inrange', params: [10.0, 20.0]}] }
Returns serialized collection of SocietyResult objects
"""
from time import time
from django.db import connection
s = time()
log.info('%s find_societies 1: %s queries' % (time() - s, len(connection.queries)))
query = {}
if 'name' in request.query_params:
result_set = serializers.SocietyResultSet()
q = request.query_params['name']
if q:
soc = models.Society.objects.filter(
Q(name__icontains=q) | Q(alternate_names__unaccent__icontains=q))
            for society in soc:
                if society.value_set.count():
                    result_set.societies.add(serializers.SocietyResult(society))
return Response(serializers.SocietyResultSetSerializer(result_set).data)
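    # Cultural code filters ('c') arrive as plain strings; every other filter is
    # JSON-encoded, so decode those before building the result set.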
for k, v in request.query_params.lists():
if str(k) == 'c':
query[k] = v
else:
query[k] = [json.loads(vv) for vv in v]
result_set = result_set_from_query_dict(query)
log.info('%s find_societies 2: %s queries' % (time() - s, len(connection.queries)))
d = serializers.SocietyResultSetSerializer(result_set).data
log.info('%s find_societies 3: %s queries' % (time() - s, len(connection.queries)))
for i, q in enumerate(
sorted(connection.queries, key=lambda q: q['time'], reverse=True)):
if 10 < i < 20: # pragma: no cover
log.info('%s for %s' % (q['time'], q['sql'][:500]))
return Response(d)
@api_view(['GET'])
@permission_classes((AllowAny,))
def get_categories(request):
"""
Filters categories for sources, as some categories are empty for some sources
"""
query_dict = get_query_from_json(request)
categories = models.Category.objects.filter(type='cultural')
source_categories = []
if 'source' in query_dict:
source = models.Source.objects.filter(id=query_dict['source'])
variables = models.Variable.objects.filter(source=source)
for c in categories:
if variables.filter(index_categories=c.id):
source_categories.append(c)
return Response(
serializers.CategorySerializer(source_categories, many=True).data)
return Response(serializers.CategorySerializer(categories, many=True).data)
@api_view(['GET'])
@permission_classes((AllowAny,))
def get_dataset_sources(request):
return Response(
serializers.SourceSerializer(
models.Source.objects.all().exclude(name=""), many=True).data)
class GeographicRegionViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = serializers.GeographicRegionSerializer
model = models.GeographicRegion
filter_class = GeographicRegionFilter
queryset = models.GeographicRegion.objects.all()
@api_view(['GET'])
@permission_classes((AllowAny,))
@renderer_classes((JSONRenderer,))
def get_min_and_max(request):
res = {}
varid = get_query_from_json(request).get('environmental_id')
if varid:
values = [
v.coded_value_float for v in models.Value.objects.filter(variable__id=varid)
if v.coded_value_float is not None]
vmin = min(values) if values else 0.0
vmax = max(values) if values else 0.0
res = {'min': format(vmin, '.4f'), 'max': format(vmax, '.4f')}
return Response(res)
@api_view(['GET'])
@permission_classes((AllowAny,))
@renderer_classes((JSONRenderer,))
def bin_cont_data(request): # MAKE THIS GENERIC
bf_id = get_query_from_json(request).get('bf_id')
bins = []
if bf_id:
values = models.Value.objects.filter(variable__id=bf_id)
min_value = None
max_value = 0.0
missing_data_option = False
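        # Values containing letters are collected into a single "missing data"
        # bin; numeric values establish the min/max used for five equal-width bins.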
for v in values:
if re.search('[a-zA-Z]', v.coded_value):
if not missing_data_option:
bins.append({
'code': v.coded_value,
'description': v.code.description,
'variable': bf_id,
})
missing_data_option = True
continue
else:
v.coded_value = v.coded_value.replace(',', '')
            val = float(v.coded_value)
            if min_value is None or val < min_value:
                min_value = val
            if val > max_value:
                max_value = val
min_value = min_value or 0.0 # This is the case when there are no values!
data_range = max_value - min_value
bin_size = data_range / 5
min_bin = min_value
for x in range(0, 5):
            bin_min = min_bin
            bin_max = min_bin + bin_size
            bins.append({
                'code': x,
                'description': str(bin_min) + ' - ' + str(bin_max),
                'min': bin_min,
                'max': bin_max,
                'variable': bf_id,
            })
min_bin = min_bin + bin_size + 1
return Response(bins)
@api_view(['GET'])
@permission_classes((AllowAny,))
@renderer_classes((DPLACECSVRenderer,))
def csv_download(request):
query_dict = get_query_from_json(request)
result_set = result_set_from_query_dict(query_dict)
response = Response(serializers.SocietyResultSetSerializer(result_set).data)
filename = "dplace-societies-%s.csv" % datetime.datetime.now().strftime("%Y-%m-%d")
response['Content-Disposition'] = 'attachment; filename="%s"' % filename
return response
| mit | 6,217,632,777,301,787,000 | 39.819549 | 536 | 0.611945 | false | 3.708967 | false | false | false |
vjmac15/Lyilis | lib/youtube_dl/extractor/aparat.py | 15 | 2195 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
mimetype2ext,
)
class AparatIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?aparat\.com/(?:v/|video/video/embed/videohash/)(?P<id>[a-zA-Z0-9]+)'
_TEST = {
'url': 'http://www.aparat.com/v/wP8On',
'md5': '131aca2e14fe7c4dcb3c4877ba300c89',
'info_dict': {
'id': 'wP8On',
'ext': 'mp4',
'title': 'تیم گلکسی 11 - زومیت',
'age_limit': 0,
},
# 'skip': 'Extremely unreliable',
}
def _real_extract(self, url):
video_id = self._match_id(url)
# Note: There is an easier-to-parse configuration at
# http://www.aparat.com/video/video/config/videohash/%video_id
# but the URL in there does not work
webpage = self._download_webpage(
'http://www.aparat.com/video/video/embed/vt/frame/showvideo/yes/videohash/' + video_id,
video_id)
title = self._search_regex(r'\s+title:\s*"([^"]+)"', webpage, 'title')
file_list = self._parse_json(
self._search_regex(
r'fileList\s*=\s*JSON\.parse\(\'([^\']+)\'\)', webpage,
'file list'),
video_id)
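        # Each entry of fileList's first element appears to describe one available
        # rendition; build a youtube-dl format dict per entry.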
formats = []
for item in file_list[0]:
file_url = item.get('file')
if not file_url:
continue
ext = mimetype2ext(item.get('type'))
label = item.get('label')
formats.append({
'url': file_url,
'ext': ext,
'format_id': label or ext,
'height': int_or_none(self._search_regex(
r'(\d+)[pP]', label or '', 'height', default=None)),
})
self._sort_formats(formats)
thumbnail = self._search_regex(
r'image:\s*"([^"]+)"', webpage, 'thumbnail', fatal=False)
return {
'id': video_id,
'title': title,
'thumbnail': thumbnail,
'age_limit': self._family_friendly_search(webpage),
'formats': formats,
}
| gpl-3.0 | -1,589,022,954,366,422,800 | 30.623188 | 106 | 0.499542 | false | 3.4912 | false | false | false |
denys-duchier/django | tests/file_uploads/tests.py | 45 | 23742 | import base64
import hashlib
import os
import shutil
import sys
import tempfile as sys_tempfile
import unittest
from io import BytesIO, StringIO
from urllib.parse import quote
from django.core.files import temp as tempfile
from django.core.files.uploadedfile import SimpleUploadedFile
from django.http.multipartparser import MultiPartParser, parse_header
from django.test import SimpleTestCase, TestCase, client, override_settings
from django.utils.encoding import force_bytes
from . import uploadhandler
from .models import FileModel
UNICODE_FILENAME = 'test-0123456789_中文_Orléans.jpg'
MEDIA_ROOT = sys_tempfile.mkdtemp()
UPLOAD_TO = os.path.join(MEDIA_ROOT, 'test_upload')
@override_settings(MEDIA_ROOT=MEDIA_ROOT, ROOT_URLCONF='file_uploads.urls', MIDDLEWARE=[])
class FileUploadTests(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
if not os.path.isdir(MEDIA_ROOT):
os.makedirs(MEDIA_ROOT)
@classmethod
def tearDownClass(cls):
shutil.rmtree(MEDIA_ROOT)
super().tearDownClass()
def test_simple_upload(self):
with open(__file__, 'rb') as fp:
post_data = {
'name': 'Ringo',
'file_field': fp,
}
response = self.client.post('/upload/', post_data)
self.assertEqual(response.status_code, 200)
def test_large_upload(self):
file = tempfile.NamedTemporaryFile
with file(suffix=".file1") as file1, file(suffix=".file2") as file2:
file1.write(b'a' * (2 ** 21))
file1.seek(0)
file2.write(b'a' * (10 * 2 ** 20))
file2.seek(0)
post_data = {
'name': 'Ringo',
'file_field1': file1,
'file_field2': file2,
}
for key in list(post_data):
try:
post_data[key + '_hash'] = hashlib.sha1(post_data[key].read()).hexdigest()
post_data[key].seek(0)
except AttributeError:
post_data[key + '_hash'] = hashlib.sha1(force_bytes(post_data[key])).hexdigest()
response = self.client.post('/verify/', post_data)
self.assertEqual(response.status_code, 200)
def _test_base64_upload(self, content, encode=base64.b64encode):
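        # Build the multipart body by hand so a Content-Transfer-Encoding header
        # can be included; the test client would not add one itself.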
payload = client.FakePayload("\r\n".join([
'--' + client.BOUNDARY,
'Content-Disposition: form-data; name="file"; filename="test.txt"',
'Content-Type: application/octet-stream',
'Content-Transfer-Encoding: base64',
'']))
payload.write(b"\r\n" + encode(force_bytes(content)) + b"\r\n")
payload.write('--' + client.BOUNDARY + '--\r\n')
r = {
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': "/echo_content/",
'REQUEST_METHOD': 'POST',
'wsgi.input': payload,
}
response = self.client.request(**r)
self.assertEqual(response.json()['file'], content)
def test_base64_upload(self):
self._test_base64_upload("This data will be transmitted base64-encoded.")
def test_big_base64_upload(self):
self._test_base64_upload("Big data" * 68000) # > 512Kb
def test_big_base64_newlines_upload(self):
self._test_base64_upload("Big data" * 68000, encode=base64.encodebytes)
def test_unicode_file_name(self):
with sys_tempfile.TemporaryDirectory() as temp_dir:
# This file contains Chinese symbols and an accented char in the name.
with open(os.path.join(temp_dir, UNICODE_FILENAME), 'w+b') as file1:
file1.write(b'b' * (2 ** 10))
file1.seek(0)
response = self.client.post('/unicode_name/', {'file_unicode': file1})
self.assertEqual(response.status_code, 200)
def test_unicode_file_name_rfc2231(self):
"""
Test receiving file upload when filename is encoded with RFC2231
(#22971).
"""
payload = client.FakePayload()
payload.write('\r\n'.join([
'--' + client.BOUNDARY,
'Content-Disposition: form-data; name="file_unicode"; filename*=UTF-8\'\'%s' % quote(UNICODE_FILENAME),
'Content-Type: application/octet-stream',
'',
'You got pwnd.\r\n',
'\r\n--' + client.BOUNDARY + '--\r\n'
]))
r = {
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': "/unicode_name/",
'REQUEST_METHOD': 'POST',
'wsgi.input': payload,
}
response = self.client.request(**r)
self.assertEqual(response.status_code, 200)
def test_unicode_name_rfc2231(self):
"""
Test receiving file upload when filename is encoded with RFC2231
(#22971).
"""
payload = client.FakePayload()
payload.write(
'\r\n'.join([
'--' + client.BOUNDARY,
'Content-Disposition: form-data; name*=UTF-8\'\'file_unicode; filename*=UTF-8\'\'%s' % quote(
UNICODE_FILENAME
),
'Content-Type: application/octet-stream',
'',
'You got pwnd.\r\n',
'\r\n--' + client.BOUNDARY + '--\r\n'
])
)
r = {
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': "/unicode_name/",
'REQUEST_METHOD': 'POST',
'wsgi.input': payload,
}
response = self.client.request(**r)
self.assertEqual(response.status_code, 200)
def test_blank_filenames(self):
"""
Receiving file upload when filename is blank (before and after
sanitization) should be okay.
"""
# The second value is normalized to an empty name by
# MultiPartParser.IE_sanitize()
filenames = ['', 'C:\\Windows\\']
payload = client.FakePayload()
for i, name in enumerate(filenames):
payload.write('\r\n'.join([
'--' + client.BOUNDARY,
'Content-Disposition: form-data; name="file%s"; filename="%s"' % (i, name),
'Content-Type: application/octet-stream',
'',
'You got pwnd.\r\n'
]))
payload.write('\r\n--' + client.BOUNDARY + '--\r\n')
r = {
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': '/echo/',
'REQUEST_METHOD': 'POST',
'wsgi.input': payload,
}
response = self.client.request(**r)
self.assertEqual(response.status_code, 200)
# Empty filenames should be ignored
received = response.json()
for i, name in enumerate(filenames):
self.assertIsNone(received.get('file%s' % i))
def test_dangerous_file_names(self):
"""Uploaded file names should be sanitized before ever reaching the view."""
        # This test simulates possible directory traversal attacks by a
        # malicious uploader. We have to do some monkeybusiness here to
        # construct a malicious payload with an invalid file name (containing
        # os.sep or os.pardir). This is similar to what an attacker would need
        # to do when trying such an attack.
scary_file_names = [
"/tmp/hax0rd.txt", # Absolute path, *nix-style.
"C:\\Windows\\hax0rd.txt", # Absolute path, win-style.
"C:/Windows/hax0rd.txt", # Absolute path, broken-style.
"\\tmp\\hax0rd.txt", # Absolute path, broken in a different way.
"/tmp\\hax0rd.txt", # Absolute path, broken by mixing.
"subdir/hax0rd.txt", # Descendant path, *nix-style.
"subdir\\hax0rd.txt", # Descendant path, win-style.
"sub/dir\\hax0rd.txt", # Descendant path, mixed.
"../../hax0rd.txt", # Relative path, *nix-style.
"..\\..\\hax0rd.txt", # Relative path, win-style.
"../..\\hax0rd.txt" # Relative path, mixed.
]
payload = client.FakePayload()
for i, name in enumerate(scary_file_names):
payload.write('\r\n'.join([
'--' + client.BOUNDARY,
'Content-Disposition: form-data; name="file%s"; filename="%s"' % (i, name),
'Content-Type: application/octet-stream',
'',
'You got pwnd.\r\n'
]))
payload.write('\r\n--' + client.BOUNDARY + '--\r\n')
r = {
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': "/echo/",
'REQUEST_METHOD': 'POST',
'wsgi.input': payload,
}
response = self.client.request(**r)
        # The filenames should have been sanitized by the time they got to the view.
received = response.json()
for i, name in enumerate(scary_file_names):
got = received["file%s" % i]
self.assertEqual(got, "hax0rd.txt")
def test_filename_overflow(self):
"""File names over 256 characters (dangerous on some platforms) get fixed up."""
long_str = 'f' * 300
cases = [
# field name, filename, expected
('long_filename', '%s.txt' % long_str, '%s.txt' % long_str[:251]),
('long_extension', 'foo.%s' % long_str, '.%s' % long_str[:254]),
('no_extension', long_str, long_str[:255]),
('no_filename', '.%s' % long_str, '.%s' % long_str[:254]),
('long_everything', '%s.%s' % (long_str, long_str), '.%s' % long_str[:254]),
]
payload = client.FakePayload()
for name, filename, _ in cases:
payload.write("\r\n".join([
'--' + client.BOUNDARY,
'Content-Disposition: form-data; name="{}"; filename="{}"',
'Content-Type: application/octet-stream',
'',
'Oops.',
''
]).format(name, filename))
payload.write('\r\n--' + client.BOUNDARY + '--\r\n')
r = {
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': "/echo/",
'REQUEST_METHOD': 'POST',
'wsgi.input': payload,
}
response = self.client.request(**r)
result = response.json()
for name, _, expected in cases:
got = result[name]
self.assertEqual(expected, got, 'Mismatch for {}'.format(name))
self.assertLess(len(got), 256,
"Got a long file name (%s characters)." % len(got))
def test_file_content(self):
file = tempfile.NamedTemporaryFile
with file(suffix=".ctype_extra") as no_content_type, file(suffix=".ctype_extra") as simple_file:
no_content_type.write(b'no content')
no_content_type.seek(0)
simple_file.write(b'text content')
simple_file.seek(0)
simple_file.content_type = 'text/plain'
string_io = StringIO('string content')
bytes_io = BytesIO(b'binary content')
response = self.client.post('/echo_content/', {
'no_content_type': no_content_type,
'simple_file': simple_file,
'string': string_io,
'binary': bytes_io,
})
received = response.json()
self.assertEqual(received['no_content_type'], 'no content')
self.assertEqual(received['simple_file'], 'text content')
self.assertEqual(received['string'], 'string content')
self.assertEqual(received['binary'], 'binary content')
def test_content_type_extra(self):
"""Uploaded files may have content type parameters available."""
file = tempfile.NamedTemporaryFile
with file(suffix=".ctype_extra") as no_content_type, file(suffix=".ctype_extra") as simple_file:
no_content_type.write(b'something')
no_content_type.seek(0)
simple_file.write(b'something')
simple_file.seek(0)
simple_file.content_type = 'text/plain; test-key=test_value'
response = self.client.post('/echo_content_type_extra/', {
'no_content_type': no_content_type,
'simple_file': simple_file,
})
received = response.json()
self.assertEqual(received['no_content_type'], {})
self.assertEqual(received['simple_file'], {'test-key': 'test_value'})
def test_truncated_multipart_handled_gracefully(self):
"""
If passed an incomplete multipart message, MultiPartParser does not
attempt to read beyond the end of the stream, and simply will handle
the part that can be parsed gracefully.
"""
payload_str = "\r\n".join([
'--' + client.BOUNDARY,
'Content-Disposition: form-data; name="file"; filename="foo.txt"',
'Content-Type: application/octet-stream',
'',
'file contents'
'--' + client.BOUNDARY + '--',
'',
])
payload = client.FakePayload(payload_str[:-10])
r = {
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': '/echo/',
'REQUEST_METHOD': 'POST',
'wsgi.input': payload,
}
self.assertEqual(self.client.request(**r).json(), {})
def test_empty_multipart_handled_gracefully(self):
"""
If passed an empty multipart message, MultiPartParser will return
an empty QueryDict.
"""
r = {
'CONTENT_LENGTH': 0,
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': '/echo/',
'REQUEST_METHOD': 'POST',
'wsgi.input': client.FakePayload(b''),
}
self.assertEqual(self.client.request(**r).json(), {})
def test_custom_upload_handler(self):
file = tempfile.NamedTemporaryFile
with file() as smallfile, file() as bigfile:
# A small file (under the 5M quota)
smallfile.write(b'a' * (2 ** 21))
smallfile.seek(0)
# A big file (over the quota)
bigfile.write(b'a' * (10 * 2 ** 20))
bigfile.seek(0)
# Small file posting should work.
self.assertIn('f', self.client.post('/quota/', {'f': smallfile}).json())
# Large files don't go through.
self.assertNotIn('f', self.client.post("/quota/", {'f': bigfile}).json())
def test_broken_custom_upload_handler(self):
with tempfile.NamedTemporaryFile() as file:
file.write(b'a' * (2 ** 21))
file.seek(0)
# AttributeError: You cannot alter upload handlers after the upload has been processed.
with self.assertRaises(AttributeError):
self.client.post('/quota/broken/', {'f': file})
def test_fileupload_getlist(self):
file = tempfile.NamedTemporaryFile
with file() as file1, file() as file2, file() as file2a:
file1.write(b'a' * (2 ** 23))
file1.seek(0)
file2.write(b'a' * (2 * 2 ** 18))
file2.seek(0)
file2a.write(b'a' * (5 * 2 ** 20))
file2a.seek(0)
response = self.client.post('/getlist_count/', {
'file1': file1,
'field1': 'test',
'field2': 'test3',
'field3': 'test5',
'field4': 'test6',
'field5': 'test7',
'file2': (file2, file2a)
})
got = response.json()
self.assertEqual(got.get('file1'), 1)
self.assertEqual(got.get('file2'), 2)
def test_fileuploads_closed_at_request_end(self):
file = tempfile.NamedTemporaryFile
with file() as f1, file() as f2a, file() as f2b:
response = self.client.post('/fd_closing/t/', {
'file': f1,
'file2': (f2a, f2b),
})
request = response.wsgi_request
# The files were parsed.
self.assertTrue(hasattr(request, '_files'))
file = request._files['file']
self.assertTrue(file.closed)
files = request._files.getlist('file2')
self.assertTrue(files[0].closed)
self.assertTrue(files[1].closed)
def test_no_parsing_triggered_by_fd_closing(self):
file = tempfile.NamedTemporaryFile
with file() as f1, file() as f2a, file() as f2b:
response = self.client.post('/fd_closing/f/', {
'file': f1,
'file2': (f2a, f2b),
})
request = response.wsgi_request
# The fd closing logic doesn't trigger parsing of the stream
self.assertFalse(hasattr(request, '_files'))
def test_file_error_blocking(self):
"""
The server should not block when there are upload errors (bug #8622).
This can happen if something -- i.e. an exception handler -- tries to
access POST while handling an error in parsing POST. This shouldn't
cause an infinite loop!
"""
class POSTAccessingHandler(client.ClientHandler):
"""A handler that'll access POST during an exception."""
def handle_uncaught_exception(self, request, resolver, exc_info):
ret = super().handle_uncaught_exception(request, resolver, exc_info)
request.POST # evaluate
return ret
        # Maybe this is a little more complicated than it needs to be; but if
# the django.test.client.FakePayload.read() implementation changes then
# this test would fail. So we need to know exactly what kind of error
# it raises when there is an attempt to read more than the available bytes:
try:
client.FakePayload(b'a').read(2)
except Exception as err:
reference_error = err
# install the custom handler that tries to access request.POST
self.client.handler = POSTAccessingHandler()
with open(__file__, 'rb') as fp:
post_data = {
'name': 'Ringo',
'file_field': fp,
}
try:
self.client.post('/upload_errors/', post_data)
except reference_error.__class__ as err:
self.assertFalse(
str(err) == str(reference_error),
"Caught a repeated exception that'll cause an infinite loop in file uploads."
)
except Exception as err:
# CustomUploadError is the error that should have been raised
self.assertEqual(err.__class__, uploadhandler.CustomUploadError)
def test_filename_case_preservation(self):
"""
The storage backend shouldn't mess with the case of the filenames
uploaded.
"""
# Synthesize the contents of a file upload with a mixed case filename
# so we don't have to carry such a file in the Django tests source code
# tree.
vars = {'boundary': 'oUrBoUnDaRyStRiNg'}
post_data = [
'--%(boundary)s',
'Content-Disposition: form-data; name="file_field"; filename="MiXeD_cAsE.txt"',
'Content-Type: application/octet-stream',
'',
'file contents\n'
'',
'--%(boundary)s--\r\n',
]
response = self.client.post(
'/filename_case/',
'\r\n'.join(post_data) % vars,
'multipart/form-data; boundary=%(boundary)s' % vars
)
self.assertEqual(response.status_code, 200)
id = int(response.content)
obj = FileModel.objects.get(pk=id)
# The name of the file uploaded and the file stored in the server-side
# shouldn't differ.
self.assertEqual(os.path.basename(obj.testfile.path), 'MiXeD_cAsE.txt')
@override_settings(MEDIA_ROOT=MEDIA_ROOT)
class DirectoryCreationTests(SimpleTestCase):
"""
Tests for error handling during directory creation
via _save_FIELD_file (ticket #6450)
"""
@classmethod
def setUpClass(cls):
super().setUpClass()
if not os.path.isdir(MEDIA_ROOT):
os.makedirs(MEDIA_ROOT)
@classmethod
def tearDownClass(cls):
shutil.rmtree(MEDIA_ROOT)
super().tearDownClass()
def setUp(self):
self.obj = FileModel()
@unittest.skipIf(sys.platform == 'win32', "Python on Windows doesn't have working os.chmod().")
def test_readonly_root(self):
"""Permission errors are not swallowed"""
os.chmod(MEDIA_ROOT, 0o500)
self.addCleanup(os.chmod, MEDIA_ROOT, 0o700)
with self.assertRaises(PermissionError):
self.obj.testfile.save('foo.txt', SimpleUploadedFile('foo.txt', b'x'), save=False)
def test_not_a_directory(self):
"""The correct IOError is raised when the upload directory name exists but isn't a directory"""
# Create a file with the upload directory name
open(UPLOAD_TO, 'wb').close()
self.addCleanup(os.remove, UPLOAD_TO)
with self.assertRaises(IOError) as exc_info:
with SimpleUploadedFile('foo.txt', b'x') as file:
self.obj.testfile.save('foo.txt', file, save=False)
# The test needs to be done on a specific string as IOError
# is raised even without the patch (just not early enough)
self.assertEqual(exc_info.exception.args[0], "%s exists and is not a directory." % UPLOAD_TO)
class MultiParserTests(unittest.TestCase):
def test_empty_upload_handlers(self):
# We're not actually parsing here; just checking if the parser properly
# instantiates with empty upload handlers.
MultiPartParser({
'CONTENT_TYPE': 'multipart/form-data; boundary=_foo',
'CONTENT_LENGTH': '1'
}, StringIO('x'), [], 'utf-8')
def test_rfc2231_parsing(self):
test_data = (
(b"Content-Type: application/x-stuff; title*=us-ascii'en-us'This%20is%20%2A%2A%2Afun%2A%2A%2A",
"This is ***fun***"),
(b"Content-Type: application/x-stuff; title*=UTF-8''foo-%c3%a4.html",
"foo-ä.html"),
(b"Content-Type: application/x-stuff; title*=iso-8859-1''foo-%E4.html",
"foo-ä.html"),
)
for raw_line, expected_title in test_data:
parsed = parse_header(raw_line)
self.assertEqual(parsed[1]['title'], expected_title)
def test_rfc2231_wrong_title(self):
"""
Test wrongly formatted RFC 2231 headers (missing double single quotes).
Parsing should not crash (#24209).
"""
test_data = (
(b"Content-Type: application/x-stuff; title*='This%20is%20%2A%2A%2Afun%2A%2A%2A",
b"'This%20is%20%2A%2A%2Afun%2A%2A%2A"),
(b"Content-Type: application/x-stuff; title*='foo.html",
b"'foo.html"),
(b"Content-Type: application/x-stuff; title*=bar.html",
b"bar.html"),
)
for raw_line, expected_title in test_data:
parsed = parse_header(raw_line)
self.assertEqual(parsed[1]['title'], expected_title)
| bsd-3-clause | -218,260,698,169,522,560 | 38.558333 | 115 | 0.554624 | false | 3.972385 | true | false | false |
onyxfish/agate | agate/table/bar_chart.py | 3 | 1303 | #!/usr/bin/env python
# pylint: disable=W0212
import leather
from agate import utils
def bar_chart(self, label=0, value=1, path=None, width=None, height=None):
"""
Render a bar chart using :class:`leather.Chart`.
:param label:
The name or index of a column to plot as the labels of the chart.
Defaults to the first column in the table.
:param value:
The name or index of a column to plot as the values of the chart.
Defaults to the second column in the table.
:param path:
If specified, the resulting SVG will be saved to this location. If
:code:`None` and running in IPython, then the SVG will be rendered
inline. Otherwise, the SVG data will be returned as a string.
:param width:
The width of the output SVG.
:param height:
The height of the output SVG.
"""
if type(label) is int:
label_name = self.column_names[label]
else:
label_name = label
if type(value) is int:
value_name = self.column_names[value]
else:
value_name = value
chart = leather.Chart()
chart.add_x_axis(name=value_name)
chart.add_y_axis(name=label_name)
chart.add_bars(self, x=value, y=label)
return chart.to_svg(path=path, width=width, height=height)
| mit | 4,765,904,349,357,894,000 | 29.302326 | 74 | 0.646201 | false | 3.670423 | false | false | false |
heiscsy/evolutus_ros_src | evolutus_filter/build/catkin_generated/installspace/_setup_util.py | 4 | 12275 | #!/usr/bin/python
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
'''This file generates shell code for the setup.SHELL scripts to set environment variables'''
from __future__ import print_function
import argparse
import copy
import errno
import os
import platform
import sys
CATKIN_MARKER_FILE = '.catkin'
system = platform.system()
IS_DARWIN = (system == 'Darwin')
IS_WINDOWS = (system == 'Windows')
# subfolder of workspace prepended to CMAKE_PREFIX_PATH
ENV_VAR_SUBFOLDERS = {
'CMAKE_PREFIX_PATH': '',
'CPATH': 'include',
'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': ['lib', os.path.join('lib', 'x86_64-linux-gnu')],
'PATH': 'bin',
'PKG_CONFIG_PATH': [os.path.join('lib', 'pkgconfig'), os.path.join('lib', 'x86_64-linux-gnu', 'pkgconfig')],
'PYTHONPATH': 'lib/python2.7/dist-packages',
}
def rollback_env_variables(environ, env_var_subfolders):
'''
Generate shell code to reset environment variables
by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH.
This does not cover modifications performed by environment hooks.
'''
lines = []
unmodified_environ = copy.copy(environ)
for key in sorted(env_var_subfolders.keys()):
subfolders = env_var_subfolders[key]
if not isinstance(subfolders, list):
subfolders = [subfolders]
for subfolder in subfolders:
value = _rollback_env_variable(unmodified_environ, key, subfolder)
if value is not None:
environ[key] = value
lines.append(assignment(key, value))
if lines:
lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH'))
return lines
def _rollback_env_variable(environ, name, subfolder):
'''
For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder.
    :param subfolder: str '' or a subfolder name that may start with '/'
:returns: the updated value of the environment variable.
'''
value = environ[name] if name in environ else ''
env_paths = [path for path in value.split(os.pathsep) if path]
value_modified = False
if subfolder:
if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)):
subfolder = subfolder[1:]
if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)):
subfolder = subfolder[:-1]
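    # For every known workspace, drop the first env entry that matches
    # workspace + subfolder, so re-sourcing setup files does not accumulate paths.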
for ws_path in _get_workspaces(environ, include_fuerte=True, include_non_existing=True):
path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path
path_to_remove = None
for env_path in env_paths:
env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path
if env_path_clean == path_to_find:
path_to_remove = env_path
break
if path_to_remove:
env_paths.remove(path_to_remove)
value_modified = True
new_value = os.pathsep.join(env_paths)
return new_value if value_modified else None
def _get_workspaces(environ, include_fuerte=False, include_non_existing=False):
'''
Based on CMAKE_PREFIX_PATH return all catkin workspaces.
:param include_fuerte: The flag if paths starting with '/opt/ros/fuerte' should be considered workspaces, ``bool``
'''
# get all cmake prefix paths
env_name = 'CMAKE_PREFIX_PATH'
value = environ[env_name] if env_name in environ else ''
paths = [path for path in value.split(os.pathsep) if path]
# remove non-workspace paths
workspaces = [path for path in paths if os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE)) or (include_fuerte and path.startswith('/opt/ros/fuerte')) or (include_non_existing and not os.path.exists(path))]
return workspaces
def prepend_env_variables(environ, env_var_subfolders, workspaces):
'''
Generate shell code to prepend environment variables
for the all workspaces.
'''
lines = []
lines.append(comment('prepend folders of workspaces to environment variables'))
paths = [path for path in workspaces.split(os.pathsep) if path]
prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', paths, '')
lines.append(prepend(environ, 'CMAKE_PREFIX_PATH', prefix))
for key in sorted([key for key in env_var_subfolders.keys() if key != 'CMAKE_PREFIX_PATH']):
subfolder = env_var_subfolders[key]
prefix = _prefix_env_variable(environ, key, paths, subfolder)
lines.append(prepend(environ, key, prefix))
return lines
def _prefix_env_variable(environ, name, paths, subfolders):
'''
    Return the prefix to prepend to the environment variable NAME, adding any path in `paths` (joined with each subfolder) without creating duplicate or empty items.
'''
value = environ[name] if name in environ else ''
environ_paths = [path for path in value.split(os.pathsep) if path]
checked_paths = []
for path in paths:
if not isinstance(subfolders, list):
subfolders = [subfolders]
for subfolder in subfolders:
path_tmp = path
if subfolder:
path_tmp = os.path.join(path_tmp, subfolder)
# exclude any path already in env and any path we already added
if path_tmp not in environ_paths and path_tmp not in checked_paths:
checked_paths.append(path_tmp)
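    # Append a trailing separator only when the variable already has a value, so
    # the generated shell code can concatenate the existing "$NAME" directly.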
prefix_str = os.pathsep.join(checked_paths)
if prefix_str != '' and environ_paths:
prefix_str += os.pathsep
return prefix_str
def assignment(key, value):
if not IS_WINDOWS:
return 'export %s="%s"' % (key, value)
else:
return 'set %s=%s' % (key, value)
def comment(msg):
if not IS_WINDOWS:
return '# %s' % msg
else:
return 'REM %s' % msg
def prepend(environ, key, prefix):
if key not in environ or not environ[key]:
return assignment(key, prefix)
if not IS_WINDOWS:
return 'export %s="%s$%s"' % (key, prefix, key)
else:
return 'set %s=%s%%%s%%' % (key, prefix, key)
def find_env_hooks(environ, cmake_prefix_path):
'''
Generate shell code with found environment hooks
for the all workspaces.
'''
lines = []
lines.append(comment('found environment hooks in workspaces'))
generic_env_hooks = []
generic_env_hooks_workspace = []
specific_env_hooks = []
specific_env_hooks_workspace = []
generic_env_hooks_by_filename = {}
specific_env_hooks_by_filename = {}
generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh'
specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None
# remove non-workspace paths
workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))]
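    # Workspaces are visited from lowest to highest precedence so that a hook
    # with the same filename in a higher-priority workspace wins.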
for workspace in reversed(workspaces):
env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d')
if os.path.isdir(env_hook_dir):
for filename in sorted(os.listdir(env_hook_dir)):
if filename.endswith('.%s' % generic_env_hook_ext):
# remove previous env hook with same name if present
if filename in generic_env_hooks_by_filename:
i = generic_env_hooks.index(generic_env_hooks_by_filename[filename])
generic_env_hooks.pop(i)
generic_env_hooks_workspace.pop(i)
# append env hook
generic_env_hooks.append(os.path.join(env_hook_dir, filename))
generic_env_hooks_workspace.append(workspace)
generic_env_hooks_by_filename[filename] = generic_env_hooks[-1]
elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext):
# remove previous env hook with same name if present
if filename in specific_env_hooks_by_filename:
i = specific_env_hooks.index(specific_env_hooks_by_filename[filename])
specific_env_hooks.pop(i)
specific_env_hooks_workspace.pop(i)
# append env hook
specific_env_hooks.append(os.path.join(env_hook_dir, filename))
specific_env_hooks_workspace.append(workspace)
specific_env_hooks_by_filename[filename] = specific_env_hooks[-1]
env_hooks = generic_env_hooks + specific_env_hooks
env_hooks_workspace = generic_env_hooks_workspace + specific_env_hooks_workspace
count = len(env_hooks)
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_COUNT', count))
for i in range(count):
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d' % i, env_hooks[i]))
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d_WORKSPACE' % i, env_hooks_workspace[i]))
return lines
def _parse_arguments(args=None):
parser = argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.')
parser.add_argument('--extend', action='store_true', help='Skip unsetting previous environment variables to extend context')
return parser.parse_known_args(args=args)[0]
if __name__ == '__main__':
try:
try:
args = _parse_arguments()
except Exception as e:
print(e, file=sys.stderr)
sys.exit(1)
# environment at generation time
CMAKE_PREFIX_PATH = '/home/yuhan/catkin_ws/devel;/opt/ros/indigo'.split(';')
# prepend current workspace if not already part of CPP
base_path = os.path.dirname(__file__)
if base_path not in CMAKE_PREFIX_PATH:
CMAKE_PREFIX_PATH.insert(0, base_path)
CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH)
environ = dict(os.environ)
lines = []
if not args.extend:
lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS)
lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH)
lines += find_env_hooks(environ, CMAKE_PREFIX_PATH)
print('\n'.join(lines))
# need to explicitly flush the output
sys.stdout.flush()
except IOError as e:
        # and catch potential "broken pipe" if stdout is not writable
# which can happen when piping the output to a file but the disk is full
if e.errno == errno.EPIPE:
print(e, file=sys.stderr)
sys.exit(2)
raise
sys.exit(0)
| gpl-2.0 | 4,278,920,657,189,255,000 | 41.770035 | 213 | 0.657515 | false | 3.812112 | false | false | false |