repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---|
arcturusannamalai/Ezhil-Lang | ezhil/ezhil_transforms.py | 2 | 7334 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
##
## (C) 2015 Muthiah Annamalai
## Licensed under GPL Version 3
##
## Interpreter for Ezhil language
from __future__ import print_function
import sys
PYTHON3 = (sys.version[0] == '3')
if PYTHON3:
unicode = str
## Tx
from .transform import Visitor, TransformVisitor
from .ezhil_scanner import EzhilToken
## AST elements
from .ast import Expr, ExprCall, ExprList, Stmt, ReturnStmt, \
BreakStmt, ContinueStmt, ElseStmt, IfStmt, WhileStmt, \
ForStmt, AssignStmt, PrintStmt, EvalStmt, ArgList, \
ValueList, Function, StmtList, Identifier, Number, \
String, Boolean
from .errors import RuntimeException, SemanticException
class TransformEntryExitProfile(TransformVisitor):
def __init__(self, **kwargs):
TransformVisitor.__init__(self, **kwargs)
def visit_program_or_script(self, stmt_list):
l, c = 0, 0
stmt_list.dbg_msg(" add call : profile(\"begin\")")
begin = ValueList([String("begin")], l, c, self.debug)
call_profile_begin = ExprCall(Identifier("profile", l, c), begin, l, c,
self.debug)
stmt_list.List.insert(0, call_profile_begin)
stmt_list.dbg_msg(" add call : 'profile(\"results\")'")
results = ValueList([String("results")], l, c, self.debug)
call_profile_results = ExprCall(Identifier("profile", l, c), results,
l, c, self.debug)
stmt_list.append(call_profile_results)
return
class TransformSafeModeFunctionCheck(TransformVisitor):
def __init__(self, **kwargs):
self.forbidden_fcn_names = [u'raw_input',u'input',u'fopen',u'open',u'fclose',\
u'உள்ளீடு',u'turtle',u'கோப்பை_எழுது',u'கோப்பை_திற',u'கோப்பை_மூடு']
TransformVisitor.__init__(self, **kwargs)
def visit_expr_call(self, expr_call):
callee = expr_call.func_id.id
if callee in self.forbidden_fcn_names:
raise RuntimeException(
u"ERROR %s:\n\t %s may not be used in SAFE MODE ." %
(self.interpreter.get_fname(), unicode(expr_call)))
if expr_call.arglist:
expr_call.arglist.visit(self)
return
# Type checker for ezhil - rules list #65
class TransformSemanticAnalyzer(TransformVisitor):
def __init__(self, **kwargs):
TransformVisitor.__init__(self, **kwargs)
return
    # Rules for type checking the Ezhil AST:
    # 1. Only like types may be added: numbers with numbers and strings with strings, never one with the other.
    # 2. Arrays may be indexed only with integers/numbers, and dictionaries only with strings.
    # 3. Argument types and argument counts of builtin functions may be checked.
    # 4. Function calls may be checked for the correct number of arguments.
def visit_expr_call(self, expr_call):
callee = expr_call.func_id.id
if callee == u"__getitem__":
# T.B.D
pass
if expr_call.arglist:
expr_call.arglist.visit(self)
return
# check if the constants are on lhs of assignment statements
# check if the strings are added to numbers
# check ...
def visit_assign_stmt(self, assign_stmt):
if any(
map(lambda typename: isinstance(assign_stmt.lvalue, typename),
[Number, String, Boolean, Function])):
raise SemanticException(
"Cannot use number, string, constant or functions on LHS of assignment %s"
% unicode(assign_stmt))
if assign_stmt.lvalue:
assign_stmt.lvalue.visit(self)
if assign_stmt.rvalue:
assign_stmt.rvalue.visit(self)
return
def visit_binary_expr(self, binexpr):
lhs_is_string = isinstance(binexpr.term, String)
rhs_is_string = isinstance(binexpr.next_expr, String)
lhs_id_expr_call = isinstance(binexpr.term, ExprCall) or isinstance(
binexpr.term, Identifier)
rhs_id_expr_call = isinstance(binexpr.next_expr,
ExprCall) or isinstance(
binexpr.next_expr, Identifier)
if isinstance(binexpr.next_expr, Expr):
binexpr.next_expr.visit(self)
return
binexpr.term.visit(self)
if binexpr.binop.kind != EzhilToken.PLUS:
if lhs_is_string or rhs_is_string:
if binexpr.binop.kind in EzhilToken.COMPARE or binexpr.binop.kind == EzhilToken.PROD:
pass
else:
raise SemanticException(
"Cannot use string with operators other than '+','>=','<=','!=','==','>','<' or '*' at expression %s %s"
% (unicode(binexpr), binexpr.get_pos()))
else:
if lhs_is_string or rhs_is_string:
if not ((lhs_is_string and rhs_is_string) or \
(lhs_is_string and rhs_id_expr_call) or \
(rhs_is_string and lhs_id_expr_call)):
raise SemanticException(
"Cannot join strings and expression at expression %s" %
unicode(binexpr))
return
def visit_import(self, importstmt):
if not isinstance(importstmt.filename, String):
raise SemanticException(
"Import statement should be a string at time of interpretation at %s"
% unicode(importstmt))
return
class TransformConstantFolder(TransformVisitor):
def __init__(self, **kwargs):
TransformVisitor.__init__(self, **kwargs)
self.rval = None
#print(self.top_ast)
return
def constant_fold(self, binexpr):
return binexpr.evaluate(None)
    def can_fold_expr(self, expr):
        # Only plain numeric literals are foldable; report anything else as not
        # foldable so the caller's tuple unpacking cannot fail.
        if isinstance(expr, Number):
            return True, expr
        return False, expr
def reset(self):
self.rval = None
def get_rval(self):
op = self.rval
self.reset()
return op
def visit_number(self, num):
self.rval = num
return
def visit_binary_expr(self, binexpr):
# if lhs is constant and you are able to fold rhs
# then replace binexpr with the value
#print(type(binexpr.term))
#print(type(binexpr.next_expr))
next_expr_alt = None
if isinstance(binexpr.next_expr, Expr):
binexpr.next_expr.visit(self)
next_expr_alt = self.get_rval()
else:
next_expr_alt = binexpr.next_expr
binexpr.term.visit(self)
term_expr_alt = self.get_rval()
print(type(term_expr_alt))
#print("-------")
if next_expr_alt == None or term_expr_alt == None:
return None
#print("------x------")
lhs_is_num = isinstance(term_expr_alt, Number)
[foldable, val] = self.can_fold_expr(next_expr_alt)
if foldable:
print("foldable")
# new API needed to replace the node
binexpr.term = term_expr_alt
binexpr.next_expr = next_expr_alt
newval = self.constant_fold(binexpr)
binexpr.replace(newval)
print(str(newval), newval)
return Number(newval)
return None
| gpl-3.0 | 3,367,327,256,590,115,300 | 34.73399 | 128 | 0.583954 | false |
tpoy0099/option_calculator | gui_impl/position_editor.py | 1 | 4509 |
#coding=utf8
from qt_ui.ui_position_editor import Ui_position_editor_dialog
from gui_impl.qt_mvc_impl import MatrixModel, AutoFormDelegate
from gui_impl.qtableview_utility import getSelectedRows
from utility.data_handler import TableHandler
from PyQt4.QtCore import *
from PyQt4.QtGui import *
##############################################################################
class PosEditor(QDialog, Ui_position_editor_dialog):
EDIT_TABLE_HEADERS = ('group', 'code', 'dir', 'lots', 'open_price', 'margin', 'open_date')
def __init__(self, parent=None):
super(PosEditor, self).__init__(parent)
self.setupUi(self)
self.setModal(True)
#signal&slot
self.connect(self.cancel_button, SIGNAL("clicked()"), self.onCancelBtClicked)
self.connect(self.save_button, SIGNAL("clicked()"), self.onSaveAllBtClicked)
self.connect(self.reload_button, SIGNAL("clicked()"), self.onReloadBtClicked)
self.connect(self.save_csv_button, SIGNAL("clicked()"), self.onSaveCsvBtClicked)
self.connect(self.addrow_button, SIGNAL("clicked()"), self.onAddrowBtClicked)
self.connect(self.delrows_button, SIGNAL("clicked()"), self.onDelRowBtClicked)
#init mvc impl
self.model = MatrixModel(self)
self.delegate = AutoFormDelegate(self)
self.position_edit_vtable.setItemDelegate(self.delegate)
self.position_edit_vtable.setModel(self.model)
#init data
self.controler = None
self.model.setSize(0, PosEditor.EDIT_TABLE_HEADERS)
def setControler(self, ctl):
self.controler = ctl
#--------------------------------------------------
def wakeupEditor(self):
self.show()
def setEditTableContent(self, table_hdl_inst):
self.model.setTableContent(table_hdl_inst)
#--------------------------------------------------
def onAddrowBtClicked(self):
self.model.appendRows()
def onDelRowBtClicked(self):
rows = getSelectedRows(self.position_edit_vtable)
if rows:
self.model.deleteRows(rows)
def onCancelBtClicked(self):
self.model.clearContent()
self.close()
@staticmethod
def findInvalidRows(t_data=TableHandler()):
invalid_rows = list()
for r in range(0, t_data.rows):
for h in ['group', 'code', 'dir', 'lots', 'open_price', 'margin']:
val = t_data.getByHeader(r, h)
if val is None or val == '':
invalid_rows.append(r)
return invalid_rows
def onSaveAllBtClicked(self):
rtn = QMessageBox.question(self, 'Confirm', 'Save position changes?',
QMessageBox.Yes, QMessageBox.No)
if rtn == QMessageBox.Yes:
data = TableHandler()
data.copy(self.model.data)
invalid_rows = PosEditor.findInvalidRows(data)
if invalid_rows:
data.delRows(invalid_rows)
if data.rows > 0:
self.controler.onEditorClickBtSaveAll(data)
else:
QMessageBox.warning(self, 'Error',
'None valid records!', QMessageBox.Yes)
#notify
if invalid_rows:
info_str = 'Invalid rows deleted:\n%s' % str([i+1 for i in invalid_rows])
QMessageBox.warning(self, 'Warning', info_str, QMessageBox.Yes)
else:
self.close()
return
def onReloadBtClicked(self):
rtn = QMessageBox.question(self, 'Confirm', 'Reloading from position.csv?',
QMessageBox.Yes, QMessageBox.No)
if rtn == QMessageBox.Yes:
self.controler.onEditorClickBtReloadPosition()
return
def onSaveCsvBtClicked(self):
rtn = QMessageBox.question(self, 'Confirm', 'Writing positions to position.csv?',
QMessageBox.Yes, QMessageBox.No)
if rtn == QMessageBox.Yes:
self.controler.onSavePosition2Csv()
return
#######################################################################
if __name__ == '__main__':
import sys, random
app = QApplication(sys.argv)
pedit = PosEditor()
th = TableHandler()
th.reset(10, PosEditor.EDIT_TABLE_HEADERS)
for r in range(0, 10):
for h in PosEditor.EDIT_TABLE_HEADERS:
th.setByHeader(r, h, random.randint(0,10))
pedit.wakeupEditor()
sys.exit(app.exec_())
| gpl-2.0 | -6,616,695,230,852,411,000 | 37.211864 | 94 | 0.573963 | false |
ezekial4/atomic_neu | atomic/tests/test_atomic_data.py | 1 | 4985 |
import unittest
import numpy as np
import atomic_neu as atomic
class TestAtomicData(unittest.TestCase):
def test_element_data_names_abbreviated_and_long(self):
"""This does not require that the user has downloaded data"""
data1 = atomic.atomic_data._element_data('Li')
data2 = atomic.atomic_data._element_data('lithium')
self.assertEqual(data1['recombination'],'acd96_li.dat')
self.assertEqual(data2['recombination'],'acd96_li.dat')
def test_element_data_dict_no_cx_power(self):
data1 = atomic.atomic_data._element_data_dict('li', 96)
expected = {'recombination': 'acd96_li.dat',
'line_power': 'plt96_li.dat',
'ionisation': 'scd96_li.dat',
'ionisation_potential': 'ecd96_li.dat',
'continuum_power': 'prb96_li.dat'}
self.assertEqual(data1, expected)
def test_element_data_dict_cx_power(self):
data1 = atomic.atomic_data._element_data_dict('ar', 89, has_cx_power=True)
self.assertEqual(data1['cx_power'], 'prc89_ar.dat')
def test_element_data_not_implemented_error(self):
with self.assertRaises(NotImplementedError):
atomic.atomic_data._element_data('adamantium')
def test_from_element(self):
"""Requires data from ./fetch_adas_data to be in the correct spot."""
ad = atomic.AtomicData.from_element('Li')
self.assertEqual(ad.nuclear_charge,3)
self.assertEqual(ad.element,'Li')
rc = ad.coeffs['ionisation']
self.assertIsInstance(rc, atomic.atomic_data.RateCoefficient)
def test_make_element_initial_uppercase(self):
ad = atomic.AtomicData.from_element('c')
self.assertEqual(ad.element,'C')
class TestRateCoefficient(unittest.TestCase):
def setUp(self):
data = atomic.atomic_data._element_data('Li')
self.ionis = atomic.atomic_data._full_path(data['ionisation'])
self.rc = atomic.atomic_data.RateCoefficient.from_adf11(self.ionis)
def test_density_grid(self):
length = 16
self.assertEqual(length, len(self.rc.density_grid))
min_density = 1.0e14
self.assertEqual(min_density, self.rc.density_grid[0])
def test_temperature_grid(self):
"""Test that temperature_grid returns values in [eV]"""
length = 25
self.assertEqual(length, len(self.rc.temperature_grid))
min_temperature = 2.0009e-1
result = self.rc.temperature_grid[0]
self.assertAlmostEqual(min_temperature, result, 4)
def test___call__(self):
"""Units are in [m^3/s]"""
expected = np.array([2.068e-13])
result = self.rc(0, 10, 1e19)
self.assertAlmostEqual(expected, result, 3)
def test___call__both_backwards(self):
"""Units are in [m^3/s]"""
expected = 2
result = self.rc(0, np.array([20, 10]), np.array([1e19,1e20]))
self.assertEqual(expected, len(result))
@unittest.skip("")
def test___init__(self):
# rate_coefficient = RateCoefficient(nuclear_charge, element, log_temperature, log_density, log_coeff, name)
assert False # TODO: implement your test here
@unittest.skip("")
def test_copy(self):
# rate_coefficient = RateCoefficient(nuclear_charge, element, log_temperature, log_density, log_coeff, name)
# self.assertEqual(expected, rate_coefficient.copy())
assert False # TODO: implement your test here
@unittest.skip("")
def test_from_adf11(self):
# rate_coefficient = RateCoefficient(nuclear_charge, element, log_temperature, log_density, log_coeff, name)
# self.assertEqual(expected, rate_coefficient.from_adf11(name))
assert False # TODO: implement your test here
def test_log10_one_one(self):
tp = self.rc.temperature_grid[0]
dp = self.rc.density_grid[0]
log_rc = self.rc.log10(0, tp, dp)
expected = np.array([-22.8983])
np.testing.assert_allclose(expected, log_rc)
def test_log10_many_many(self):
tg = self.rc.temperature_grid
dg = self.rc.density_grid
log_rcs = self.rc.log10(0,tg[:len(dg)], dg)
expected = np.array([-22.8983 , -19.74759, -17.24704, -15.99432, -15.06829, -13.54429,
-13.24645, -13.07121, -12.95795, -12.73103, -12.60551, -12.28684,
-12.20186, -12.11832, -12.11021, -12.08091])
np.testing.assert_allclose(expected, log_rcs)
class TestZeroCoefficient(unittest.TestCase):
@unittest.skip("")
def test___call__(self):
# zero_coefficient = ZeroCoefficient()
# self.assertEqual(expected, zero_coefficient.__call__(k, Te, ne))
assert False # TODO: implement your test here
@unittest.skip("")
def test___init__(self):
# zero_coefficient = ZeroCoefficient()
assert False # TODO: implement your test here
if __name__ == '__main__':
unittest.main()
| mit | 245,744,006,883,106,300 | 40.541667 | 116 | 0.626078 | false |
nasa-gibs/onearth | src/empty_tile/oe_generate_empty_tile.py | 1 | 11279 |
#!/usr/bin/env python3
# Copyright (c) 2002-2017, California Institute of Technology.
# All rights reserved. Based on Government Sponsored Research under contracts NAS7-1407 and/or NAS7-03001.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the California Institute of Technology (Caltech), its operating division the Jet Propulsion Laboratory (JPL),
# the National Aeronautics and Space Administration (NASA), nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE CALIFORNIA INSTITUTE OF TECHNOLOGY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# oe_generate_empty_tile.py
# The OnEarth Empty Tile Generator.
#
#
# Global Imagery Browse Services
# NASA Jet Propulsion Laboratory
# 2015
import sys
import urllib.request, urllib.parse, urllib.error
import xml.dom.minidom
from optparse import OptionParser
import png
toolName = "oe_generate_empty_tile.py"
versionNumber = "v1.4.0"
class ColorMap:
"""ColorMap metadata"""
def __init__(self, units, colormap_entries, style):
self.units = units
self.colormap_entries = colormap_entries
self.style = str(style).lower()
def __repr__(self):
if self.units != None:
xml = '<ColorMap units="%s">' % (self.units)
else:
xml = '<ColorMap>'
for colormap_entry in self.colormap_entries:
xml = xml + '\n ' + colormap_entry.__repr__()
xml = xml + '\n</ColorMap>'
return xml
def __str__(self):
return self.__repr__().encode(sys.stdout.encoding)
class ColorMapEntry:
"""ColorMapEntry values within a ColorMap"""
def __init__(self, red, green, blue, transparent, source_value, value, label, nodata):
self.red = int(red)
self.green = int(green)
self.blue = int(blue)
self.transparent = transparent
self.source_value = source_value
self.value = value
self.label = label
self.nodata = nodata
self.color = [float(red)/255.0,float(green)/255.0,float(blue)/255.0]
def __repr__(self):
if self.value != None:
xml = '<ColorMapEntry rgb="%d,%d,%d" transparent="%s" nodata="%s" sourceValue="%s" value="%s" label="%s"/>' % (self.red, self.green, self.blue, self.transparent, self.nodata, self.source_value, self.value, self.label)
else:
xml = '<ColorMapEntry rgb="%d,%d,%d" transparent="%s" nodata="%s" sourceValue="%s" label="%s"/>' % (self.red, self.green, self.blue, self.transparent, self.nodata, self.source_value, self.label)
return xml
def __str__(self):
return self.__repr__().encode(sys.stdout.encoding)
def parse_colormap(colormap_location, verbose):
try:
if verbose:
print("Reading color map:", colormap_location)
colormap_file = open(colormap_location,'r')
dom = xml.dom.minidom.parse(colormap_file)
colormap_file.close()
except IOError:
print("Accessing URL", colormap_location)
try:
dom = xml.dom.minidom.parse(urllib.request.urlopen(colormap_location))
except:
msg = "URL " + colormap_location + " is not accessible"
print(msg, file=sys.stderr)
raise Exception(msg)
style = "discrete"
colormap_entries = []
colormapentry_elements = dom.getElementsByTagName("ColorMapEntry")
for colormapentry in colormapentry_elements:
rgb = colormapentry.attributes['rgb'].value
red, green, blue = rgb.split(',')
try:
value = colormapentry.attributes['value'].value
if "(" in value or "[" in value:
style = "range"
except KeyError:
value = None
style = "classification"
try:
transparent = True if colormapentry.attributes['transparent'].value.lower() == 'true' else False
except KeyError:
transparent = False
try:
source_value = colormapentry.attributes['sourceValue'].value
except KeyError:
source_value = value
try:
label = colormapentry.attributes['label'].value
except KeyError:
label = value
try:
nodata = True if colormapentry.attributes['nodata'].value.lower() == 'true' else False
except KeyError:
nodata = False
colormap_entries.append(ColorMapEntry(red, green , blue, transparent, source_value, value, label, nodata))
colormap = ColorMap(None, colormap_entries, style)
if verbose:
print("ColorMap style:", style)
print(colormap)
return colormap
#-------------------------------------------------------------------------------
print(toolName + ' ' + versionNumber + '\n')
usageText = toolName + " --colormap [file] --output [file] --height [int] --width [int] --type [palette]"
# Define command line options and args.
parser=OptionParser(usage=usageText, version=versionNumber)
parser.add_option('-c', '--colormap',
action='store', type='string', dest='colormap',
help='Full path or URL of colormap filename.')
parser.add_option('-f', '--format',
action='store', type='string', dest='format', default = 'png',
help='Format of output file. Supported formats: png')
parser.add_option('-i', '--index',
action='store', type='string', dest='index',
help='The index of the color map to be used as the empty tile palette entry, overrides nodata value')
parser.add_option('-o', '--output',
action='store', type='string', dest='output',
help='The full path of the output file')
parser.add_option('-t', '--type',
action='store', type='string', dest='type', default = 'palette',
help='The image type: rgba or palette. Default: palette')
parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
default=False, help="Print out detailed log messages")
parser.add_option('-x', '--width',
action='store', type='string', dest='width', default = '512',
help='Width of the empty tile (default: 512)')
parser.add_option('-y', '--height',
action='store', type='string', dest='height', default = '512',
help='Height of the empty tile (default: 512)' )
# read command line args
(options, args) = parser.parse_args()
if options.colormap:
colormap_location = options.colormap
else:
print("colormap file must be specified...exiting")
exit()
if options.output:
output_location = options.output
else:
print("output file must be specified...exiting")
exit()
color_index = 0
# parse colormap and get color entry
try:
colormap = parse_colormap(colormap_location, options.verbose)
colormap_entry = colormap.colormap_entries[color_index] # default to first entry if none specified
if options.index != None:
colormap_entry = colormap.colormap_entries[int(options.index)]
color_index = int(options.index)
else:
for index,entry in enumerate(colormap.colormap_entries):
if entry.nodata == True:
colormap_entry = entry
color_index = index
break # use first nodata entry found
except Exception as e:
print(toolName + ": ERROR: " + str(e) + "\n", file=sys.stderr)
sys.exit(1)
# generate empty_tile
try:
if options.verbose:
print("Using index " + str(color_index) + " with entry:\n" + str(colormap_entry))
f = open(output_location, 'wb')
if options.type == "palette":
palette = []
for j in range (0, 256):
try:
entry = colormap.colormap_entries[j]
if entry.transparent == True:
alpha = 0
else:
alpha = 255
palette.append((entry.red,entry.green,entry.blue,alpha))
except IndexError: # pad with zeroes
palette.append((0,0,0,0))
rows = []
img = []
for i in range (1, (int(options.width))+1):
rows.append(color_index)
for i in range (0, int(options.height)):
img.append(rows)
w = png.Writer(int(options.width), int(options.height), palette=palette, bitdepth=8)
w.write(f, img)
else: # use RGBA
rows = []
img = []
for i in range (1, (int(options.width)*4)+1):
if i%4 == 1:
rows.append(colormap_entry.red)
elif i%4 == 2:
rows.append(colormap_entry.green)
elif i%4 == 3:
rows.append(colormap_entry.blue)
elif i%4 == 0:
if colormap_entry.transparent == True:
rows.append(0)
else:
rows.append(255)
for i in range (0, int(options.height)):
img.append(rows)
w = png.Writer(int(options.width), int(options.height), alpha=True)
w.write(f, img)
f.close()
print("\nSuccessfully generated empty tile " + output_location + " of size: " + str(options.width) + " by " + str(options.height))
except IOError as e:
print(toolName + ": " + str(e), file=sys.stderr)
sys.exit(1)
exit()
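# Example invocation (editor's illustration; the colormap and output file names
# are placeholders):
#
#   oe_generate_empty_tile.py --colormap colormap.xml --output empty_tile.png \
#       --height 512 --width 512 --type palette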
| apache-2.0 | 599,956,554,437,236,400 | 38.855124 | 229 | 0.61282 | false |
LearnEra/LearnEraPlaftform | common/lib/xmodule/xmodule/modulestore/tests/test_publish.py | 1 | 7315 |
"""
Test the publish code (mostly testing that publishing doesn't result in orphans)
"""
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.modulestore.tests.test_split_w_old_mongo import SplitWMongoCourseBoostrapper
from xmodule.modulestore.tests.factories import check_mongo_calls
from xmodule.modulestore import ModuleStoreEnum
class TestPublish(SplitWMongoCourseBoostrapper):
"""
Test the publish code (primary causing orphans)
"""
def _create_course(self):
"""
Create the course, publish all verticals
* some detached items
"""
# There are 12 created items and 7 parent updates
# create course: finds: 1 to verify uniqueness, 1 to find parents
# sends: 1 to create course, 1 to create overview
with check_mongo_calls(5, 2):
super(TestPublish, self)._create_course(split=False) # 2 inserts (course and overview)
# with bulk will delay all inheritance computations which won't be added into the mongo_calls
with self.draft_mongo.bulk_operations(self.old_course_key):
# finds: 1 for parent to add child
# sends: 1 for insert, 1 for parent (add child)
with check_mongo_calls(1, 2):
self._create_item('chapter', 'Chapter1', {}, {'display_name': 'Chapter 1'}, 'course', 'runid', split=False)
with check_mongo_calls(2, 2):
self._create_item('chapter', 'Chapter2', {}, {'display_name': 'Chapter 2'}, 'course', 'runid', split=False)
# For each vertical (2) created:
# - load draft
# - load non-draft
# - get last error
# - load parent
# - load inheritable data
with check_mongo_calls(7, 4):
self._create_item('vertical', 'Vert1', {}, {'display_name': 'Vertical 1'}, 'chapter', 'Chapter1', split=False)
self._create_item('vertical', 'Vert2', {}, {'display_name': 'Vertical 2'}, 'chapter', 'Chapter1', split=False)
# For each (4) item created
# - try to find draft
# - try to find non-draft
# - retrieve draft of new parent
# - get last error
# - load parent
# - load inheritable data
# - load parent
# count for updates increased to 16 b/c of edit_info updating
with check_mongo_calls(16, 8):
self._create_item('html', 'Html1', "<p>Goodbye</p>", {'display_name': 'Parented Html'}, 'vertical', 'Vert1', split=False)
self._create_item(
'discussion', 'Discussion1',
"discussion discussion_category=\"Lecture 1\" discussion_id=\"a08bfd89b2aa40fa81f2c650a9332846\" discussion_target=\"Lecture 1\"/>\n",
{
"discussion_category": "Lecture 1",
"discussion_target": "Lecture 1",
"display_name": "Lecture 1 Discussion",
"discussion_id": "a08bfd89b2aa40fa81f2c650a9332846"
},
'vertical', 'Vert1',
split=False
)
self._create_item('html', 'Html2', "<p>Hello</p>", {'display_name': 'Hollow Html'}, 'vertical', 'Vert1', split=False)
self._create_item(
'discussion', 'Discussion2',
"discussion discussion_category=\"Lecture 2\" discussion_id=\"b08bfd89b2aa40fa81f2c650a9332846\" discussion_target=\"Lecture 2\"/>\n",
{
"discussion_category": "Lecture 2",
"discussion_target": "Lecture 2",
"display_name": "Lecture 2 Discussion",
"discussion_id": "b08bfd89b2aa40fa81f2c650a9332846"
},
'vertical', 'Vert2',
split=False
)
with check_mongo_calls(0, 2):
# 2 finds b/c looking for non-existent parents
self._create_item('static_tab', 'staticuno', "<p>tab</p>", {'display_name': 'Tab uno'}, None, None, split=False)
self._create_item('course_info', 'updates', "<ol><li><h2>Sep 22</h2><p>test</p></li></ol>", {}, None, None, split=False)
def test_publish_draft_delete(self):
"""
To reproduce a bug (STUD-811) publish a vertical, convert to draft, delete a child, move a child, publish.
See if deleted and moved children still is connected or exists in db (bug was disconnected but existed)
"""
vert_location = self.old_course_key.make_usage_key('vertical', block_id='Vert1')
item = self.draft_mongo.get_item(vert_location, 2)
# Finds:
# 1 get draft vert,
# 2-10 for each child: (3 children x 3 queries each)
# get draft and then published child
# compute inheritance
# 11 get published vert
# 12-15 get each ancestor (count then get): (2 x 2),
# 16 then fail count of course parent (1)
# 17 compute inheritance
# 18 get last error
# 19-20 get draft and published vert
# Sends:
# delete the subtree of drafts (1 call),
# update the published version of each node in subtree (4 calls),
# update the ancestors up to course (2 calls)
with check_mongo_calls(20, 7):
self.draft_mongo.publish(item.location, self.user_id)
# verify status
item = self.draft_mongo.get_item(vert_location, 0)
self.assertFalse(getattr(item, 'is_draft', False), "Item was published. Draft should not exist")
# however, children are still draft, but I'm not sure that's by design
# delete the draft version of the discussion
location = self.old_course_key.make_usage_key('discussion', block_id='Discussion1')
self.draft_mongo.delete_item(location, self.user_id)
draft_vert = self.draft_mongo.get_item(vert_location, 0)
self.assertTrue(getattr(draft_vert, 'is_draft', False), "Deletion didn't convert parent to draft")
self.assertNotIn(location, draft_vert.children)
# move the other child
other_child_loc = self.old_course_key.make_usage_key('html', block_id='Html2')
draft_vert.children.remove(other_child_loc)
other_vert = self.draft_mongo.get_item(self.old_course_key.make_usage_key('vertical', block_id='Vert2'), 0)
other_vert.children.append(other_child_loc)
self.draft_mongo.update_item(draft_vert, self.user_id)
self.draft_mongo.update_item(other_vert, self.user_id)
# publish
self.draft_mongo.publish(vert_location, self.user_id)
item = self.draft_mongo.get_item(draft_vert.location, revision=ModuleStoreEnum.RevisionOption.published_only)
self.assertNotIn(location, item.children)
self.assertIsNone(self.draft_mongo.get_parent_location(location))
with self.assertRaises(ItemNotFoundError):
self.draft_mongo.get_item(location)
self.assertNotIn(other_child_loc, item.children)
self.assertTrue(self.draft_mongo.has_item(other_child_loc), "Oops, lost moved item")
| agpl-3.0 | 2,435,826,882,299,244,500 | 52.394161 | 154 | 0.5892 | false |
eek6/squeakspace | lib/squeakspace/common/util_http.py | 1 | 14648 |
import urlparse
import json
import Cookie
import squeakspace.common.squeak_ex as ex
def json_fun(object):
#return json.dumps(object)
return json.dumps(object, indent=4) + '\n'
def respond(environ, start_response, status, content, response_headers=None):
if response_headers == None:
response_headers = [('Content-type', 'text/plain'),
('Content-length', str(len(content)))]
start_response(status, response_headers)
return [content]
# delete this.
def respond_json(environ, start_response, status, object):
    content = json_fun(object)
return respond(environ, start_response, status, content)
def json_response_headers(body):
return [('Content-Type', 'application/json'),
('Content-Length', str(len(body)))]
class Response(Exception):
def __init__(self, body, response_headers=None):
self.body = body
self.response_headers = response_headers
def attach_cookies(self, simplecookie):
# cookies_str = simplecookie.output(header='', sep=';')
# if len(cookies_str) > 1 and cookies_str[0] == ' ':
# # get rid of a weird leading space.
# cookies_str = cookies_str[1:]
#
# self.response_headers.append(('Set-Cookie', cookies_str))
#
# print ('cookies_str', cookies_str)
#
for cookie_name in simplecookie:
cookie_str = simplecookie[cookie_name].output(header='')
if len(cookie_str) > 1 and cookie_str[0] == ' ':
# get rid of leading space
cookie_str = cookie_str[1:]
self.response_headers.append(('Set-Cookie', cookie_str))
#print ('cookie_str', cookie_str)
return self
def load_cookies(self, data):
return self.attach_cookies(Cookie.SimpleCookie(data))
def clear_cookies(self, cookies):
simplecookie = Cookie.SimpleCookie()
for cookie_name in cookies:
simplecookie[cookie_name] = ''
simplecookie[cookie_name]['path'] = '/'
simplecookie[cookie_name]['expires'] = 'Thu, 01 Jan 1970 00:00:00 UTC'
return self.attach_cookies(simplecookie)
def respond(self, environ, start_response):
return respond(environ, start_response, self.status, self.body, self.response_headers)
class OkResponse(Response):
status = '200 OK'
class BadRequestResponse(Response):
status = '400 Bad Request'
class ForbiddenResponse(Response):
status = '403 Forbidden'
class NotFoundResponse(Response):
status = '404 Not Found'
class MethodNotAllowedResponse(Response):
status = '405 Method Not Allowed'
class ConflictResponse(Response):
status = '409 Conflict'
class LengthRequiredResponse(Response):
status = '411 Length Required'
class RequestEntityTooLargeResponse(Response):
status = '413 Request Entity Too Large'
class RequestUriTooLongResponse(Response):
status = '414 Request-URI Too Long'
class ServerErrorResponse(Response):
status = '500 Internal Server Error'
class ServerErrorJsonResponse(ServerErrorResponse):
def __init__(self):
self.body = json_fun(
{'status' : 'error',
'reason' : 'server error'})
self.response_headers = [('Content-Type', 'application/json'),
('Content-Length', str(len(self.body)))]
class QueryTooLongResponse(RequestEntityTooLargeResponse):
def __init__(self, query_length, max_length):
self.query_length = query_length
self.max_length = max_length
self.body = json_fun(
{'status' : 'error',
'reason' : 'query too long',
'query_length' : query_length,
'max_length' : max_length})
self.response_headers = [('Content-Type', 'application/json'),
('Content-Length', str(len(self.body)))]
class MalformedQueryStringResponse(BadRequestResponse):
def __init__(self, query_string):
self.query_string = query_string
self.body = json_fun(
{'status' : 'error',
'reason' : 'malformed query string',
'query_string' : query_string})
self.response_headers = [('Content-Type', 'application/json'),
('Content-Length', str(len(self.body)))]
class ContentLengthRequiredResponse(LengthRequiredResponse):
def __init__(self):
self.body = json_fun(
{'status' : 'error',
'reason' : 'Content-Length required'})
self.response_headers = [('Content-Type', 'application/json'),
('Content-Length', str(len(self.body)))]
class MalformedContentLengthResponse(BadRequestResponse):
def __init__(self, content_length):
self.content_length = content_length
self.body = json_fun(
{'status' : 'error',
'reason' : 'malformed content length',
'content_length' : content_length})
self.response_headers = [('Content-Type', 'application/json'),
('Content-Length', str(len(self.body)))]
class ContentLengthTooLargeResponse(RequestEntityTooLargeResponse):
def __init__(self, content_length, max_length):
self.content_length = content_length
self.max_length = max_length
self.body = json_fun(
{'status' : 'error',
'reason' : 'Content-Length too large',
'content_length' : content_length,
'max_length' : max_length})
self.response_headers = [('Content-Type', 'application/json'),
('Content-Length', str(len(self.body)))]
class IncorrectContentLengthResponse(BadRequestResponse):
    def __init__(self, content_length, actual_content_length):
self.content_length = content_length
self.actual_content_length = actual_content_length
self.body = json_fun(
{'status' : 'error',
'reason' : 'incorrect Content-Length',
'content_length' : content_length,
'actual_content_length' : actual_content_length})
self.response_headers = [('Content-Type', 'application/json'),
('Content-Length', str(len(self.body)))]
class InvalidContentTypeResponse(BadRequestResponse):
def __init__(self, content_type, supported_content_type):
self.content_type = content_type
self.supported_content_type = supported_content_type
self.body = json_fun(
{'status' : 'error',
'reason' : 'Content-Type invalid',
'content_type' : content_type,
'supported_content_type' : supported_content_type})
self.response_headers = [('Content-Type', 'application/json'),
('Content-Length', str(len(self.body)))]
class MalformedContentResponse(BadRequestResponse):
# There should be a cut off here. Don't send the content
# back if it's too large.
def __init__(self, content):
self.content = content
self.body = json_fun(
{'status' : 'error',
'reason' : 'malformed content',
'content' : content})
self.response_headers = [('Content-Type', 'application/json'),
('Content-Length', str(len(self.body)))]
class FieldRequiredResponse(BadRequestResponse):
def __init__(self, field):
self.field = field
self.body = json_fun(
{'status' : 'error',
'reason' : 'field required',
'field' : field})
self.response_headers = [('Content-Type', 'application/json'),
('Content-Length', str(len(self.body)))]
class CookieRequiredResponse(BadRequestResponse):
def __init__(self, cookie):
self.cookie = cookie
self.body = json_fun(
{'status' : 'error',
'reason' : 'cookie required',
'cookie' : cookie})
self.response_headers = [('Content-Type', 'application/json'),
('Content-Length', str(len(self.body)))]
class BadFieldResponse(BadRequestResponse):
def __init__(self, field, value):
self.field = field
self.value = value
self.body = json_fun(
{'status' : 'error',
'reason' : 'bad field',
'field' : field,
'value' : value})
self.response_headers = [('Content-Type', 'application/json'),
('Content-Length', str(len(self.body)))]
class MethodNotAllowedJsonResponse(MethodNotAllowedResponse):
def __init__(self, method, allow):
allow_str = ', '.join(allow)
self.method = method
self.allow = allow
self.body = json_fun(
{'status' : 'error',
'reason' : 'method not allowed',
'method' : method,
'allow' : allow})
self.response_headers = [('Content-Type', 'application/json'),
('Content-Length', str(len(self.body))),
('Allow', allow_str)]
def parse_get_request(environ, max_length = 2048):
query_string = environ['QUERY_STRING']
query_length = len(query_string)
if len(query_string) > max_length:
raise QueryTooLongResponse(query_length, max_length)
try:
# keep_blank_values = False, strict_parsing = True
return urlparse.parse_qs(query_string, False, True)
except ValueError:
raise MalformedQueryStringResponse(query_string)
def check_content_type(expected_content_type, content_type):
return expected_content_type == content_type or \
content_type[:len(expected_content_type) + 1] == expected_content_type + ';'
def parse_post_request(environ, max_length = 200*1024*1024): # 200 MB ok?
content_length_str = environ.get('CONTENT_LENGTH')
if content_length_str == None:
raise ContentLengthRequiredResponse()
content_length = None
try:
content_length = int(content_length_str)
except ValueError:
raise MalformedContentLengthResponse(content_length_str)
if content_length > max_length:
raise ContentLengthTooLargeResponse(content_length, max_length)
content_type = environ.get('CONTENT_TYPE')
supported_content_type = 'application/x-www-form-urlencoded'
if not check_content_type(supported_content_type, content_type):
raise InvalidContentTypeResponse(content_type, supported_content_type)
content_input = environ['wsgi.input']
content = content_input.read(content_length)
if content_length != len(content):
raise IncorrectContentLengthResponse(content_length, len(content))
try:
return urlparse.parse_qs(content, False, True)
except ValueError:
raise MalformedContentResponse(content)
def parse_cookies(environ):
cookies_str = environ.get('HTTP_COOKIE')
if cookies_str == None:
return None
else:
return Cookie.SimpleCookie(cookies_str)
def get_required(query_table, field):
try:
return query_table[field][0]
except KeyError:
raise FieldRequiredResponse(field)
def get_optional(query_table, field):
try:
return query_table[field][0]
except KeyError:
return None
def get_required_cookie(simplecookie, cookie):
if simplecookie == None:
raise CookieRequiredResponse(cookie)
try:
return simplecookie[cookie].value
except KeyError:
raise CookieRequiredResponse(cookie)
def get_optional_cookie(simplecookie, cookie):
try:
return simplecookie[cookie].value
except KeyError:
return None
def convert_int(string, field):
try:
if string != None:
return int(string)
else:
return None
except ValueError:
raise BadFieldResponse(field, string)
def convert_bool(string, field):
if string == None:
return None
lower = string.lower()
if lower == 'true':
return True
elif lower == 'false':
return False
else:
raise BadFieldResponse(field, string)
def convert_nat(string, field):
value = convert_int(string, field)
if value < 0:
raise BadFieldResponse(field, string)
return value
def dispatch_on_method(environ, handlers):
method = environ['REQUEST_METHOD']
handler = handlers.get(method)
if handler == None:
allow_array = handlers.keys()
allow_array.sort()
raise MethodNotAllowedJsonResponse(method, allow_array)
handler(environ)
def respond_with_handler(environ, start_response, handler):
response = None
try:
response = handler(environ)
except Response as r:
response = r
return response.respond(environ, start_response)
status_conversion_map = {ex.SqueakStatusCodes.bad_request : BadRequestResponse,
ex.SqueakStatusCodes.too_large : RequestEntityTooLargeResponse,
ex.SqueakStatusCodes.conflict : ConflictResponse,
ex.SqueakStatusCodes.not_found : NotFoundResponse,
ex.SqueakStatusCodes.forbidden : ForbiddenResponse,
ex.SqueakStatusCodes.server_error : ServerErrorResponse}
def convert_squeak_exception(e):
constructor = status_conversion_map[e.type]
content = json_fun(e.dict())
headers = json_response_headers(content)
return constructor(content, headers)
def ok_json(object):
content = json_fun(object)
headers = json_response_headers(content)
return OkResponse(content, headers)
#def bad_request(environ, start_response, reason):
# status = '400 Bad Request'
# content = 'Bad Request: ' + reason
# return respond(environ, start_response, status, content)
#
#def conflict(environ, start_response, reason):
# status = '409 Conflict'
# content = 'Conflict: ' + reason
# return respond(environ, start_response, status, content)
#
#def need_content_length(environ, start_response):
# status = '411 Length Required'
# content = 'Length Required'
# return respond(environ, start_response, status, content)
#
#def request_entity_too_large(environ, start_response):
# status = '413 Request Entity Too Large'
# content = 'Request Entity Too Large'
# return respond(environ, start_response, status, content)
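# ----------------------------------------------------------------------------
# Minimal usage sketch (editor's addition, not part of the original module):
# how the helpers above compose into a WSGI application. The 'echo' field name
# is only an example.
def _example_get_handler(environ):
    args = parse_get_request(environ)
    value = get_required(args, 'echo')
    raise ok_json({'status' : 'ok',
                   'echo' : value})
def example_application(environ, start_response):
    handlers = {'GET' : _example_get_handler}
    return respond_with_handler(environ, start_response,
                                lambda env: dispatch_on_method(env, handlers))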
| gpl-3.0 | -6,834,943,958,178,356,000 | 32.290909 | 94 | 0.608684 | false |
hajicj/safire | build/lib/test/test_utils.py | 1 | 6748 |
"""
Testing the utility functions.
"""
import gzip
import logging
import math
import os
import unittest
import gensim
from gensim.models import TfidfModel
import operator
from safire.data import VTextCorpus, FrequencyBasedTransformer
from safire.data.filters.positionaltagfilter import PositionalTagTokenFilter
from safire.data.imagenetcorpus import ImagenetCorpus
from safire.datasets.dataset import Dataset, CompositeDataset
import safire.utils
from safire.utils.transcorp import bottom_corpus, run_transformations,\
compute_word2image_map, compute_docname_flatten_mapping
from safire.utils import benchmark
from test.safire_test_case import SafireTestCase
class TestUtils(SafireTestCase):
@classmethod
def setUpClass(cls):
super(TestUtils, cls).setUpClass()
cls.vtlist_file = os.path.join(cls.data_root, 'test-data.vtlist')
cls.vtlist = [ os.path.join(cls.data_root, l.strip())
for l in open(cls.vtlist_file) ]
cls.token_filter = PositionalTagTokenFilter(['N', 'A', 'V'], 0)
cls.strict_token_filter = PositionalTagTokenFilter(['N', 'A'], 0)
def test_uniform_steps(self):
iterable = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
stepped = safire.utils.uniform_steps(iterable, 4)
self.assertEqual([10, 8, 6, 4], stepped)
stepped = safire.utils.uniform_steps(iterable, 10)
self.assertEqual([10, 9, 8, 7, 6, 5, 4, 3, 2, 1], stepped)
stepped = safire.utils.uniform_steps(iterable, 6)
self.assertEqual([10, 9, 8, 7, 6, 5], stepped)
def test_id2word(self):
wid = 40
vtcorp = VTextCorpus(self.vtlist_file, input_root=self.data_root,
token_filter=self.token_filter,
pfilter=0.3, pfilter_full_freqs=True)
vtcorp.dry_run()
vt_word = safire.utils.transcorp.id2word(vtcorp, wid)
freq_transform = FrequencyBasedTransformer(vtcorp, 110, 10)
freq_vtcorp = freq_transform[vtcorp]
freq_vt_word = safire.utils.transcorp.id2word(freq_vtcorp, wid)
tfidf = TfidfModel(vtcorp)
tfidf_vtcorp = tfidf[vtcorp]
tfidf_vt_word = safire.utils.transcorp.id2word(tfidf_vtcorp, wid)
tfidf_freq = TfidfModel(freq_vtcorp)
tfidf_freq_vtcorp = tfidf_freq[freq_vtcorp]
tfidf_freq_vt_word = safire.utils.transcorp.id2word(tfidf_freq_vtcorp, wid)
freq_tfidf = FrequencyBasedTransformer(tfidf_vtcorp, 110, 10)
freq_tfidf_vtcorp = freq_tfidf[tfidf_vtcorp]
freq_tfidf_vt_word = safire.utils.transcorp.id2word(freq_tfidf_vtcorp, wid)
self.assertEqual(freq_vt_word, tfidf_freq_vt_word)
self.assertEqual(vt_word, tfidf_vt_word)
wordlist = [vt_word, freq_vt_word, tfidf_vt_word, tfidf_freq_vt_word,
freq_tfidf_vt_word]
print wordlist
def test_bottom_corpus(self):
vtcorp = VTextCorpus(self.vtlist_file, input_root=self.data_root,
token_filter=self.token_filter,
pfilter=0.3, pfilter_full_freqs=True)
freq_transform = FrequencyBasedTransformer(vtcorp, 110, 10)
freq_vtcorp = freq_transform[vtcorp]
tfidf_freq = TfidfModel(freq_vtcorp)
tfidf_freq_vtcorp = tfidf_freq[freq_vtcorp]
self.assertEqual(vtcorp, bottom_corpus(tfidf_freq_vtcorp))
self.assertEqual(vtcorp, bottom_corpus(freq_vtcorp))
def test_run_transformations(self):
vtcorp = VTextCorpus(self.vtlist_file, input_root=self.data_root,
token_filter=self.token_filter,
pfilter=0.3, pfilter_full_freqs=True)
vtcorp.dry_run()
freq_transform = FrequencyBasedTransformer(vtcorp, 110, 10)
freq_vtcorp = freq_transform[vtcorp]
tfidf_freq = TfidfModel(freq_vtcorp)
# with gzip.open(self.vtlist[0]) as vt_handle:
output = run_transformations(0,
vtcorp,
freq_transform,
tfidf_freq)
print output
normalized_output = gensim.matutils.unitvec(output)
print normalized_output
self.assertEqual(12, len(output))
self.assertAlmostEqual(1.0, math.sqrt(sum([f**2 for _, f in output])),
delta=0.0001)
self.assertAlmostEqual(1.0, math.sqrt(sum([f**2 for _, f in normalized_output])),
delta=0.0001)
def test_benchmark(self):
@benchmark
def simple_function(a, b):
result = []
for i in xrange(a**b):
result.append([unicode(b) for _ in xrange(b**a)])
return result
a = 3
b = 8
retval = simple_function(a, b)
self.assertEqual(len(retval), a**b)
self.assertEqual(len(retval[0]), b**a)
def test_compute_word2image_map(self):
image_file = os.path.join(self.data_root,
self.loader.layout.image_vectors)
icorp = ImagenetCorpus(image_file, delimiter=';', dim=4096, label='')
icorp.dry_run()
vtcorp = VTextCorpus(self.vtlist_file, input_root=self.data_root,
token_filter=self.strict_token_filter,
pfilter=0.3, pfilter_full_freqs=True)
vtcorp.dry_run()
idata = Dataset(icorp)
tdata = Dataset(vtcorp)
mmdata = CompositeDataset((tdata, idata), names=('text', 'img'),
aligned=False)
t2i_file = os.path.join(self.loader.root,
self.loader.layout.textdoc2imdoc)
t2i_indexes = compute_docname_flatten_mapping(mmdata, t2i_file)
w2i, i2w = compute_word2image_map(vtcorp, icorp, t2i_indexes)
self.assertIsInstance(w2i, dict)
self.assertIsInstance(i2w, dict)
print w2i[iter(w2i).next()]
print w2i['vzduch']
print w2i['cena-1']
print i2w['0047938150.jpg']
print u'\n'.join([u'{0}: {1}'.format(w, freq)
for w, freq in sorted(i2w['0047938150.jpg'].items(),
key=operator.itemgetter(1), reverse=True)][:10])
print u'\n'.join([u'{0}: {1}'.format(w, freq)
for w, freq in sorted(i2w['0000004532.jpg'].items(),
key=operator.itemgetter(1), reverse=True)][:10])
if __name__ == '__main__':
suite = unittest.TestSuite()
loader = unittest.TestLoader()
tests = loader.loadTestsFromTestCase(TestUtils)
suite.addTest(tests)
runner = unittest.TextTestRunner()
runner.run(suite)
| gpl-3.0 | 4,269,886,714,641,634,300 | 33.968912 | 89 | 0.597659 | false |
T3kton/subcontractor | subcontractor/credentials.py | 1 | 1841 |
import json
import ssl
from urllib import request
VAULT_TIMEOUT = 20
_handler = None
def getCredentials( value ):
if value is None:
return None
return _handler.get( value )
def setup( config ):
global _handler
vault_type = config.get( 'credentials', 'type', fallback=None )
if not vault_type: # could be None or ''
_handler = NullVault()
elif vault_type == 'hashicorp':
_handler = HashiCorptVault( config.get( 'credentials', 'host' ),
config.get( 'credentials', 'token' ),
config.get( 'credentials', 'proxy', fallback=None ),
config.getboolean( 'credentials', 'verify_ssl', fallback=True ) )
else:
raise ValueError( 'Unknown Credentials type "{0}"'.format( vault_type ) )
class NullVault():
def __init__( self ):
pass
def get( self, name ):
return None
class HashiCorptVault():
def __init__( self, host, token, proxy=None, verify_ssl=True ):
super().__init__()
if host[-1] == '/':
raise ValueError( 'VAULT_HOST must not end with "/"' )
self.host = host
handler_list = []
if proxy is not None:
handler_list.append( request.ProxyHandler( { 'http': proxy, 'https': proxy } ) )
else:
handler_list.append( request.ProxyHandler( {} ) )
if not verify_ssl:
handler_list.append( request.HTTPSHandler( context=ssl._create_unverified_context() ) )
self.opener = request.build_opener( *handler_list )
self.opener.addheaders = [
( 'X-Vault-Token', token ),
]
def get( self, url ):
req = request.Request( '{0}{1}'.format( self.host, url ), method='GET' )
resp = self.opener.open( req, timeout=VAULT_TIMEOUT )
# TODO: catch 404, 403, etc
return json.loads( resp.read().decode() )[ 'data' ][ 'data' ]
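# ---------------------------------------------------------------------------
# Minimal usage sketch (editor's addition, not part of the original file).
# The section and option names mirror what setup() reads above; the host,
# token and secret url path are placeholders.
if __name__ == '__main__':
  from configparser import ConfigParser
  cfg = ConfigParser()
  cfg.read_dict( { 'credentials': { 'type': 'hashicorp',
                                    'host': 'https://vault.example.com',
                                    'token': 's.placeholder-token',
                                    'verify_ssl': 'false' } } )
  setup( cfg )
  # with a reachable vault, this would fetch and unwrap the secret payload:
  # secret = getCredentials( '/v1/secret/data/myapp' )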
| apache-2.0 | 6,615,458,542,644,797,000 | 24.219178 | 97 | 0.595872 | false |
ronas/PythonGNF | Igor/Tabuada.py | 1 | 1262 |
'''
numero = int(input("Número para a tabuada: "))
for multiplicador in range(1,11):
print (numero,"x",multiplicador,"=",(numero*multiplicador))
'''
#
# Calculation of hours worked.
# Author: Igor Nunes
# Subject: Python Programming
# Advisor: Ronaldo
# Lesson: total hours worked
#
#Reading the input data from the keyboard...
horasTrabalhadas = input("Horas trabalhadas: ")
valorHoras = input("Valor da Horas: ")
imposto = input("imposto: ")
#
#Handling the user's input (commas converted to decimal points)...
horasTrabalhadas = horasTrabalhadas.replace( "," , "." )
valorHoras = valorHoras.replace(",",".")
imposto = imposto.replace(",",".")
#
#Converting the text values to numeric (floating point)...
horasTrabalhadas = float(horasTrabalhadas)
valorHoras = float(valorHoras)
imposto = float(imposto)
totalBruto = ( horasTrabalhadas * valorHoras )
# Tax rate (percent).
#imposto = 24
# Calculating the tax as a percentage of the gross total.
impostoDevido = totalBruto * (imposto /100 )
#Calculating the net amount.
totalLiquido = (totalBruto - impostoDevido)
#
'''
Clears the screen.
import os
os.system ("clear")
'''
#
print ("totalBruto R$ ",totalBruto )
#
print ("impostoDevido",impostoDevido)
#
print("totalLiquido R$ ",totalLiquido)
| gpl-3.0 | 7,292,178,075,437,664,000 | 23.16 | 66 | 0.68124 | false |
poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/radical.utils-0.7.7-py2.7.egg/radical/utils/__init__.py | 1 | 1700 |
__author__ = "Radical.Utils Development Team (Andre Merzky, Ole Weidner)"
__copyright__ = "Copyright 2013, RADICAL@Rutgers"
__license__ = "MIT"
# import utility classes
from object_cache import ObjectCache
from plugin_manager import PluginManager
from singleton import Singleton
from threads import Thread, RLock, NEW, RUNNING, DONE, FAILED
from url import Url
from dict_mixin import DictMixin, dict_merge, dict_stringexpand
from lockable import Lockable
from registry import Registry, READONLY, READWRITE
from regex import ReString, ReSult
from reporter import Reporter
from benchmark import Benchmark
from lease_manager import LeaseManager
# import utility methods
from ids import generate_id, ID_SIMPLE, ID_UNIQUE
from read_json import read_json
from read_json import read_json_str
from read_json import parse_json
from read_json import parse_json_str
from tracer import trace, untrace
from which import which
from misc import split_dburl, mongodb_connect
from misc import parse_file_staging_directives
from misc import time_diff
from get_version import get_version
# import sub-modules
# from config import Configuration, Configurable, ConfigOption, getConfig
# ------------------------------------------------------------------------------
import os
_mod_root = os.path.dirname (__file__)
version = open (_mod_root + "/VERSION", "r").readline ().strip ()
version_detail = open (_mod_root + "/VERSION.git", "r").readline ().strip ()
# ------------------------------------------------------------------------------
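# Illustrative usage sketch (editor's addition, not part of the original file):
# the helpers re-exported above are used from the package namespace, e.g.
#
#   import radical.utils as ru
#   uid = ru.generate_id ('task')        # e.g. 'task.0000'
#   cfg = ru.read_json ('config.json')   # file name is a placeholder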
| apache-2.0 | -6,634,251,882,934,553,000 | 33 | 81 | 0.626471 | false |
Tuxemon/Tuxemon | tuxemon/event/actions/add_item.py | 1 | 1612 |
#
# Tuxemon
# Copyright (c) 2014-2017 William Edwards <[email protected]>,
# Benjamin Bean <[email protected]>
#
# This file is part of Tuxemon
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import annotations
from tuxemon.event.eventaction import EventAction
from typing import Union, NamedTuple, final
class AddItemActionParameters(NamedTuple):
item_slug: str
quantity: Union[int, None]
@final
class AddItemAction(EventAction[AddItemActionParameters]):
"""
Adds an item to the current player's inventory.
The action parameter must contain an item name to look up in the item
database.
"""
name = "add_item"
param_class = AddItemActionParameters
def start(self) -> None:
player = self.session.player
if self.parameters.quantity is None:
quantity = 1
else:
quantity = self.parameters.quantity
player.alter_item_quantity(self.session, self.parameters.item_slug, quantity)
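# Illustrative note (editor's addition): in map event scripting this action is
# written as, e.g., "add_item potion,5"; the quantity parameter is optional and
# defaults to 1 in start() above. The "potion" slug is only an example.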
| gpl-3.0 | -5,283,441,857,624,997,000 | 31.24 | 85 | 0.71464 | false |
rjonnal/zernike | __init__.py | 1 | 20006 |
"""This module contains functions for Zernike calculations. Mainly the private
function _zgen, a generator function for Zernike polynomials. The public
functions make use of _zgen to create height or slope maps in a unit
pupil, corresponding to individual Zernike terms.
Author: Ravi S. Jonnal / Werner Lab, UC Davis
Revision: 2.0 / 28 June 2014
"""
import numpy as np
from matplotlib import pyplot as plt
import sys
from time import sleep
import os
USE_CACHE_FILE = False
def fact(num):
"""Implementation of factorial function.
"""
# Check that the number is an integer.
assert(num%1==0)
# Check that $num\geq 0$.
assert(num>=0)
# Compute $num!$ recursively.
if num==0 or num==1:
return 1
else:
return num * fact(num-1)
def choose(a,b):
"""Binomial coefficient, implemented using
this module's factorial function.
See [here](http://www.encyclopediaofmath.org/index.php/Newton_binomial) for detail.
"""
assert(a>=b)
return fact(a)/(fact(b)*fact(a-b))
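# Worked example (editor's note): choose(4, 2) = fact(4) / (fact(2) * fact(2)) = 24 / 4 = 6,
# with fact(0) = fact(1) = 1 handled by the base case above.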
def splitEquation(eqStr,width,bookend):
if len(eqStr)<=width or len(eqStr)==0:
return eqStr
else:
spaceIndices = []
idx = 0
while idx>-1:
idx = eqStr.find(' ',idx+1)
spaceIndices.append(idx)
spaceIndices = spaceIndices[:-1]
idxList = [x for x in spaceIndices if x<width]
if len(idxList)==0:
return eqStr
else:
idx = idxList[-1]
head = eqStr[:idx]
innards = ' ' + bookend + '\n' + bookend
tail = splitEquation(eqStr[idx:],width,bookend)
test =head + innards + tail
return test
class Zernike:
def __init__(self):
if USE_CACHE_FILE:
cachedir = './cache/'
self._cachefn = os.path.join(cachedir,'zernike_cache.txt')
if not os.path.exists(cachedir):
os.makedirs(cachedir)
try:
self._termMatrix = np.loadtxt(self._cachefn).astype(np.int32)
except Exception as e:
print 'No term cache file. Creating.'
self._termMatrix = np.array([])
np.savetxt(self._cachefn,self._termMatrix)
# Make a dictionary of precomputed coefficients, using the cache file.
# This dictionary will be used to look up values when they exist in
# the dictionary, and will recompute them otherwise.
self._termDict = {}
if USE_CACHE_FILE:
for row in self._termMatrix:
n,m,kindIndex,s,j,k = row[:6]
t1,t2,t3,c,tXexp,tYexp = row[6:]
self._termDict[(n,m,kindIndex,s,j,k)] = (t1,t2,t3,c,tXexp,tYexp)
# The functions in this class can be asked for phase height,
# or partial x or partial y derivatives. 'Kind' refers to
# which of these is requested. Numerical encodings for 'kind'
# permit some arithmetical simplicity and generality
# (utilizing a number associated with the kind in a single
# equation, rather than having different sets of equations
# for each kind case).
self._kindDictionary = {}
self._kindDictionary['h'] = 0
self._kindDictionary['dx'] = 1
self._kindDictionary['dy'] = 2
def j2nm(self,j):
n = np.ceil((-3+np.sqrt(9+8*j))/2)
m = 2*j-n*(n+2)
return np.int(n),np.int(m)
def nm2j(self,n,m):
return np.int(n*(n+1)/2.0+(n+m)/2.0)
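    # Worked example (editor's note): j2nm(4) returns (2, 0) -- the defocus
    # term -- and nm2j(2, 0) returns 4, so the two index mappings invert each
    # other.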
def _zeqn(self,n,m,kind='h',forceRecompute=False):
"""Return parameters sufficient for specifying a Zernike term
of desired order and azimuthal frequency.
Given an order (or degree) n and azimuthal frequency f, and x-
and y- rectangular (Cartesian) coordinates, produce parameters
necessary for constructing the appropriate Zernike
representation.
An individual polynomial has the format:
$$ Z_n^m = \sqrt{c} \Sigma^j\Sigma^k [a_{jk}X^jY^k] $$
This function returns a tuple ($c$,cdict). $c$ is the square
of the normalizing coefficient $\sqrt{c}$, and cdict contains
key-value pairs (($j$,$k$),$a$), mapping the $X$ and $Y$
exponents ($j$ and $k$, respectively) onto polynomial term
coefficients ($a$). The resulting structure can be used to
compute the wavefront height or slope for arbitrary pupil
coordinates, or to generate string representations of the
polynomials.
Zernike terms are only defined when n and m have the same
parity (both odd or both even).
Please see Schwiegerling lecture notes in
/doc/supporting_docs/ for eqn. references.
Args:
n (int): The Zernike order or degree.
m (int): The azimuthal frequency.
kind (str): 'h', 'dx', or 'dy', for height, partial x
derivative (slope) or partial y derivative,
respectively.
Returns:
params (tuple): (c,cdict), with c being the normalizing
coefficient c and cdict being the map of exponent pairs
onto inner coefficients.
"""
absm = np.abs(m)
kindIndex = self._kindDictionary[kind.lower()]
if USE_CACHE_FILE:
# open cache file in append mode:
self._cacheHandle = file(self._cachefn,'a')
# check that n and m are both even or both odd
if (float(n-absm))%2.0:
errString = 'zernike._zgen error: ' + \
'parity of n and m are different; n = %d, m = %d'%(n,m)
sys.exit(errString)
# check that n is non-negative:
if n<0:
errString = 'zernike._zgen error: ' + \
'n must be non-negative; n = %d'%n
sys.exit(errString)
# $|m|$ must be less than or equal to $n$.
if abs(m)>n:
errString = 'zernike._zgen error: ' + \
'|m| must be less than or equal to n, but n=%d and m=%d.'%(n,m)
sys.exit(errString)
# These are the squares of the outer coefficients. It's useful
# to keep them this way for _convertToString, since we'd
# prefer to print the $\sqrt{}$ rather than a truncated irrational
# number.
if m==0:
outerCoef = n+1
else:
outerCoef = 2*(n+1)
srange = range((n-absm)/2+1)
cdict = {}
for s in srange:
jrange = range(((n-absm)/2)-s+1)
for j in jrange:
# Subtract 1 from absm to determine range,
# only when m<0.
if m<0:
krange = range((absm-1)/2+1)
else:
krange = range(absm/2+1)
for k in krange:
# If m==0, k must also be 0;
# see eqn. 13c, 19c, and 20c, each of which
# only sum over s and j, not k.
if m==0:
assert(k==0)
# For m==0 cases, n/2 is used in coef denominator. Make
# sure that n is even, or else n/2 is not well-defined
# because n is an integer.
if m==0:
assert n%2==0
# Check to see if calculations are cached.
# If so, use cached values; if not, recalculate.
cached = self._termDict.has_key((n,m,kindIndex,s,j,k))
if cached and not forceRecompute:
t1,t2,t3,c,tXexp,tYexp = self._termDict[(n,m,kindIndex,s,j,k)]
else:
# The coefficient for each term in this
# polynomial has the format: $$\frac{t1n}{t1d1
# t1d2 t1d3} t2 t3$$. These six terms are
# computed here.
t1n = ((-1)**(s+k))*fact(n-s)
t1d1 = fact(s)
t1d2 = fact((n + absm)/2-s)
t1d3 = fact((n - absm)/2-s)
t1 = t1n/(t1d1*t1d2*t1d3)
t2 = choose((n - absm)/2 - s, j)
t3 = choose(absm, 2*k + (m<0))
if kind.lower()=='h':
# The (implied) coefficient of the $X^a Y^b$
# term at the end of eqns. 13a-c.
c = 1
tXexp = n - 2*(s+j+k) - (m<0)
tYexp = 2*(j+k) + (m<0)
elif kind.lower()=='dx':
# The coefficient of the $X^a Y^b$ term at
# the end of eqns. 19a-c.
c = (n - 2*(s+j+k) - (m<0))
# Could cacluate explicitly:
# $tXexp = X^{(n - 2*(s+j+k)- 1 - (m<0))}$
#
# However, piggy-backing on previous
# calculation of c speeds things up.
tXexp = c - 1
tYexp = 2*(j+k) + (m<0)
elif kind.lower()=='dy':
# The coefficient of the $X^a Y^b$ term at
# the end of eqns. 20a-c.
c = 2*(j+k) + (m<0)
tXexp = n - 2*(s+j+k) - (m<0)
tYexp = c - 1
else:
errString = 'zernike._zgen error: ' + \
'invalid kind \'%s\'; should be \'h\', \'dx\', or \'dy\'.'%kind
sys.exit(errString)
if not cached and USE_CACHE_FILE:
self._cacheHandle.write('%d\t'*12%(n,m,kindIndex,s,j,k,t1,t2,t3,c,tXexp,tYexp)+'\n')
ct123 = c*t1*t2*t3
# The key for the polynomial dictionary is the pair of X,Y
# coefficients.
termKey = (tXexp,tYexp)
# Leave this term out of the dictionary if its coefficient
# is 0.
if ct123:
# If we already have this term, add to its coefficient.
if cdict.has_key(termKey):
cdict[termKey] = cdict[termKey] + ct123
# If not, add it to the dictionary.
else:
cdict[termKey] = ct123
# Remove zeros to speed up computations later.
cdict = {key: value for key, value in cdict.items() if value}
return (outerCoef,cdict)
def _convertToString(self,params):
"""Return a string representation of a Zernike polynomial.
This function takes a tuple, consisting of a squared
normalizing coefficient and dictionary of inner coefficients
and exponents, provided by _zeqn, and returns a string
representation of the polynomial, with LaTeX- style markup.
Example: a params of (10, {(3,4): 7, (2,5): -1}) would produce a
two-term polynomial '\sqrt{10} [7 X^3 Y^4 - X^2 Y^5]', which could be used in LaTeX,
pandoc, markdown, MathJax, or Word with MathType, to produce:
$$ \sqrt{10} [7 X^3 Y^4 - X^2 Y^5] $$
Args:
params (tuple): A pair consisting of an outer coefficient
$c$ and a dictionary mapping tuples (xexp,yexp) of
exponents onto the corresponding term coefficients.
Returns:
string: A string representation of the polynomial.
"""
c = params[0]
cdict = params[1]
keys = sorted(cdict.keys(), key=lambda tup: (tup[0]+tup[1],tup[0]))[::-1]
outstr = ''
firstKey = True
for key in keys:
coef = cdict[key]
if coef>0:
sign = '+'
else:
sign = '-'
coef = abs(coef)
            if sign=='-' or not firstKey:
outstr = outstr + '%s'%sign
if coef>1 or (key[0]==0 and key[1]==0):
outstr = outstr + '%d'%coef
if key[0]:
outstr = outstr + 'X^{%d}'%key[0]
if key[1]:
outstr = outstr + 'Y^{%d}'%key[1]
firstKey = False
outstr = outstr + ' '
outstr = outstr.strip()
if np.sqrt(float(c))%1.0<.00001:
cstr = '%d'%(np.sqrt(c))
else:
cstr = '\sqrt{%d}'%(c)
if len(outstr):
outstr = '%s [%s]'%(cstr,outstr)
else:
outstr = '%s'%(cstr)
return outstr
def _convertToSurface(self,params,X,Y,mask=None):
"""Return a phase map specified by a Zernike polynomial.
This function takes a tuple, consisting of a squared
normalizing coefficient and dictionary of inner coefficients
and exponents, provided by _zeqn, and x- and y- rectangular
(Cartesian) coordinates, and produces a phase map.
This function works by evaluating the polynomial expressed by
params at each coordinate specified by X and Y.
Args:
params (tuple): A pair consisting of an outer coefficient
$c$ and a dictionary mapping tuples (xexp,yexp) of
exponents onto the corresponding term coefficients.
X (float): A scalar, vector, or matrix of X coordinates in unit pupil.
Y (float): A scalar, vector, or matrix of Y coordinates in unit pupil.
kind (str): 'h', 'dx', or 'dy', for height, partial x derivative (slope)
or partial y derivative, respectively.
Returns:
float: height, dx, or dy; returned structure same size as X and Y.
"""
# Check that shapes of X and Y are equal (not necessarily square).
if not (X.shape[0]==Y.shape[0] and \
X.shape[1]==Y.shape[1]):
errString = 'zernike.getSurface error: ' + \
'X and Y must have the same shape, but X is %d x %d'%(X.shape[0],X.shape[1]) + \
'and Y is %d x %d'%(Y.shape[0],Y.shape[1])
sys.exit(errString)
if mask is None:
mask = np.ones(X.shape)
        # params is supplied by the caller; recomputing it here would reference
        # undefined n, m and kind, so the passed-in params is used directly.
        normalizer = np.sqrt(params[0])
        matrix_out = np.zeros(X.shape)
        for item in params[1].items():
            matrix_out = matrix_out + item[1] * X**(item[0][0]) * Y**(item[0][1])
        matrix_out = matrix_out * normalizer
matrix_out = matrix_out * mask
return matrix_out
def getSurface(self,n,m,X,Y,kind='h',mask=None):
"""Return a phase map specified by a Zernike order and azimuthal frequency.
Given an order (or degree) n and azimuthal frequency f, and x- and y-
rectangular (Cartesian) coordinates, produce a phase map of either height,
partial x derivative, or partial y derivative.
Zernike terms are only defined when n and m have the same parity (both odd
or both even).
The input X and Y values should be located inside a unit pupil, such that
$$\sqrt{X^2 + Y^2}\leq 1$$
Please see Schwiegerling lecture notes in /doc/supporting_docs/ for eqn.
references.
This function works by calling Zernike._zeqn to calculate the coefficients
and exponents of the polynomial, and then using the supplied X and Y
coordinates to produce the height map (or partial derivative).
Args:
n (int): The Zernike order or degree.
m (int): The azimuthal frequency.
X (float): A scalar, vector, or matrix of X coordinates in unit pupil.
Y (float): A scalar, vector, or matrix of Y coordinates in unit pupil.
kind (str): 'h', 'dx', or 'dy', for height, partial x derivative (slope)
or partial y derivative, respectively.
Returns:
float: height, dx, or dy; returned structure same size as X and Y.
"""
# Check that shapes of X and Y are equal (not necessarily square).
if not np.all(X.shape==Y.shape):
errString = 'zernike.getSurface error: ' + \
'X and Y must have the same shape, but X is %d x %d'%(X.shape[0],X.shape[1]) + \
'and Y is %d x %d'%(Y.shape[0],Y.shape[1])
sys.exit(errString)
if mask is None:
mask = np.ones(X.shape)
params = self._zeqn(n,m,kind)
normalizer = np.sqrt(params[0])
matrix_out = np.zeros(X.shape)
for item in params[1].items():
matrix_out = matrix_out + item[1] * X**(item[0][0]) * Y**(item[0][1])
matrix_out = matrix_out * normalizer
matrix_out = matrix_out * mask
return matrix_out
def getEquationString(self,n,m,kind='h',doubleDollar=False):
"""Return LaTeX-encoded of the Zernike polynomial specified by
order n, frequency m.
Args:
n (int): The Zernike order or degree.
m (int): The azimuthal frequency.
kind (str): 'h', 'dx', or 'dy', for height, partial x
derivative (slope) or partial y derivative,
respectively.
doubleDollar (bool): determines how to bookend the
polynomial string; True causes bookending with '$$', to
produce "display" math mode, whereas False would produce
a string suitable for inline use.
Returns:
str: a LaTeX representation of the Zernike polynomial
specified by n, m, and Kind.
"""
params = self._zeqn(n,m,kind)
rightString = self._convertToString(params)
if kind.lower()=='h':
leftString = 'Z^{%d}_{%d}'%(m,n)
elif kind.lower()=='dx':
leftString = '\\frac{\delta Z^{%d}_{%d}}{\delta x}'%(m,n)
elif kind.lower()=='dy':
leftString = '\\frac{\delta Z^{%d}_{%d}}{\delta y}'%(m,n)
else:
sys.exit('zernike.getEquationString: invalid kind %s'%kind)
if doubleDollar:
bookend = '$$'
else:
bookend = '$'
return '%s %s = %s %s'%(bookend,leftString,rightString,bookend)
def plotPolynomial(self,n,m,kind='h'):
"""Plot a polynomial surface specified by order n, frequency m, and kind.
Args:
n (int): The Zernike order or degree.
m (int): The azimuthal frequency.
kind (str): 'h', 'dx', or 'dy', for height, partial x
derivative (slope) or partial y derivative,
respectively.
Calling function/script required to provide a plotting context (e.g. pyplot.figure).
"""
from mpl_toolkits.mplot3d import Axes3D
N = 64
mask = np.zeros((N,N))
xx,yy = np.meshgrid(np.linspace(-1,1,N),np.linspace(-1,1,N))
d = np.sqrt(xx**2 + yy**2)
mask[np.where(d<1)] = 1
surface = self.getSurface(n,m,xx,yy,kind,mask)
surface = surface * mask
#plt.figure()
ax = plt.axes([0,.2,1,.8],projection='3d')
surf = ax.plot_wireframe(xx,yy,surface,rstride=1,cstride=1,color='k')
ax.view_init(elev=70., azim=40)
eqstr = self.getEquationString(n,m,kind)
eqstr = splitEquation(eqstr,160,'$')
print 'plotting %s'%eqstr
plt.axes([0,0,1,.2])
plt.xticks([])
plt.yticks([])
plt.box('off')
fontsize = 12
plt.text(0.5,0.5,eqstr,ha='center',va='center',fontsize=fontsize)
| gpl-2.0 | 5,358,316,222,424,130,000 | 33.732639 | 112 | 0.517845 | false |
dongguangming/django-books | models.py | 1 | 1560 | #import all of the things we will be using
from django.db import models
from tagging.fields import TagField
# to help with translation of field names
from django.utils.translation import ugettext_lazy as _
# to have a generic foreign key for any model
from django.contrib.contenttypes import generic
# stores model info so this can be applied to any model
from django.contrib.contenttypes.models import ContentType
class Book(models.Model):
"""
The details of a Book
"""
# fields that describe this book
name = models.CharField(_('name'), max_length=48)
isbn = models.CharField(_('isbn'), max_length=16)
url = models.URLField(_('url'), verify_exists=False, blank=True)
description = models.TextField(_('description'))
# to add to any model
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey('content_type',
'object_id')
# for the list of tags for this book
tags = TagField()
# misc fields
deleted = models.BooleanField(default=0)
created = models.DateTimeField(auto_now_add=True)
# so that {{book.get_absolute_url}} outputs the whole url
@models.permalink
def get_absolute_url(self):
return ("book_details", [self.pk])
# outputs name when printing this object as a string
def __unicode__(self):
return self.name
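# Illustrative usage of the generic relation above (the related model and instance
# names are assumed, not part of this app):
#   from django.contrib.contenttypes.models import ContentType
#   book = Book(name="Example", isbn="0000000000000", description="...",
#               content_type=ContentType.objects.get_for_model(other_obj),
#               object_id=other_obj.pk)
#   book.save()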
| bsd-3-clause | 7,885,029,974,770,089,000 | 36.04878 | 76 | 0.633974 | false |
rspavel/spack | var/spack/repos/builtin/packages/bazel/package.py | 1 | 12437 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import re
from spack import *
class Bazel(Package):
"""Bazel is an open-source build and test tool similar to Make, Maven, and
Gradle. It uses a human-readable, high-level build language. Bazel supports
projects in multiple languages and builds outputs for multiple platforms.
Bazel supports large codebases across multiple repositories, and large
numbers of users."""
homepage = "https://bazel.build/"
url = "https://github.com/bazelbuild/bazel/releases/download/3.1.0/bazel-3.1.0-dist.zip"
maintainers = ['adamjstewart']
version('3.1.0', sha256='d7f40d0cac95a06cea6cb5b7f7769085257caebc3ee84269dd9298da760d5615')
version('3.0.0', sha256='530f5132e0a50da7ebb0ed08d9b6f1ddfd0d7d9b5d0beb2df5d687a4c8daf6b3')
version('2.2.0', sha256='9379878a834d105a47a87d3d7b981852dd9f64bc16620eacd564b48533e169a7')
version('2.1.1', sha256='83f67f28f4e47ff69043307d1791c9bffe83949e84165d49058b84eded932647')
version('2.1.0', sha256='3371cd9050989173a3b27364668328653a65653a50a85c320adc53953b4d5f46')
version('2.0.1', sha256='a863ed9e6fc420fbd92e63a12fe1a5b9be1a7a36f11f61f1fdc582c813bbe543')
version('2.0.0', sha256='724da3c656f68e787a86ebb9844773aa1c2e3a873cc39462a8f1b336153d6cbb')
version('1.2.1', sha256='255da49d0f012bc4f2c1d6d3ccdbe578e22fe97b8d124e1629a486fe2a09d3e1')
version('1.2.0', sha256='9cb46b0a18b9166730307a0e82bf4c02281a1cc6da0fb11239e6fe4147bdee6e')
version('1.1.0', sha256='4b66a8c93af7832ed32e7236cf454a05f3aa06d25a8576fc3f83114f142f95ab')
version('1.0.1', sha256='f4d2dfad011ff03a5fae41b9b02cd96cd7297c1205d496603d66516934fbcfee')
version('1.0.0', sha256='c61daf0b69dd95205c695b2f9022d296d052c727062cfd396d54ffb2154f8cac')
version('0.29.1', sha256='872a52cff208676e1169b3e1cae71b1fe572c4109cbd66eab107d8607c378de5')
version('0.29.0', sha256='01cb6f2e808bd016cf0e217e12373c9efb808123e58b37885be8364458d3a40a')
version('0.28.1', sha256='2cea463d611f5255d2f3d41c8de5dcc0961adccb39cf0ac036f07070ba720314')
version('0.28.0', sha256='26ad8cdadd413b8432cf46d9fc3801e8db85d9922f85dd8a7f5a92fec876557f')
version('0.27.2', sha256='5e1bf2b48e54eb7e518430667d29aef53695d6dd7c718665a52131ab27aadab2')
version('0.27.1', sha256='8051d77da4ec338acd91770f853e4c25f4407115ed86fd35a6de25921673e779')
version('0.27.0', sha256='c3080d3b959ac08502ad5c84a51608c291accb1481baad88a628bbf79b30c67a')
version('0.26.1', sha256='c0e94f8f818759f3f67af798c38683520c540f469cb41aea8f5e5a0e43f11600')
version('0.26.0', sha256='d26dadf62959255d58e523da3448a6222af768fe1224e321b120c1d5bbe4b4f2')
version('0.25.3', sha256='23eafd3e439bc71baba9c592b52cb742dabc8640a13b9da1751fec090a2dda99')
version('0.25.2', sha256='7456032199852c043e6c5b3e4c71dd8089c1158f72ec554e6ec1c77007f0ab51')
version('0.25.1', sha256='a52bb31aeb1f821e649d25ef48023cfb54a12887aff875c6349ebcac36c2f056')
version('0.25.0', sha256='f624fe9ca8d51de192655369ac538c420afb7cde16e1ad052554b582fff09287')
version('0.24.1', sha256='56ea1b199003ad832813621744178e42b39e6206d34fbae342562c287da0cd54')
version('0.24.0', sha256='621d2a97899a88850a913eabf9285778331a309fd4658b225b1377f80060fa85')
version('0.23.2', sha256='293a5a7d851e0618eeb5e6958d94a11d45b6a00f2ba9376de61ac2bd5f917439')
version('0.23.1', sha256='dd47199f92452bf67b2c5d60ad4b7143554eaf2c6196ab6e8713449d81a0491d')
version('0.23.0', sha256='2daf9c2c6498836ed4ebae7706abb809748b1350cacd35b9f89452f31ac0acc1')
version('0.22.0', sha256='6860a226c8123770b122189636fb0c156c6e5c9027b5b245ac3b2315b7b55641')
version('0.21.0', sha256='6ccb831e683179e0cfb351cb11ea297b4db48f9eab987601c038aa0f83037db4')
version('0.20.0', sha256='1945afa84fd8858b0a3c68c09915a4bc81065c61df2591387b2985e2297d30bd')
version('0.19.2', sha256='11234cce4f6bdc62c3ac688f41c7b5c178eecb6f7e2c4ba0bcf00ba8565b1d19')
version('0.19.1', sha256='c9405f7b8c79ebc81f9f0e49bb656df4a0da246771d010c2cdd6bb30e2500ac0')
version('0.19.0', sha256='ee6135c5c47306c8421d43ad83aabc4f219cb065376ee37797f2c8ba9a615315')
version('0.18.1', sha256='baed9f28c317000a4ec1ad2571b3939356d22746ca945ac2109148d7abb860d4')
version('0.18.0', sha256='d0e86d2f7881ec8742a9823a986017452d2da0dfe4e989111da787cb89257155')
version('0.17.2', sha256='b6e87acfa0a405bb8b3417c58477b66d5bc27dc0d31ba6fa12bc255b9278d33b')
version('0.17.1', sha256='23e4281c3628cbd746da3f51330109bbf69780bd64461b63b386efae37203f20')
version('0.16.1', sha256='09c66b94356c82c52f212af52a81ac28eb06de1313755a2f23eeef84d167b36c')
version('0.16.0', sha256='c730593916ef0ba62f3d113cc3a268e45f7e8039daf7b767c8641b6999bd49b1')
version('0.15.2', sha256='bf53ec73be3a6d412d85ef612cec6e9c85db45da42001fab0cf1dad44cfc03f1')
version('0.15.1', sha256='c62b351fa4c1ba5aeb34d0a137176f8e8f1d89a32f548a10e96c11df176ffc6c')
version('0.15.0', sha256='c3b716e6625e6b8c323350c95cd3ae0f56aeb00458dddd10544d5bead8a7b602')
version('0.14.1', sha256='d49cdcd82618ae7a7a190e6f0a80d9bf85c1a66b732f994f37732dc14ffb0025')
version('0.14.0', sha256='259627de8b9d415cc80904523facf3d50e6e8e68448ab968eb1c9cb8ca1ef843')
version('0.13.1', sha256='b0269e75b40d87ff87886e5f3432cbf88f70c96f907ab588e6c21b2922d72db0')
version('0.13.0', sha256='82e9035084660b9c683187618a29aa896f8b05b5f16ae4be42a80b5e5b6a7690')
version('0.12.0', sha256='3b3e7dc76d145046fdc78db7cac9a82bc8939d3b291e53a7ce85315feb827754')
version('0.11.1', sha256='e8d762bcc01566fa50952c8028e95cfbe7545a39b8ceb3a0d0d6df33b25b333f')
version('0.11.0', sha256='abfeccc94728cb46be8dbb3507a23ccffbacef9fbda96a977ef4ea8d6ab0d384')
version('0.10.1', sha256='708248f6d92f2f4d6342006c520f22dffa2f8adb0a9dc06a058e3effe7fee667')
version('0.10.0', sha256='47e0798caaac4df499bce5fe554a914abd884a855a27085a4473de1d737d9548')
version('0.9.0', sha256='efb28fed4ffcfaee653e0657f6500fc4cbac61e32104f4208da385676e76312a')
version('0.8.1', sha256='dfd0761e0b7e36c1d74c928ad986500c905be5ebcfbc29914d574af1db7218cf')
version('0.8.0', sha256='aa840321d056abd3c6be10c4a1e98a64f9f73fff9aa89c468dae8c003974a078')
version('0.7.0', sha256='a084a9c5d843e2343bf3f319154a48abe3d35d52feb0ad45dec427a1c4ffc416')
version('0.6.1', sha256='dada1f60a512789747011184b2767d2b44136ef3b036d86947f1896d200d2ba7')
version('0.6.0', sha256='a0e53728a9541ef87934831f3d05f2ccfdc3b8aeffe3e037be2b92b12400598e')
version('0.5.4', sha256='2157b05309614d6af0e4bbc6065987aede590822634a0522161f3af5d647abc9')
version('0.5.3', sha256='76b5c5880a0b15f5b91f7d626c5bc3b76ce7e5d21456963c117ab711bf1c5333')
version('0.5.2', sha256='2418c619bdd44257a170b85b9d2ecb75def29e751b725e27186468ada2e009ea')
version('0.5.1', sha256='85e6a18b111afeea2e475fe991db2a441ec3824211d659bee7b0012c36be9a40')
version('0.5.0', sha256='ebba7330a8715e96a6d6dc0aa085125d529d0740d788f0544c6169d892e4f861')
version('0.4.5', sha256='2b737be42678900470ae9e48c975ac5b2296d9ae23c007bf118350dbe7c0552b')
version('0.4.4', sha256='d52a21dda271ae645711ce99c70cf44c5d3a809138e656bbff00998827548ebb')
version('0.4.3', sha256='cbd2ab580181c17317cf18b2bf825bcded2d97cab01cd5b5fe4f4d520b64f90f')
version('0.4.2', sha256='8e6f41252abadcdb2cc7a07f910ec4b45fb12c46f0a578672c6a186c7efcdb36')
version('0.4.1', sha256='008c648d3c46ece063ae8b5008480d8ae6d359d35967356685d1c09da07e1064')
version('0.4.0', sha256='6474714eee72ba2d4e271ed00ce8c05d67a9d15327bc03962b821b2af2c5ca36')
version('0.3.2', sha256='ca5caf7b2b48c7639f45d815b32e76d69650f3199eb8caa541d402722e3f6c10')
version('0.3.1', sha256='218d0e28b4d1ee34585f2ac6b18d169c81404d93958815e73e60cc0368efcbb7')
version('0.3.0', sha256='357fd8bdf86034b93902616f0844bd52e9304cccca22971ab7007588bf9d5fb3')
variant('nodepfail', default=True, description='Disable failing dependency checks due to injected absolute paths - required for most builds using bazel with spack')
depends_on('java', type=('build', 'run'))
depends_on('python', type=('build', 'run'))
depends_on('zip', type=('build', 'run'))
# Pass Spack environment variables to the build
patch('bazelruleclassprovider-0.25.patch', when='@0.25:')
patch('bazelruleclassprovider-0.14.patch', when='@0.14:0.24')
patch('bazelconfiguration-0.3.patch', when='@:0.13')
# Inject include paths
patch('unix_cc_configure-3.0.patch', when='@3:')
patch('unix_cc_configure-0.15.patch', when='@0.15:2')
patch('unix_cc_configure-0.10.patch', when='@0.10:0.14')
patch('unix_cc_configure-0.5.3.patch', when='@0.5.3:0.9')
patch('cc_configure-0.5.0.patch', when='@0.5.0:0.5.2')
patch('cc_configure-0.3.0.patch', when='@:0.4')
# Set CC and CXX
patch('compile-0.29.patch', when='@0.29:')
patch('compile-0.21.patch', when='@0.21:0.28')
patch('compile-0.16.patch', when='@0.16:0.20')
patch('compile-0.13.patch', when='@0.13:0.15')
patch('compile-0.9.patch', when='@0.9:0.12')
patch('compile-0.6.patch', when='@0.6:0.8')
patch('compile-0.4.patch', when='@0.4:0.5')
patch('compile-0.3.patch', when='@:0.3')
# for fcc
patch('patch_for_fcc.patch', when='@0.29.1:%fj')
patch('patch_for_fcc2.patch', when='@0.25:%fj')
conflicts(
'%fj',
when='@:0.24.1',
msg='Fujitsu Compiler cannot build 0.24.1 or less, '
'please use a newer release.'
)
patch('disabledepcheck.patch', when='@0.3.2:+nodepfail')
patch('disabledepcheck_old.patch', when='@0.3.0:0.3.1+nodepfail')
phases = ['bootstrap', 'install']
executables = ['^bazel$']
@classmethod
def determine_version(cls, exe):
output = Executable(exe)('version', output=str, error=str)
match = re.search(r'Build label: ([\d.]+)', output)
return match.group(1) if match else None
def url_for_version(self, version):
if version >= Version('0.4.1'):
url = 'https://github.com/bazelbuild/bazel/releases/download/{0}/bazel-{0}-dist.zip'
else:
url = 'https://github.com/bazelbuild/bazel/archive/{0}.tar.gz'
return url.format(version)
def setup_build_environment(self, env):
env.set('EXTRA_BAZEL_ARGS',
# Spack's logs don't handle colored output well
'--color=no --host_javabase=@local_jdk//:jdk'
# Enable verbose output for failures
' --verbose_failures'
# Ask bazel to explain what it's up to
# Needs a filename as argument
' --explain=explainlogfile.txt'
# Increase verbosity of explanation,
' --verbose_explanations'
# Show (formatted) subcommands being executed
' --subcommands=pretty_print'
' --jobs={0}'.format(make_jobs))
def bootstrap(self, spec, prefix):
bash = which('bash')
bash('./compile.sh')
def install(self, spec, prefix):
mkdir(prefix.bin)
install('output/bazel', prefix.bin)
@run_after('install')
@on_package_attributes(run_tests=True)
def test(self):
# https://github.com/Homebrew/homebrew-core/blob/master/Formula/bazel.rb
# Bazel does not work properly on NFS, switch to /tmp
with working_dir('/tmp/spack/bazel/spack-test', create=True):
touch('WORKSPACE')
with open('ProjectRunner.java', 'w') as f:
f.write("""\
public class ProjectRunner {
public static void main(String args[]) {
System.out.println("Hi!");
}
}""")
with open('BUILD', 'w') as f:
f.write("""\
java_binary(
name = "bazel-test",
srcs = glob(["*.java"]),
main_class = "ProjectRunner",
)""")
# Spack's logs don't handle colored output well
bazel = Executable(self.prefix.bin.bazel)
bazel('--output_user_root=/tmp/spack/bazel/spack-test',
'build', '--color=no', '//:bazel-test')
exe = Executable('bazel-bin/bazel-test')
assert exe(output=str) == 'Hi!\n'
def setup_dependent_package(self, module, dependent_spec):
module.bazel = Executable('bazel')
@property
def parallel(self):
return not self.spec.satisfies('%fj')
| lgpl-2.1 | 6,327,231,952,113,095,000 | 56.313364 | 168 | 0.733939 | false |
taarifa/taarifa_backend | taarifa_backend/models.py | 1 | 4325 | import datetime
from flask_security import RoleMixin, UserMixin
from flask_mongoengine.wtf import model_form
from taarifa_backend import db
fieldmap = {
'BinaryField': db.BinaryField,
'BooleanField': db.BooleanField,
'ComplexDateTimeField': db.ComplexDateTimeField,
'DateTimeField': db.DateTimeField,
'DecimalField': db.DecimalField,
'DictField': db.DictField,
'DynamicField': db.DynamicField,
'EmailField': db.EmailField,
'EmbeddedDocumentField': db.EmbeddedDocumentField,
'FileField': db.FileField,
'FloatField': db.FloatField,
'GenericEmbeddedDocumentField': db.GenericEmbeddedDocumentField,
'GenericReferenceField': db.GenericReferenceField,
'GeoPointField': db.GeoPointField,
'ImageField': db.ImageField,
'IntField': db.IntField,
'ListField': db.ListField,
'MapField': db.MapField,
'ObjectIdField': db.ObjectIdField,
'ReferenceField': db.ReferenceField,
'SequenceField': db.SequenceField,
'SortedListField': db.SortedListField,
'StringField': db.StringField,
'URLField': db.URLField,
'UUIDField': db.UUIDField,
}
class Field(db.EmbeddedDocument):
"""Field in a :class:`Service`."""
db_field = db.StringField(default=None)
required = db.BooleanField(default=False)
default = db.DynamicField(default=None)
unique = db.BooleanField(default=False)
unique_with = db.DynamicField(default=None)
primary_key = db.BooleanField(default=False)
choices = db.DynamicField(default=None)
help_text = db.StringField(default=None)
verbose_name = db.StringField(default=None)
class Service(db.Document):
"""A service schema served by the API."""
meta = {'strict': False}
name = db.StringField(required=True)
fields = db.DictField(required=True)
description = db.StringField()
group = db.StringField()
keywords = db.ListField(db.StringField())
protocol_type = db.StringField()
service_name = db.StringField(required=True)
service_code = db.StringField(required=True, unique=True)
def build_schema(service):
build_field = lambda d: fieldmap[d.pop('type')](**d)
return type(str(service.name), (Report,),
dict(description=service.description,
group=service.group,
keywords=service.keywords,
protocol_type=service.protocol_type,
service_name=service.service_name,
service_code=service.service_code,
meta={'allow_inheritance': True},
**dict((k, build_field(v)) for k, v in service.fields.items()))
)
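# Illustrative example (the field definition below is assumed, not taken from a real
# Service document):
#   a Service with fields={"waterpoint": {"type": "StringField", "required": True}}
#   produces a Report subclass whose "waterpoint" attribute is
#   db.StringField(required=True), because build_field pops "type" and passes the
#   remaining keys as keyword arguments to the mapped field class.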
class Metadata(object):
"""
Description of a service
"""
def __init__(self, service_code, service_name, description, group=None):
self.service_code = service_code
self.service_name = service_name
self.description = description
self.group = group
def __repr__(self):
args = [self.service_code, self.service_name, self.description, self.group]
return 'Metadata(%s)' % ', '.join(map(str, args))
class Report(db.Document):
created_at = db.DateTimeField(default=datetime.datetime.now, required=True)
latitude = db.FloatField(required=True)
longitude = db.FloatField(required=True)
meta = {'allow_inheritance': True, 'strict': False}
ReportForm = model_form(Report, exclude=['created_at'])
class Role(db.Document, RoleMixin):
name = db.StringField(max_length=80, unique=True)
description = db.StringField(max_length=255)
class User(db.Document, UserMixin):
email = db.StringField(max_length=255, unique=True)
password = db.StringField(max_length=255)
active = db.BooleanField(default=True)
confirmed_at = db.DateTimeField()
roles = db.ListField(db.ReferenceField(Role), default=[])
def get_available_services():
return [build_schema(o) for o in Service.objects]
def get_service_class(service_code):
try:
return build_schema(Service.objects.get(service_code=service_code))
except Service.DoesNotExist:
return Report
def get_form(service_code):
return model_form(get_service_class(service_code), exclude=['created_at'])
def clear_database():
for cls in [Report, Role, User]:
cls.drop_collection()
| bsd-3-clause | 7,360,685,039,675,874,000 | 30.801471 | 84 | 0.672139 | false |
SureshMatsui/SaveCoin | qa/rpc-tests/skeleton.py | 1 | 2232 | #!/usr/bin/env python
# Skeleton for python-based regression tests using
# JSON-RPC
# Add python-SaveCoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-SaveCoinrpc"))
import json
import shutil
import subprocess
import tempfile
import traceback
from SaveCoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def run_test(nodes):
# Replace this as appropriate
for node in nodes:
assert_equal(node.getblockcount(), 200)
assert_equal(node.getbalance(), 25*50)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave SaveCoinds and test.* datadir on exit or error")
parser.add_option("--srcdir", dest="srcdir", default="../../src",
help="Source directory containing SaveCoind/SaveCoin-cli (default: %default%)")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
(options, args) = parser.parse_args()
os.environ['PATH'] = options.srcdir+":"+os.environ['PATH']
check_json_precision()
success = False
try:
print("Initializing test directory "+options.tmpdir)
if not os.path.isdir(options.tmpdir):
os.makedirs(options.tmpdir)
initialize_chain(options.tmpdir)
nodes = start_nodes(2, options.tmpdir)
connect_nodes(nodes[1], 0)
sync_blocks(nodes)
run_test(nodes)
success = True
except AssertionError as e:
print("Assertion failed: "+e.message)
except Exception as e:
print("Unexpected exception caught during testing: "+str(e))
stack = traceback.extract_tb(sys.exc_info()[2])
print(stack[-1])
if not options.nocleanup:
print("Cleaning up")
stop_nodes()
shutil.rmtree(options.tmpdir)
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
if __name__ == '__main__':
main()
| mit | 2,976,008,050,888,033,000 | 27.253165 | 101 | 0.638441 | false |
gochaorg/mailscripts | emailer/mailer.py | 1 | 12253 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import smtplib
import os
import re
import sys
import imaplib
import email
import tempfile
import shutil
import datetime
import quopri
import base64
import hashlib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email.MIMEText import MIMEText
from email import Encoders
class Mailer:
"""Класс по работе с электронной почтой"""
addrTo = []
    '''List of recipient addresses'''
addrFrom = False
login = False
    subject = 'No subject'
subjectTmpl = '{subject} {part}/{total}'
text = ''
attach = []
smtpHost = False
smtpPort = 25
useSSL = False
useTLS = False
smtpPassword = False
verbose = False
timeout = 30
split = False
imapHost = False
imapPort = 143
#md5 = False
imapPassword = False
imapSSL = False
def timeString( self, d ):
"""Возвращает текстовое представление времени
d - Дата"""
timeStr = "{year}-{month:#02}-{day:#02}_{hour:#02}-{minute:#02}-{second:#02}".format(
year=d.year,
month=d.month,
day=d.day,
hour=d.hour,
minute=d.minute,
second=d.second )
return timeStr
def log(self,text):
        '''Log - print the given text (text)'''
if self.verbose:
print text
def serr(self,text):
"""Лог - ошибка состояния объекта, text - описание"""
print 'Ошибка состояния объекта = '+text
def exception(self,ex):
"""Лог - исключительная ситуация, ex - описание"""
print 'Ошибка почты {err}'.format( err=ex )
def attachFile(self,msg,fileName):
"""Присоединяет файл (fileName) к сообщению (msg)"""
part = MIMEBase('application', 'octet-stream')
part.set_payload(open(fileName, 'rb').read())
Encoders.encode_base64(part)
part.add_header('Content-Disposition','attachment; filename="%s"' % os.path.basename(fileName))
msg.attach(part)
return True
def splitFile(self,fileName):
"""Разделяет файл (fileName) на куски (формат 7z) во временную директорию.
Возвращает путь временной директории, после использования директории следует самостоятельно ее удалить.
Если не получилось разделить (ошибка), то вернет False"""
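        # Illustrative call (sketch, values assumed): with self.split = '10m',
        #   splitFile('/tmp/backup.tar')
        # runs roughly: 7z a -v10m '<tmpdir>/backup.tar.7z' '/tmp/backup.tar'
        # and returns the temporary directory holding backup.tar.7z.001, .002, ...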
tmpDir = tempfile.mkdtemp('mailer')
verb = ''
if not self.verbose:
verb = '1>/dev/null'
cmd = "7z a -v{volsize} '{arcfile}' '{sendfile}' {verbose}".format(
volsize=self.split,
arcfile='{tmpdir}/{basename}.7z'.format(
tmpdir=tmpDir,
basename=os.path.basename(fileName),
),
sendfile=fileName,
verbose=verb
)
cmd = cmd.replace( "(", "\(" ).replace( ")","\)" )
result = os.system( cmd )
if result==0:
return tmpDir
else:
return False
def sendParts(self,tmpDir,srcFilePath):
"""Отсылает файлы указанной директории (tmpDir) отдельными письмами.
Возвращает кол-вот отправленых писем."""
def mf( msg ):
return lambda: msg.makeMessage()
succCount = 0
messages = []
for dirpath, dirnames, filenames in os.walk(tmpDir):
count = len(filenames)
idx = 0
for filename in filenames:
idx = idx + 1
filepath = os.path.join( dirpath,filename )
date_ = datetime.datetime.now()
tmpl = self.subjectTmpl
subject = tmpl.format(
subject=self.subject,
part=idx,
total=count,
date=self.timeString(date_),
filepath=srcFilePath,
filename=os.path.basename(srcFilePath),
attachpath=filepath,
attachname=os.path.basename(filepath))
m = Mailer()
m.addrTo = self.addrTo
m.addrFrom = self.addrFrom
m.subject = subject
m.text = self.text
m.attach = filepath
m.smtpHost = self.smtpHost
m.smtpPort = self.smtpPort
m.useSSL = self.useSSL
m.useTLS = self.useTLS
m.smtpPassword = self.smtpPassword
m.verbose = self.verbose
m.timeout = self.timeout
m.split = False
#msg = m.makeMessage()
msg = mf( m )
if not isinstance(msg,bool):
messages.append( msg )
succ = self.sendMailMessage( messages )
if succ:
return len(messages)
return 0
def send(self):
"""Отправляет письмо на почту.
Если указано разделять вложения на части - то отправит несколько писем.
Возвращает кол-во отправленых писем."""
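        # Illustrative setup (all values below are assumed):
        #   m = Mailer()
        #   m.smtpHost = 'smtp.example.org'
        #   m.addrFrom = 'user@example.org'
        #   m.smtpPassword = 'secret'
        #   m.addrTo = ['dest@example.org']
        #   m.attach = '/tmp/big.tar'
        #   m.split = '5m'
        #   m.send()  # one message per 7z volume produced by splitFile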
if isinstance(self.split,str):
count = 0
if isinstance(self.attach,(list,tuple)):
if len(self.attach)>0:
for attc in self.attach:
if os.path.isfile( attc ):
tmpDir = self.splitFile( attc )
if os.path.isdir(tmpDir):
count = count + self.sendParts(tmpDir,attc)
shutil.rmtree(tmpDir)
pass
elif os.path.isfile( self.attach ):
tmpDir = self.splitFile( self.attach )
if os.path.isdir(tmpDir):
count = count + self.sendParts(tmpDir,self.attach)
shutil.rmtree(tmpDir)
            self.log( 'Sent {counter} messages'.format(counter=count) )
return count>0
return self.sendMail()
def sendMailMessage(self,msg):
"""Отправляет сообщения (msg) на почту.
msg - либо список сообщений (объекты MIMEMultipart / набор функций(без аргументов) - возвращающие MIMEMultipart)
/ либо отдельный объект MIMEMultipart.
Возвращает True - успешно / False - Ошибка отправки.
"""
try:
if self.useSSL:
                self.log( 'Connecting over SSL, host={host} port={port}'.format( host=self.smtpHost,port=self.smtpPort ) )
mailServer = smtplib.SMTP_SSL( self.smtpHost, self.smtpPort, timeout=float(self.timeout) )
else:
                self.log( 'Connecting, host={host} port={port}'.format( host=self.smtpHost,port=self.smtpPort ) )
mailServer = smtplib.SMTP( self.smtpHost, self.smtpPort, timeout=float(self.timeout) )
            self.log( 'EHLO command' )
mailServer.ehlo()
if self.useTLS:
                self.log( 'STARTTLS command' )
mailServer.starttls()
_login_ = self.login
            if _login_ == False:  # fall back to the from-address when no explicit login is set
_login_ = self.addrFrom
            self.log( 'LOGIN command, login={login}'.format(login=_login_) )
mailServer.login( _login_, self.smtpPassword )
if isinstance(msg,(tuple,list)):
for message in msg:
m = message
if hasattr(message, '__call__'):
m = message()
if not isinstance(m,bool):
                        self.log( 'Sending message, to:{to} subject:{subj}'.format(
to=m['To'],
subj=m['Subject']
) )
mailServer.sendmail(self.addrFrom, m['To'], m.as_string())
else:
                self.log( 'Sending message, to:{to} subject:{subj}'.format(
to=msg['To'],
subj=msg['Subject']
) )
mailServer.sendmail(self.addrFrom, msg['To'], msg.as_string())
            self.log( 'Closing connection' )
mailServer.close()
            self.log( 'Message sent' )
return True
except smtplib.SMTPException as e:
            print 'Mail error {err}'.format( err=e )
return False
def makeMessage(self):
"""Создает сообщение - объект MIMEMultipart и возвращает его."""
msg = MIMEMultipart()
if not isinstance(self.addrFrom,str):
            self.serr( 'Sender not specified - addrFrom is not a string' )
return False
msg['From'] = self.addrFrom
if isinstance(self.addrTo,(str,unicode)):
msg['To'] = self.addrTo
elif isinstance(self.addrTo,(list,tuple)):
if len(self.addrTo)==0:
                self.serr( 'Recipient not specified - len(addrTo) = 0' )
return False
msg['To'] = ', '.join( self.addrTo )
else:
            self.serr( 'addrTo is not a string / list' )
return False
if isinstance(self.subject,(str,unicode)):
msg['Subject'] = self.subject
else:
            self.serr( 'Subject not specified - subject is not a string' )
return False
if isinstance(self.text,(str,unicode)):
msg.attach( MIMEText(self.text) )
else:
            self.serr( 'text is not a string' )
return False
if isinstance(self.attach,(list,tuple)):
for attc in self.attach:
self.attachFile( msg, attc )
elif os.path.exists( self.attach ):
self.attachFile( msg, self.attach )
return msg
def sendMail(self):
"""Отправляет отдельное письмо.
Если сообщение создано удачно и письмо отправлено вернет - True.
Если возникли проблемы - то вернет False."""
msg = self.makeMessage()
if isinstance(msg,bool):
return msg
return self.sendMailMessage( msg )
def imapWork(self,workFun):
"""Соединяется с сервером imap, производит login и передает управление функции workFun( m )
m - Объект imaplib.IMAP4. После завершению работы workFun завершает работу с imap."""
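        # Illustrative callback (sketch):
        #   def show_mailboxes(m):
        #       print m.list()
        #   mailer.imapWork(show_mailboxes)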
if not self.imapHost:
            self.serr( 'imap parameter not specified (imapHost)' )
return False
if not self.imapPort:
            self.serr( 'imap parameter not specified (imapPort)' )
return False
if not self.imapPassword:
            self.serr( 'password parameter not specified (imapPassword)' )
return False
if not self.addrFrom:
            self.serr( 'from parameter not specified (addrFrom)' )
return False
mail = None
if self.imapSSL:
            self.log( 'Connecting to imap over ssl {host}:{port}'.format(host=self.imapHost,port=self.imapPort) )
mail = imaplib.IMAP4_SSL(self.imapHost,self.imapPort)
else:
            self.log( 'Connecting to imap {host}:{port}'.format(host=self.imapHost,port=self.imapPort) )
mail = imaplib.IMAP4(self.imapHost,self.imapPort)
        self.log( 'LOGIN command, login={login}'.format(login=self.addrFrom) )
mail.login(self.addrFrom,self.imapPassword)
workFun( mail )
        self.log( 'Finished working with imap' )
mail.logout()
return True
def decode_m_utf7(self,s):
r = []
decode = []
for c in s:
if c == '&' and not decode:
decode.append('&')
elif c == '-' and decode:
if len(decode) == 1:
r.append('&')
else:
r.append(self.modified_unbase64(''.join(decode[1:])))
decode = []
elif decode:
decode.append(c)
else:
r.append(c)
if decode:
r.append(self.modified_unbase64(''.join(decode[1:])))
out = ''.join(r)
if not isinstance(out, unicode):
out = unicode(out, 'latin-1')
return out
def modified_base64(self,s):
s_utf7 = s.encode('utf-7')
return s_utf7[1:-1].replace('/', ',')
def modified_unbase64(self,s):
s_utf7 = '+' + s.replace(',', '/') + '-'
return s_utf7.decode('utf-7')
    def encode_m_utf7(self, s):
if isinstance(s, str) and sum(n for n in (ord(c) for c in s) if n > 127):
raise FolderNameError("%r contains characters not valid in a str folder name. "
"Convert to unicode first?" % s)
r = []
_in = []
for c in s:
if ord(c) in (range(0x20, 0x26) + range(0x27, 0x7f)):
if _in:
                    r.extend(['&', self.modified_base64(''.join(_in)), '-'])
del _in[:]
r.append(str(c))
elif c == '&':
if _in:
                    r.extend(['&', self.modified_base64(''.join(_in)), '-'])
del _in[:]
r.append('&-')
else:
_in.append(c)
if _in:
            r.extend(['&', self.modified_base64(''.join(_in)), '-'])
return ''.join(r)
def list(self):
"""Просматривает список ящиков на сервере imap"""
def listwf(mail):
            self.log( 'LIST command' )
res = mail.list()
if isinstance(res,(list,tuple)):
if len(res)>1 and res[0]=='OK' and isinstance(res[1],(list,tuple)):
for item in res[1]:
print self.decode_m_utf7( item )
succ = self.imapWork( listwf )
return succ | mit | -74,163,532,198,789,380 | 26.899743 | 114 | 0.656008 | false |
noironetworks/group-based-policy | gbpservice/neutron/db/servicechain_db.py | 1 | 30032 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
from neutron_lib.db import model_base
from neutron_lib import exceptions as n_exc
from neutron_lib.plugins import constants as pconst
from neutron_lib.plugins import directory
from oslo_log import helpers as log
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
import sqlalchemy as sa
from sqlalchemy.ext.orderinglist import ordering_list
from sqlalchemy import orm
from sqlalchemy.orm import exc
from gbpservice.neutron.db import api as db_api
from gbpservice.neutron.extensions import servicechain as schain
from gbpservice.neutron.services.servicechain.common import exceptions as s_exc
LOG = logging.getLogger(__name__)
MAX_IPV4_SUBNET_PREFIX_LENGTH = 31
MAX_IPV6_SUBNET_PREFIX_LENGTH = 127
class BaseSCResource(model_base.HasId, model_base.HasProject):
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
status = sa.Column(sa.String(length=16), nullable=True)
status_details = sa.Column(sa.String(length=4096), nullable=True)
class BaseSharedSCResource(BaseSCResource):
shared = sa.Column(sa.Boolean)
class SpecNodeAssociation(model_base.BASEV2):
"""Models one to many providing relation between Specs and Nodes."""
__tablename__ = 'sc_spec_node_associations'
servicechain_spec_id = sa.Column(
sa.String(36), sa.ForeignKey('sc_specs.id'), primary_key=True)
node_id = sa.Column(sa.String(36),
sa.ForeignKey('sc_nodes.id'),
primary_key=True)
position = sa.Column(sa.Integer)
class InstanceSpecAssociation(model_base.BASEV2):
"""Models one to many providing relation between Instance and Specs."""
__tablename__ = 'sc_instance_spec_mappings'
servicechain_instance_id = sa.Column(
sa.String(36), sa.ForeignKey('sc_instances.id'), primary_key=True)
servicechain_spec_id = sa.Column(sa.String(36),
sa.ForeignKey('sc_specs.id'),
primary_key=True)
position = sa.Column(sa.Integer)
class ServiceChainNode(model_base.BASEV2, BaseSharedSCResource):
"""ServiceChain Node"""
__tablename__ = 'sc_nodes'
config = sa.Column(sa.TEXT)
specs = orm.relationship(SpecNodeAssociation,
backref="nodes",
cascade='all, delete, delete-orphan')
service_type = sa.Column(sa.String(50), nullable=True)
service_profile_id = sa.Column(
sa.String(36), sa.ForeignKey('service_profiles.id'),
nullable=True)
class ServiceChainInstance(model_base.BASEV2, BaseSCResource):
"""Service chain instances"""
__tablename__ = 'sc_instances'
config_param_values = sa.Column(sa.String(4096))
specs = orm.relationship(
InstanceSpecAssociation,
backref='instances',
cascade='all,delete, delete-orphan',
order_by='InstanceSpecAssociation.position',
collection_class=ordering_list('position', count_from=1))
provider_ptg_id = sa.Column(sa.String(36),
# FixMe(Magesh) Issue with cascade on Delete
# sa.ForeignKey('gp_policy_target_groups.id'),
nullable=True)
consumer_ptg_id = sa.Column(sa.String(36),
# sa.ForeignKey('gp_policy_target_groups.id'),
nullable=True)
management_ptg_id = sa.Column(sa.String(36),
# sa.ForeignKey('gp_policy_target_groups.id'),
nullable=True)
classifier_id = sa.Column(sa.String(36),
# sa.ForeignKey('gp_policy_classifiers.id'),
nullable=True)
class ServiceChainSpec(model_base.BASEV2, BaseSharedSCResource):
""" ServiceChain Spec
"""
__tablename__ = 'sc_specs'
nodes = orm.relationship(
SpecNodeAssociation,
backref='specs', cascade='all, delete, delete-orphan',
order_by='SpecNodeAssociation.position',
collection_class=ordering_list('position', count_from=1))
config_param_names = sa.Column(sa.String(4096))
instances = orm.relationship(InstanceSpecAssociation,
backref="specs",
cascade='all, delete, delete-orphan')
class ServiceProfile(model_base.BASEV2, BaseSharedSCResource):
""" Service Profile
"""
__tablename__ = 'service_profiles'
vendor = sa.Column(sa.String(50))
# Not using ENUM for less painful upgrades. Validation will happen at the
# API level
insertion_mode = sa.Column(sa.String(50))
service_type = sa.Column(sa.String(50))
service_flavor = sa.Column(sa.String(1024))
nodes = orm.relationship(ServiceChainNode, backref="service_profile")
class ServiceChainDbPlugin(schain.ServiceChainPluginBase):
"""ServiceChain plugin interface implementation using SQLAlchemy models."""
# TODO(osms69): native bulk support
__native_bulk_support = False
__native_pagination_support = True
__native_sorting_support = True
def __init__(self, *args, **kwargs):
super(ServiceChainDbPlugin, self).__init__(*args, **kwargs)
@property
def _grouppolicy_plugin(self):
# REVISIT(Magesh): Need initialization method after all
# plugins are loaded to grab and store plugin.
grouppolicy_plugin = directory.get_plugin(pconst.GROUP_POLICY)
if not grouppolicy_plugin:
LOG.error("No Grouppolicy service plugin found.")
raise s_exc.ServiceChainDeploymentError()
return grouppolicy_plugin
# REVISIT: This is temporary, the correct fix is to use the
# project_id in the context. Moreover, patch.py already patches
# thi, so it should not be required here.
def _get_tenant_id_for_create(self, context, resource):
if context.is_admin and 'tenant_id' in resource:
tenant_id = resource['tenant_id']
return tenant_id
elif ('tenant_id' in resource and
resource['tenant_id'] != context.tenant_id):
reason = _('Cannot create resource for another tenant')
raise n_exc.AdminRequired(reason=reason)
else:
tenant_id = context.tenant_id
return tenant_id
def _get_servicechain_node(self, context, node_id):
try:
return db_api.get_by_id(context, ServiceChainNode, node_id)
except exc.NoResultFound:
raise schain.ServiceChainNodeNotFound(sc_node_id=node_id)
def _get_servicechain_spec(self, context, spec_id):
try:
return db_api.get_by_id(context, ServiceChainSpec, spec_id)
except exc.NoResultFound:
raise schain.ServiceChainSpecNotFound(sc_spec_id=spec_id)
def _get_servicechain_instance(self, context, instance_id):
try:
return db_api.get_by_id(context, ServiceChainInstance, instance_id)
except exc.NoResultFound:
raise schain.ServiceChainInstanceNotFound(
sc_instance_id=instance_id)
def _get_service_profile(self, context, profile_id):
try:
return db_api.get_by_id(context, ServiceProfile, profile_id)
except exc.NoResultFound:
raise schain.ServiceProfileNotFound(
profile_id=profile_id)
def _populate_common_fields_in_dict(self, db_ref):
res = {'id': db_ref['id'],
'tenant_id': db_ref['tenant_id'],
'name': db_ref['name'],
'description': db_ref['description'],
'status': db_ref['status'],
'status_details': db_ref['status_details'],
'shared': db_ref.get('shared', False)}
return res
def _make_sc_node_dict(self, sc_node, fields=None):
res = self._populate_common_fields_in_dict(sc_node)
res['service_profile_id'] = sc_node['service_profile_id']
res['service_type'] = sc_node['service_type']
res['config'] = sc_node['config']
res['servicechain_specs'] = [sc_spec['servicechain_spec_id']
for sc_spec in sc_node['specs']]
return db_api.resource_fields(res, fields)
def _make_sc_spec_dict(self, spec, fields=None):
res = self._populate_common_fields_in_dict(spec)
res['config_param_names'] = spec.get('config_param_names')
res['nodes'] = [sc_node['node_id'] for sc_node in spec['nodes']]
res['instances'] = [x['servicechain_instance_id'] for x in
spec['instances']]
return db_api.resource_fields(res, fields)
def _make_sc_instance_dict(self, instance, fields=None):
res = {'id': instance['id'],
'tenant_id': instance['tenant_id'],
'name': instance['name'],
'description': instance['description'],
'config_param_values': instance['config_param_values'],
'provider_ptg_id': instance['provider_ptg_id'],
'consumer_ptg_id': instance['consumer_ptg_id'],
'management_ptg_id': instance['management_ptg_id'],
'classifier_id': instance['classifier_id'],
'status': instance['status'],
'status_details': instance['status_details']}
res['servicechain_specs'] = [sc_spec['servicechain_spec_id']
for sc_spec in instance['specs']]
return db_api.resource_fields(res, fields)
def _make_service_profile_dict(self, profile, fields=None):
res = self._populate_common_fields_in_dict(profile)
res['service_type'] = profile['service_type']
res['service_flavor'] = profile['service_flavor']
res['vendor'] = profile['vendor']
res['insertion_mode'] = profile['insertion_mode']
res['nodes'] = [node['id'] for node in profile['nodes']]
return db_api.resource_fields(res, fields)
@staticmethod
def validate_service_type(service_type):
if service_type not in schain.sc_supported_type:
raise schain.ServiceTypeNotSupported(sc_service_type=service_type)
@log.log_method_call
def create_servicechain_node(self, context, servicechain_node):
node = servicechain_node['servicechain_node']
tenant_id = self._get_tenant_id_for_create(context, node)
with db_api.CONTEXT_WRITER.using(context):
node_db = ServiceChainNode(
id=uuidutils.generate_uuid(), tenant_id=tenant_id,
name=node['name'], description=node['description'],
service_profile_id=node.get('service_profile_id'),
service_type=node.get('service_type'),
config=node['config'], shared=node['shared'],
status=node.get('status'),
status_details=node.get('status_details'))
context.session.add(node_db)
return self._make_sc_node_dict(node_db)
@log.log_method_call
def update_servicechain_node(self, context, servicechain_node_id,
servicechain_node, set_params=False):
node = servicechain_node['servicechain_node']
with db_api.CONTEXT_WRITER.using(context):
node_db = self._get_servicechain_node(context,
servicechain_node_id)
node_db.update(node)
# Update the config param names derived for the associated specs
spec_node_associations = node_db.specs
for node_spec in spec_node_associations:
spec_id = node_spec.servicechain_spec_id
spec_db = self._get_servicechain_spec(context, spec_id)
self._process_nodes_for_spec(
context, spec_db, self._make_sc_spec_dict(spec_db),
set_params=set_params)
return self._make_sc_node_dict(node_db)
@log.log_method_call
def delete_servicechain_node(self, context, servicechain_node_id):
with db_api.CONTEXT_WRITER.using(context):
node_db = self._get_servicechain_node(context,
servicechain_node_id)
if node_db.specs:
raise schain.ServiceChainNodeInUse(
node_id=servicechain_node_id)
context.session.delete(node_db)
@log.log_method_call
def get_servicechain_node(self, context, servicechain_node_id,
fields=None):
node = self._get_servicechain_node(context, servicechain_node_id)
return self._make_sc_node_dict(node, fields)
@log.log_method_call
def get_servicechain_nodes(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
plugin = directory.get_plugin()
marker_obj = db_api.get_marker_obj(plugin, context,
'servicechain_node', limit,
marker)
return db_api.get_collection(context, ServiceChainNode,
self._make_sc_node_dict,
filters=filters, fields=fields,
sorts=sorts, limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
@log.log_method_call
def get_servicechain_nodes_count(self, context, filters=None):
return db_api.get_collection_count(context, ServiceChainNode,
filters=filters)
def _process_nodes_for_spec(self, context, spec_db, spec,
set_params=True):
if 'nodes' in spec:
self._set_nodes_for_spec(context, spec_db, spec['nodes'],
set_params=set_params)
del spec['nodes']
return spec
def _set_nodes_for_spec(self, context, spec_db, nodes_id_list,
set_params=True):
if not nodes_id_list:
spec_db.nodes = []
spec_db.config_param_names = '[]'
return
with context.session.begin(subtransactions=True):
# We will first check if the new list of nodes is valid
filters = {'id': [n_id for n_id in nodes_id_list]}
nodes_in_db = db_api.get_collection_query(context,
ServiceChainNode,
filters=filters)
nodes_list = [n_db['id'] for n_db in nodes_in_db]
for node_id in nodes_id_list:
if node_id not in nodes_list:
# If we find an invalid node id in the list we
# do not perform the update
raise schain.ServiceChainNodeNotFound(sc_node_id=node_id)
# New list of nodes is valid so we will first reset the
# existing list and then add each node in order.
# Note that the list could be empty in which case we interpret
# it as clearing existing nodes.
spec_db.nodes = []
if set_params:
spec_db.config_param_names = '[]'
for node_id in nodes_id_list:
if set_params:
sc_node = self.get_servicechain_node(context, node_id)
node_dict = jsonutils.loads(sc_node['config'])
config_params = (node_dict.get('parameters') or
node_dict.get('Parameters'))
if config_params:
if not spec_db.config_param_names:
spec_db.config_param_names = str(
config_params.keys())
else:
config_param_names = ast.literal_eval(
spec_db.config_param_names)
config_param_names.extend(config_params.keys())
spec_db.config_param_names = str(
config_param_names)
assoc = SpecNodeAssociation(servicechain_spec_id=spec_db.id,
node_id=node_id)
spec_db.nodes.append(assoc)
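    # Illustrative effect of _set_nodes_for_spec (the node config below is assumed):
    #   a node whose config is '{"parameters": {"vip_ip": {}}}' contributes "vip_ip"
    #   to spec_db.config_param_names when set_params is True.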
def _process_specs_for_instance(self, context, instance_db, instance):
if 'servicechain_specs' in instance:
self._set_specs_for_instance(context, instance_db,
instance['servicechain_specs'])
del instance['servicechain_specs']
return instance
def _set_specs_for_instance(self, context, instance_db, spec_id_list):
if not spec_id_list:
instance_db.spec_ids = []
return
with context.session.begin(subtransactions=True):
filters = {'id': spec_id_list}
specs_in_db = db_api.get_collection_query(context,
ServiceChainSpec,
filters=filters)
specs_list = set(spec_db['id'] for spec_db in specs_in_db)
for spec_id in spec_id_list:
if spec_id not in specs_list:
# Do not update if spec ID is invalid
raise schain.ServiceChainSpecNotFound(sc_spec_id=spec_id)
# Reset the existing list and then add each spec in order. The list
# could be empty in which case we clear the existing specs.
instance_db.specs = []
for spec_id in spec_id_list:
assoc = InstanceSpecAssociation(
servicechain_instance_id=instance_db.id,
servicechain_spec_id=spec_id)
instance_db.specs.append(assoc)
def _get_instances_from_policy_target(self, context, policy_target):
with context.session.begin(subtransactions=True):
ptg_id = policy_target['policy_target_group_id']
scis_p = self.get_servicechain_instances(
context, {'provider_ptg_id': [ptg_id]})
scis_c = self.get_servicechain_instances(
context, {'consumer_ptg_id': [ptg_id]})
# Don't return duplicates
result = []
seen = set()
for sci in scis_p + scis_c:
if sci['id'] not in seen:
seen.add(sci['id'])
result.append(sci)
return result
@log.log_method_call
def create_servicechain_spec(self, context, servicechain_spec,
set_params=True):
spec = servicechain_spec['servicechain_spec']
tenant_id = self._get_tenant_id_for_create(context, spec)
with db_api.CONTEXT_WRITER.using(context):
spec_db = ServiceChainSpec(
id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=spec['name'],
description=spec['description'],
shared=spec['shared'],
status=spec.get('status'),
status_details=spec.get('status_details'))
self._process_nodes_for_spec(context, spec_db, spec,
set_params=set_params)
context.session.add(spec_db)
return self._make_sc_spec_dict(spec_db)
@log.log_method_call
def update_servicechain_spec(self, context, spec_id,
servicechain_spec, set_params=True):
spec = servicechain_spec['servicechain_spec']
with db_api.CONTEXT_WRITER.using(context):
spec_db = self._get_servicechain_spec(context,
spec_id)
spec = self._process_nodes_for_spec(context, spec_db, spec,
set_params=set_params)
spec_db.update(spec)
return self._make_sc_spec_dict(spec_db)
@log.log_method_call
def delete_servicechain_spec(self, context, spec_id):
policy_actions = self._grouppolicy_plugin.get_policy_actions(
context, filters={"action_value": [spec_id]})
if policy_actions:
raise schain.ServiceChainSpecInUse(spec_id=spec_id)
with db_api.CONTEXT_WRITER.using(context):
spec_db = self._get_servicechain_spec(context,
spec_id)
if spec_db.instances:
raise schain.ServiceChainSpecInUse(spec_id=spec_id)
context.session.delete(spec_db)
@log.log_method_call
def get_servicechain_spec(self, context, spec_id,
fields=None):
spec = self._get_servicechain_spec(context, spec_id)
return self._make_sc_spec_dict(spec, fields)
@log.log_method_call
def get_servicechain_specs(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
plugin = directory.get_plugin()
marker_obj = db_api.get_marker_obj(plugin, context,
'servicechain_spec', limit,
marker)
return db_api.get_collection(context, ServiceChainSpec,
self._make_sc_spec_dict,
filters=filters, fields=fields,
sorts=sorts, limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
@log.log_method_call
def get_servicechain_specs_count(self, context, filters=None):
return db_api.get_collection_count(context, ServiceChainSpec,
filters=filters)
@log.log_method_call
def create_servicechain_instance(self, context, servicechain_instance):
instance = servicechain_instance['servicechain_instance']
tenant_id = self._get_tenant_id_for_create(context, instance)
with db_api.CONTEXT_WRITER.using(context):
if not instance.get('management_ptg_id'):
management_groups = (
self._grouppolicy_plugin.get_policy_target_groups(
context, {'service_management': [True],
'tenant_id': [instance.get('tenant_id')]}))
if not management_groups:
# Fall back on shared service management
management_groups = (
self._grouppolicy_plugin.get_policy_target_groups(
context, {'service_management': [True]}))
if management_groups:
instance['management_ptg_id'] = management_groups[0]['id']
instance_db = ServiceChainInstance(
id=uuidutils.generate_uuid(),
tenant_id=tenant_id, name=instance['name'],
description=instance['description'],
config_param_values=instance['config_param_values'],
provider_ptg_id=instance.get('provider_ptg_id'),
consumer_ptg_id=instance.get('consumer_ptg_id'),
management_ptg_id=instance.get('management_ptg_id'),
classifier_id=instance.get('classifier_id'),
status=instance.get('status'),
status_details=instance.get('status_details'))
self._process_specs_for_instance(context, instance_db, instance)
context.session.add(instance_db)
return self._make_sc_instance_dict(instance_db)
@log.log_method_call
def update_servicechain_instance(self, context, servicechain_instance_id,
servicechain_instance):
instance = servicechain_instance['servicechain_instance']
with db_api.CONTEXT_WRITER.using(context):
instance_db = self._get_servicechain_instance(
context, servicechain_instance_id)
instance = self._process_specs_for_instance(context, instance_db,
instance)
instance_db.update(instance)
return self._make_sc_instance_dict(instance_db)
@log.log_method_call
def delete_servicechain_instance(self, context, servicechain_instance_id):
with db_api.CONTEXT_WRITER.using(context):
instance_db = self._get_servicechain_instance(
context, servicechain_instance_id)
context.session.delete(instance_db)
@log.log_method_call
def get_servicechain_instance(self, context, sc_instance_id, fields=None):
instance_db = self._get_servicechain_instance(context, sc_instance_id)
return self._make_sc_instance_dict(instance_db, fields)
@log.log_method_call
def get_servicechain_instances(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
plugin = directory.get_plugin()
marker_obj = db_api.get_marker_obj(plugin, context,
'servicechain_instance',
limit, marker)
return db_api.get_collection(context, ServiceChainInstance,
self._make_sc_instance_dict,
filters=filters, fields=fields,
sorts=sorts, limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
@log.log_method_call
def get_servicechain_instances_count(self, context, filters=None):
return db_api.get_collection_count(context, ServiceChainInstance,
filters=filters)
@log.log_method_call
def get_service_profiles_count(self, context, filters=None):
return db_api.get_collection_count(context, ServiceProfile,
filters=filters)
@log.log_method_call
def create_service_profile(self, context, service_profile):
profile = service_profile['service_profile']
tenant_id = self._get_tenant_id_for_create(context, profile)
with db_api.CONTEXT_WRITER.using(context):
profile_db = ServiceProfile(
id=uuidutils.generate_uuid(), tenant_id=tenant_id,
name=profile['name'], description=profile['description'],
service_type=profile.get('service_type'),
insertion_mode=profile.get('insertion_mode'),
vendor=profile.get('vendor'),
service_flavor=profile.get('service_flavor'),
shared=profile.get('shared'),
status=profile.get('status'),
status_details=profile.get('status_details'))
context.session.add(profile_db)
return self._make_service_profile_dict(profile_db)
@log.log_method_call
def update_service_profile(self, context, service_profile_id,
service_profile):
profile = service_profile['service_profile']
with db_api.CONTEXT_WRITER.using(context):
profile_db = self._get_service_profile(context,
service_profile_id)
profile_db.update(profile)
return self._make_service_profile_dict(profile_db)
@log.log_method_call
def delete_service_profile(self, context, service_profile_id):
with db_api.CONTEXT_WRITER.using(context):
profile_db = self._get_service_profile(context,
service_profile_id)
if profile_db.nodes:
raise schain.ServiceProfileInUse(
profile_id=service_profile_id)
context.session.delete(profile_db)
@log.log_method_call
def get_service_profile(self, context, service_profile_id, fields=None):
profile_db = self._get_service_profile(
context, service_profile_id)
return self._make_service_profile_dict(profile_db, fields)
@log.log_method_call
def get_service_profiles(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
plugin = directory.get_plugin()
marker_obj = db_api.get_marker_obj(plugin, context,
'service_profile',
limit, marker)
return db_api.get_collection(context, ServiceProfile,
self._make_service_profile_dict,
filters=filters, fields=fields,
sorts=sorts, limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
| apache-2.0 | 4,388,496,890,389,761,000 | 46.369085 | 80 | 0.565197 | false |
nihlaeth/HorseLife | interface/cli/display.py | 1 | 7394 | """Interface to inherit from for *Display classes."""
from textwrap import fill
from core.core import Core
from core.messagecore import MessageCore
from core.stablecore import StableCore
from core.pasturecore import PastureCore
from core.towncore import TownCore
from core.contractercore import ContracterCore
from support.messages.quit import Quit
from support.messages.back import Back
from support.messages.meter import Meter
from support.messages.action import Action
from errors.invalidchoice import InvalidChoice
# pylint: disable=too-many-instance-attributes
class Display(object):
"""Common methods to inherit from by *Display classes."""
def __init__(self):
"""Initiate with only static data."""
self._screen_width = 70
self._separator = self._repeat('-', self._screen_width)
self._title = "You should not be seeing this"
self._description = "This class is not to be called directly."
self._actions = None
self._menu = None
self._info = None
self._story = None
self._level = None
self._core = None
self._choice = None
def init(self, session):
"""Get data from core."""
self._info = self._core.get_info(session)
self._level = self._core.get_level(session)
self._story = self._core.get_story(session)
self._actions = self._core.get_actions(session)
self._menu = self._core.get_menu()
self._choice = Display.display(self, self._level)
def display(self, level=0):
"""Display screen and return user choice (class)."""
print self._format_title()
print ''.join([self._wrap_text(self._description), "\n\n"])
call_list = []
count = 0
for string in self._info:
if isinstance(string, Meter):
print self._meter(string)
elif isinstance(string, Action):
print "".join([str(count), ") ", str(string)])
call_list.append(string)
count += 1
else:
print self._wrap_text(str(string))
print "\n\n"
if self._story is not None:
print self._separator
print self._wrap_text(self._story.text)
print "".join([str(count), ") ", str(self._story.action)])
call_list.append(self._story.action)
count += 1
print self._separator
for action in self._actions:
if isinstance(action, Action):
if action.min_level <= level:
print self._wrap_text(''.join([
str(count),
") ",
str(action)]))
call_list.append(action)
count += 1
else:
print self._wrap_text(''.join([
str(count),
") ",
str(action)]))
call_list.append(action)
count += 1
print ''.join(["\n\n", self._separator, "\n\n"])
for item in self._menu:
print self._wrap_text(''.join([str(count), ") ", str(item)]))
call_list.append(item)
count += 1
choice = self._get_int(count)
return call_list[choice]
def hide(self):
"""Just a placeholder."""
pass
def _repeat(self, string, num):
"""Repeat string num times and return it."""
return ''.join([str(string) for _ in range(num)])
def _format_title(self):
"""Format the page title and return it."""
frame = self._repeat("=", self._screen_width)
whitespace = len(frame) - 6 - len(self._title)
leading_whitespace = whitespace / 2
trailing_whitespace = (whitespace / 2 if whitespace % 2 == 0
else whitespace / 2 + 1)
header = ''.join([
"===",
self._repeat(" ", leading_whitespace),
self._title,
self._repeat(" ", trailing_whitespace),
"==="])
return ''.join([frame, "\n", header, "\n", frame])
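    # Illustrative sketch (added comment, not in the original class): with the
    # default 70-column screen width and a title such as "Stable", _format_title()
    # yields a three-line banner -- a full line of '=', then
    # "===<spaces>Stable<spaces>===" with the title centred between the "==="
    # markers, then another full line of '='.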
def _get_int(self, limit, prompt="Choice: "):
"""Get an integer between 0 and limit from the user and return it.
Arguments:
limit -- the upper limit (exclusive)
promt -- text to be displayed to the user
"""
try:
response = int(raw_input(prompt))
except ValueError:
response = -1
while response < 0 or response >= limit:
print "Invalid choice, try again."
try:
response = int(raw_input(prompt))
except ValueError:
pass
return response
def get_string(self, min_length, prompt):
"""Get a str of min min_length characters from user and return it.
Arguments:
min_length -- the minimum string length
promt -- text to be displayed to the user
"""
response = raw_input(prompt)
while len(response) < min_length:
print ''.join([
"I need at least ",
str(min_length),
" characters."])
response = raw_input(prompt)
return response
def _wrap_text(self, text):
"""Wrap text to screen width while preserving paragraphs."""
paragraphs = text.split("\n")
return '\n'.join([fill(p, self._screen_width) for p in paragraphs])
def _meter(self, meter):
"""Return a graphical meter."""
percent_filled = float(meter.percent) / 100.
if meter.percent < 0:
percent_filled = 0
columnsfilled = int((self._screen_width - 2) * percent_filled)
return ''.join([
"[",
self._repeat("=", columnsfilled),
self._repeat(" ", self._screen_width - columnsfilled - 2),
"]"])
def choice(self, result):
"""Handle user choice on this end."""
if result is None:
return self.display()
elif isinstance(result, Core):
if isinstance(result, StableCore):
from stabledisplay import StableDisplay
next_display = StableDisplay(result)
elif isinstance(result, TownCore):
from towndisplay import TownDisplay
next_display = TownDisplay(result)
elif isinstance(result, PastureCore):
from pasturedisplay import PastureDisplay
next_display = PastureDisplay(result)
elif isinstance(result, MessageCore):
from messagedisplay import MessageDisplay
next_display = MessageDisplay(result)
elif isinstance(result, ContracterCore):
from contracterdisplay import ContracterDisplay
next_display = ContracterDisplay(result)
else:
raise InvalidChoice(result)
next_action = next_display.display()
if isinstance(next_action, Back):
return self.display()
elif isinstance(next_action, Quit):
return next_action
else:
                raise InvalidChoice(next_action)
elif isinstance(result, Back) or isinstance(result, Quit):
return result
else:
raise InvalidChoice(result)
| gpl-2.0 | -9,055,528,757,811,274,000 | 34.37799 | 75 | 0.545983 | false |
mturilli/aimes.emanager | doc/conf.py | 1 | 10732 | # -*- coding: utf-8 -*-
#
# aimes.emanager documentation build configuration file, created by
# sphinx-quickstart on Sat Nov 29 01:32:22 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'aimes.emanager'
copyright = u'2014, Matteo Turilli, Andre Merzky'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'aimesemanagerdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'aimesemanager.tex', u'aimes.emanager Documentation',
u'Matteo Turilli, Andre Merzky', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'aimesemanager', u'aimes.emanager Documentation',
[u'Matteo Turilli, Andre Merzky'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'aimesemanager', u'aimes.emanager Documentation',
u'Matteo Turilli, Andre Merzky', 'aimesemanager', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'aimes.emanager'
epub_author = u'Matteo Turilli, Andre Merzky'
epub_publisher = u'Matteo Turilli, Andre Merzky'
epub_copyright = u'2014, Matteo Turilli, Andre Merzky'
# The basename for the epub file. It defaults to the project name.
#epub_basename = u'aimes.emanager'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| mit | 1,229,454,034,939,450,600 | 30.472141 | 88 | 0.709187 | false |
terrencepreilly/darglint | darglint/parse/grammars/numpy_receives_section.py | 1 | 5642 | # Generated on 2020-04-04 11:23:51.675046
from darglint.token import (
TokenType,
)
from darglint.parse.identifiers import (
NoqaIdentifier,
)
from darglint.parse.grammar import (
BaseGrammar,
P,
)
class ReceivesGrammar(BaseGrammar):
productions = [
P("receives-section", ([], "receives-header", "newlines", 0), ([], "receives-header", "receives-section1", 0), ([], "receives", "receives-header0", 0)),
P("receives-header", ([], "receives", "receives-header0", 0)),
P("receives-body", ([], "receives-item", "receives-body", 0), ([], "ident", "receives-item0", 0), ([], "ident", "receives-item1", 0)),
P("receives-item", ([], "ident", "receives-item0", 0), ([], "ident", "receives-item1", 0)),
P("block-indented", ([], "paragraph-indented", "block-indented0", 0), ([], "paragraph-indented", "block-indented1", 0), ([], "indented", "paragraph-indented0", 0), ([], "indented", "line", 0)),
P("split-indented", ([], "newline", "split-indented0", 0)),
P("paragraph-indented", ([], "indented", "paragraph-indented0", 0), ([], "indented", "line", 0)),
P("indented", ([], "indent", "indents", 0), (TokenType.INDENT, 0)),
P("indents", ([], "indent", "indents", 0), (TokenType.INDENT, 0)),
P("split", ([], "newline", "split0", 0)),
P("newlines", ([], "newline", "newlines", 0), (TokenType.NEWLINE, 0)),
P("line", ([], "word", "line", 0), ([], "word", "noqa-maybe", 0), ([NoqaIdentifier], "hash", "noqa", 0), ([NoqaIdentifier], "noqa-head", "noqa-statement1", 0), (TokenType.COLON, 0), (TokenType.HASH, 0), (TokenType.INDENT, 0), (TokenType.LPAREN, 0), (TokenType.RPAREN, 0), (TokenType.WORD, 0), (TokenType.RAISES, 0), (TokenType.ARGUMENTS, 0), (TokenType.ARGUMENT_TYPE, 0), (TokenType.RETURNS, 0), (TokenType.RETURN_TYPE, 0), (TokenType.YIELDS, 0), (TokenType.YIELD_TYPE, 0), (TokenType.VARIABLES, 0), (TokenType.VARIABLE_TYPE, 0), (TokenType.NOQA, 0), (TokenType.OTHER, 0), (TokenType.RECEIVES, 0), (TokenType.WARNS, 0), (TokenType.SEE, 0), (TokenType.ALSO, 0), (TokenType.NOTES, 0), (TokenType.EXAMPLES, 0), (TokenType.REFERENCES, 0), (TokenType.HEADER, 0)),
P("word", (TokenType.COLON, 0), (TokenType.HASH, 0), (TokenType.INDENT, 0), (TokenType.LPAREN, 0), (TokenType.RPAREN, 0), (TokenType.WORD, 0), (TokenType.RAISES, 0), (TokenType.ARGUMENTS, 0), (TokenType.ARGUMENT_TYPE, 0), (TokenType.RETURNS, 0), (TokenType.RETURN_TYPE, 0), (TokenType.YIELDS, 0), (TokenType.YIELD_TYPE, 0), (TokenType.VARIABLES, 0), (TokenType.VARIABLE_TYPE, 0), (TokenType.NOQA, 0), (TokenType.OTHER, 0), (TokenType.RECEIVES, 0), (TokenType.WARNS, 0), (TokenType.SEE, 0), (TokenType.ALSO, 0), (TokenType.NOTES, 0), (TokenType.EXAMPLES, 0), (TokenType.REFERENCES, 0), (TokenType.HEADER, 0)),
P("ident", (TokenType.WORD, 0), (TokenType.RAISES, 0), (TokenType.ARGUMENTS, 0), (TokenType.ARGUMENT_TYPE, 0), (TokenType.RETURNS, 0), (TokenType.RETURN_TYPE, 0), (TokenType.YIELDS, 0), (TokenType.YIELD_TYPE, 0), (TokenType.VARIABLES, 0), (TokenType.VARIABLE_TYPE, 0), (TokenType.NOQA, 0), (TokenType.OTHER, 0), (TokenType.RECEIVES, 0), (TokenType.WARNS, 0), (TokenType.SEE, 0), (TokenType.ALSO, 0), (TokenType.NOTES, 0), (TokenType.EXAMPLES, 0), (TokenType.REFERENCES, 0)),
P("header", (TokenType.HEADER, 0)),
P("colon", (TokenType.COLON, 0)),
P("hash", (TokenType.HASH, 0)),
P("indent", (TokenType.INDENT, 0)),
P("newline", (TokenType.NEWLINE, 0)),
P("noqa", (TokenType.NOQA, 0)),
P("receives", (TokenType.RECEIVES, 0)),
P("noqa-maybe", ([NoqaIdentifier], "hash", "noqa", 0), ([NoqaIdentifier], "noqa-head", "noqa-statement1", 0)),
P("noqa-head", ([], "hash", "noqa", 0)),
P("words", ([], "word", "words", 0), (TokenType.COLON, 0), (TokenType.HASH, 0), (TokenType.INDENT, 0), (TokenType.LPAREN, 0), (TokenType.RPAREN, 0), (TokenType.WORD, 0), (TokenType.RAISES, 0), (TokenType.ARGUMENTS, 0), (TokenType.ARGUMENT_TYPE, 0), (TokenType.RETURNS, 0), (TokenType.RETURN_TYPE, 0), (TokenType.YIELDS, 0), (TokenType.YIELD_TYPE, 0), (TokenType.VARIABLES, 0), (TokenType.VARIABLE_TYPE, 0), (TokenType.NOQA, 0), (TokenType.OTHER, 0), (TokenType.RECEIVES, 0), (TokenType.WARNS, 0), (TokenType.SEE, 0), (TokenType.ALSO, 0), (TokenType.NOTES, 0), (TokenType.EXAMPLES, 0), (TokenType.REFERENCES, 0), (TokenType.HEADER, 0)),
P("receives-section1", ([], "newline", "receives-section2", 0)),
P("receives-section2", ([], "receives-body", "newlines", 0), ([], "receives-item", "receives-body", 0), ([], "ident", "receives-item0", 0), ([], "ident", "receives-item1", 0)),
P("receives-header0", ([], "newline", "header", 0)),
P("receives-item0", ([], "newline", "block-indented", 0)),
P("receives-item1", ([], "colon", "receives-item2", 0)),
P("receives-item2", ([], "line", "receives-item3", 0)),
P("receives-item3", ([], "newline", "block-indented", 0)),
P("block-indented0", ([], "split", "block-indented", 0)),
P("block-indented1", ([], "split-indented", "block-indented", 0)),
P("split-indented0", ([], "indents", "split-indented1", 0), ([], "newline", "newlines", 0), (TokenType.NEWLINE, 0)),
P("split-indented1", ([], "newline", "newlines", 0), (TokenType.NEWLINE, 0)),
P("paragraph-indented0", ([], "line", "paragraph-indented1", 0)),
P("paragraph-indented1", ([], "newline", "paragraph-indented", 0)),
P("split0", ([], "newline", "newlines", 0), (TokenType.NEWLINE, 0)),
P("noqa-statement1", ([], "colon", "words", 0)),
]
start = "receives-section" | mit | -349,874,506,556,953,200 | 96.293103 | 766 | 0.606523 | false |
diogopaulo/kivyconfnav | Processes/ConfNavegacaoInserts.py | 1 | 2790 | def obtem_valor_bd(valor):
if valor == None:
return 'null'
elif isinstance(valor, bool):
return 1 if valor else 0
elif isinstance(valor, int):
return valor
else:
return "'" + valor.replace("'", "''") + "'"
class ConfNavegacaoInserts:
def __init__(self, configuracoes, path):
self.path_script = path
self.paginas = ''
self.configuracoes_nav = ''
self.configuracoes = configuracoes
def process(self):
for configuracao in self.configuracoes:
self.trata_configuracao(configuracao)
self.escreve_script()
def trata_configuracao(self, configuracao):
if configuracao.pagina.generate:
self.gera_pagina(configuracao.pagina)
self.gera_configuracao(configuracao)
def gera_configuracao(self, configuracao):
self.configuracoes_nav += 'insert into () values'
self.configuracoes_nav += '('
self.configuracoes_nav += obtem_valor_bd(configuracao.menus_id) + ','
self.configuracoes_nav += obtem_valor_bd(configuracao.pagina) + ','
self.configuracoes_nav += obtem_valor_bd(configuracao.pagina_seguinte) + ','
self.configuracoes_nav += obtem_valor_bd(configuracao.pagina_pai) + ','
self.configuracoes_nav += obtem_valor_bd(configuracao.redesenha_navegacao) + ','
self.configuracoes_nav += obtem_valor_bd(configuracao.nivel) + ','
self.configuracoes_nav += obtem_valor_bd(configuracao.mvc_action_name_cabecalho) + ','
self.configuracoes_nav += obtem_valor_bd(configuracao.apresenta_bread_crumb) + ','
self.configuracoes_nav += obtem_valor_bd(configuracao.tipologias_id) + ','
self.configuracoes_nav += obtem_valor_bd(configuracao.programas_id) + ','
self.configuracoes_nav += obtem_valor_bd(configuracao.sub_navegacao) + ','
self.configuracoes_nav += obtem_valor_bd(configuracao.obrigatoriedade_id) + ','
self.configuracoes_nav += obtem_valor_bd(configuracao.tipos_navegacao) + ','
self.configuracoes_nav += 'getdate(),1,getdate(),1)\n'
def gera_pagina(self, pagina):
self.paginas += 'insert into () values '
self.paginas += '('
self.paginas += obtem_valor_bd(pagina.id) + ','
self.paginas += obtem_valor_bd(pagina.nome_pagina) + ','
self.paginas += obtem_valor_bd(pagina.mvc_action_name) + ','
self.paginas += 'getdate(),1,getdate(),1)\n'
def escreve_script(self):
        if self.paginas == '' and self.configuracoes_nav == '':
            return
        with open(self.path_script, 'w') as f:
            f.write('-- Paginas \n')
            f.write(self.paginas)
            f.write('\n\n-- Configuracoes Navegacao \n')
            f.write(self.configuracoes_nav)
| lgpl-3.0 | -8,103,772,840,879,177,000 | 43.285714 | 94 | 0.62043 | false |
zojoncj/cleanthehead | nsnitro/nsresources/nssslvserver.py | 1 | 23417 | from nsbaseresource import NSBaseResource
__author__ = 'Aleksandar Topuzovic'
class NSSSLVServer(NSBaseResource):
def __init__(self, json_data=None):
"""
Supplied with json_data the object can be pre-filled
"""
super(NSSSLVServer, self).__init__()
self.options = {'vservername': '',
'cipherdetails': '',
'cleartextport': '',
'dh': '',
'dhfile': '',
'dhcount': '',
'ersa': '',
'ersacount': '',
'sessreuse': '',
'sesstimeout': '',
'cipherredirect': '',
'crlcheck': '',
'cipherurl': '',
'sslv2redirect': '',
'sslv2url': '',
'clientauth': '',
'clientcert': '',
'sslredirect': '',
'redirectportrewrite': '',
'nonfipsciphers': '',
'ssl2': '',
'ssl3': '',
'tls1': '',
'snienable': '',
'service': '',
'certkeyname': '',
'servicename': '',
'ocspcheck': '',
'pushenctrigger': '' }
self.resourcetype = NSSSLVServer.get_resourcetype()
if not (json_data is None):
for key in json_data.keys():
if key in self.options.keys():
self.options[key] = json_data[key]
@staticmethod
def get_resourcetype():
"""
		The resource type name ("sslvserver") used by the NITRO API for SSL virtual servers.
"""
return "sslvserver"
# Read/write properties
def set_vservername(self, vservername):
"""
The name of the SSL virtual server.
Minimum length = 1
"""
self.options['vservername'] = vservername
def get_vservername(self):
"""
The name of the SSL virtual server.
Minimum length = 1
"""
return self.options['vservername']
def set_cleartextport(self, cleartextport):
"""
The port on the back-end web-servers where the clear-text data
is sent by system. Use this setting for the wildcard IP based
SSL Acceleration configuration (*:443).
Minimum value = 1
"""
self.options['cleartextport'] = cleartextport
def get_cleartextport(self):
"""
The port on the back-end web-servers where the clear-text data
is sent by system. Use this setting for the wildcard IP based
SSL Acceleration configuration (*:443).
Minimum value = 1
"""
return self.options['cleartextport']
def set_dh(self, dh):
"""
The state of DH key exchange support for the specified SSL virtual server.
Default value: DISABLED
"""
self.options['dh'] = dh
def get_dh(self):
"""
The state of DH key exchange support for the specified SSL virtual server.
Default value: DISABLED
"""
return self.options['dh']
def set_dhfile(self, dhfile):
"""
The file name and path for the DH parameter. The file format is
PEM. Note: The '-dh' argument must be enabled if this argument
is specified.
Minimum length = 1
"""
self.options['dhfile'] = dhfile
def get_dhfile(self):
"""
The file name and path for the DH parameter. The file format is
PEM. Note: The '-dh' argument must be enabled if this argument
is specified.
Minimum length = 1
"""
return self.options['dhfile']
	def set_dhcount(self, dhcount):
"""
The refresh count for the re-generation of DH public-key and
private-key from the DH parameter. Zero means infinite usage
(no refresh). Note: The '-dh' argument must be enabled if this
argument is specified.
Default value: 0
Minimum value = 0
Maximum value = 65534
"""
		self.options['dhcount'] = dhcount
def get_dhcount(self):
"""
The refresh count for the re-generation of DH public-key and
private-key from the DH parameter. Zero means infinite usage
(no refresh). Note: The '-dh' argument must be enabled if this
argument is specified.
Default value: 0
Minimum value = 0
Maximum value = 65534
"""
return self.options['dhcount']
def set_ersa(self, ersa):
"""
The state of Ephemeral RSA key exchange support for the SSL
virtual server.
Default value: ENABLED
"""
self.options['ersa'] = ersa
def get_ersa(self):
"""
The state of Ephemeral RSA key exchange support for the SSL
virtual server.
Default value: ENABLED
"""
return self.options['ersa']
def set_ersacount(self, ersacount):
"""
The refresh count for the re-generation of RSA public-key and
private-key pair. Zero means infinite usage (no refresh) Note:
The '-eRSA' argument must be enabled if this argument is
specified.
Default value: 0
Minimum value = 0
Maximum value = 65534
"""
self.options['ersacount'] = ersacount
def get_ersacount(self):
"""
The refresh count for the re-generation of RSA public-key and
private-key pair. Zero means infinite usage (no refresh) Note:
The '-eRSA' argument must be enabled if this argument is
specified.
Default value: 0
Minimum value = 0
Maximum value = 65534
"""
return self.options['ersacount']
def set_sessreuse(self, sessreuse):
"""
The state of session re-use support for the SSL virtual server.
Default value: ENABLED
"""
self.options['sessreuse'] = sessreuse
def get_sessreuse(self):
"""
The state of session re-use support for the SSL virtual server.
Default value: ENABLED
"""
return self.options['sessreuse']
def set_sesstimeout(self, sesstimeout):
"""
The Session timeout value in seconds. The value has to be a
positive integer. The '-sessReuse' argument must be enabled if
this argument is specified.
Default value: 120
Minimum value = 0
Maximum value = 0xFFFFFFFE
"""
self.options['sesstimeout'] = sesstimeout
def get_sesstimeout(self):
"""
The Session timeout value in seconds. The value has to be a
positive integer. The '-sessReuse' argument must be enabled if
this argument is specified.
Default value: 120
Minimum value = 0
Maximum value = 0xFFFFFFFE
"""
return self.options['sesstimeout']
def set_cipherredirect(self, cipherredirect):
"""
The state of Cipher Redirect feature.
Default value: DISABLED
"""
self.options['cipherredirect'] = cipherredirect
def get_cipherredirect(self):
"""
The state of Cipher Redirect feature.
Default value: DISABLED
"""
return self.options['cipherredirect']
def set_cipherurl(self, cipherurl):
"""
The redirect URL to be used with the Cipher Redirect feature.
"""
self.options['cipherurl'] = cipherurl
	def get_cipherurl(self):
"""
The redirect URL to be used with the Cipher Redirect feature.
"""
return self.options['cipherurl']
def set_sslv2redirect(self, sslv2redirect):
"""
The state of SSLv2 Redirect feature.
Default value: DISABLED
"""
self.options['sslv2redirect'] = sslv2redirect
def get_sslv2redirect(self):
"""
The state of SSLv2 Redirect feature.
Default value: DISABLED
"""
return self.options['sslv2redirect']
def set_sslv2url(self, sslv2url):
"""
The redirect URL to be used with SSLv2 Redirect feature.
"""
self.options['sslv2url'] = sslv2url
def get_sslv2url(self):
"""
The redirect URL to be used with SSLv2 Redirect feature.
"""
return self.options['sslv2url']
def set_clientauth(self, clientauth):
"""
The state of Client-Authentication support for the SSL virtual server.
Default value: DISABLED
"""
self.options['clientauth'] = clientauth
def get_clientauth(self):
"""
The state of Client-Authentication support for the SSL virtual server.
Default value: DISABLED
"""
return self.options['clientauth']
def set_clientcert(self, clientcert):
"""
The rule for client authentication. If the clientCert if set to
Mandatory, the system will terminate the SSL handshake if the
SSL client does not provide a valid certificate. If the setting
is Optional, then System will allow SSL clients with no
certificate or invalid certificates to access the secure
resource. Note: Make sure proper access control policies are
defined before changing the above setting to Optional.
"""
self.options['clientcert'] = clientcert
def get_clientcert(self):
"""
The rule for client authentication. If the clientCert if set to
Mandatory, the system will terminate the SSL handshake if the
SSL client does not provide a valid certificate. If the setting
is Optional, then System will allow SSL clients with no
certificate or invalid certificates to access the secure
resource. Note: Make sure proper access control policies are
defined before changing the above setting to Optional.
"""
return self.options['clientcert']
def set_sslredirect(self, sslredirect):
"""
The state of HTTPS redirects for the SSL virtual server. This
is required for proper working of the redirect messages from
the web server. The redirect message from the server gives the
new location for the moved object. This is contained in the
HTTP header field: Location (for example, Location:
http://www.moved.org/here.html). For an SSL session, if the
client browser receives this message, the browser will try to
connect to the new location. This will break the secure SSL
session, as the object has moved from a secure site (https://)
to an unsecured one (http://). Browsers usually flash a warning
message on the screen and prompt the user to either continue or
disconnect. When the above feature is enabled, all such http://
redirect messages are automatically converted to https://. This
does not break the client SSL session.
Default value: DISABLED
"""
self.options['sslredirect'] = sslredirect
def get_sslredirect(self):
"""
The state of HTTPS redirects for the SSL virtual server. This
is required for proper working of the redirect messages from
the web server. The redirect message from the server gives the
new location for the moved object. This is contained in the
HTTP header field: Location (for example, Location:
http://www.moved.org/here.html). For an SSL session, if the
client browser receives this message, the browser will try to
connect to the new location. This will break the secure SSL
session, as the object has moved from a secure site (https://)
to an unsecured one (http://). Browsers usually flash a warning
message on the screen and prompt the user to either continue or
disconnect. When the above feature is enabled, all such http://
redirect messages are automatically converted to https://. This
does not break the client SSL session.
Default value: DISABLED
"""
return self.options['sslredirect']
def set_redirectportrewrite(self, redirectportrewrite):
"""
The state of port in rewrite while performing HTTPS redirect.
Default value: DISABLED
"""
self.options['redirectportrewrite'] = redirectportrewrite
def get_redirectportrewrite(self):
"""
The state of port in rewrite while performing HTTPS redirect.
Default value: DISABLED
"""
return self.options['redirectportrewrite']
def set_nonfipsciphers(self, nonfipsciphers):
"""
The state of usage of non FIPS approved ciphers. Valid only for
an SSL vserver bound with a FIPS key and certificate.
Default value: DISABLED
"""
self.options['nonfipsciphers'] = nonfipsciphers
def get_nonfipsciphers(self):
"""
The state of usage of non FIPS approved ciphers. Valid only for
an SSL vserver bound with a FIPS key and certificate.
Default value: DISABLED
"""
return self.options['nonfipsciphers']
def set_ssl2(self, ssl2):
"""
The state of SSLv2 protocol support for the SSL virtual server.
Default value: DISABLED
"""
self.options['ssl2'] = ssl2
def get_ssl2(self):
"""
The state of SSLv2 protocol support for the SSL virtual server.
Default value: DISABLED
"""
return self.options['ssl2']
def set_ssl3(self, ssl3):
"""
The state of SSLv3 protocol support for the SSL virtual server.
Default value: ENABLED
"""
self.options['ssl3'] = ssl3
def get_ssl3(self):
"""
The state of SSLv3 protocol support for the SSL virtual server.
Default value: ENABLED
"""
return self.options['ssl3']
def set_tls1(self, tls1):
"""
The state of TLSv1 protocol support for the SSL virtual server.
Default value: ENABLED
"""
self.options['tls1'] = tls1
def get_tls1(self):
"""
The state of TLSv1 protocol support for the SSL virtual server.
Default value: ENABLED
"""
return self.options['tls1']
def set_snienable(self, snienable):
"""
state of SNI feature on virtual server.
Default value: DISABLED
"""
self.options['snienable'] = snienable
def get_snienable(self):
"""
state of SNI feature on virtual server.
Default value: DISABLED
"""
return self.options['snienable']
def set_pushenctrigger(self, pushenctrigger):
"""
PUSH packet triggering encryption Always - Any PUSH packet
triggers encryption Ignore - Ignore PUSH packet for triggering
encryption Merge - For consecutive sequence of PUSH packets,
last PUSH packet triggers encryption Timer - PUSH packet
triggering encryption delayed by timer period defined in 'set
ssl parameter' .
"""
self.options['pushenctrigger'] = pushenctrigger
def get_pushenctrigger(self):
"""
PUSH packet triggering encryption Always - Any PUSH packet
triggers encryption Ignore - Ignore PUSH packet for triggering
encryption Merge - For consecutive sequence of PUSH packets,
last PUSH packet triggers encryption Timer - PUSH packet
triggering encryption delayed by timer period defined in 'set
ssl parameter' .
"""
return self.options['pushenctrigger']
def set_cipherdetails(self, cipherdetails):
"""
Details of the individual ciphers bound to the SSL vserver.
Select this flag value to display the details of the individual
ciphers bound to the SSL vserver.
"""
self.options['cipherdetails'] = cipherdetails
def get_cipherdetails(self):
"""
Details of the individual ciphers bound to the SSL vserver.
Select this flag value to display the details of the individual
ciphers bound to the SSL vserver.
"""
return self.options['cipherdetails']
# Read only properties
def get_crlcheck(self):
"""
The state of the CRL check parameter. (Mandatory/Optional)
"""
return self.options['crlcheck']
def get_service(self):
"""
Service
"""
return self.options['service']
def get_certkeyname(self):
"""
The name of the certificate key pair binding.
"""
return self.options['certkeyname']
def get_servicename(self):
"""
Service name.
"""
return self.options['servicename']
def get_ocspcheck(self):
"""
The state of the OCSP check parameter. (Mandatory/Optional)
"""
return self.options['ocspcheck']
@staticmethod
def get(nitro, sslvserver):
"""
Use this API to fetch sslvserver resource of given name.
"""
__sslvserver = NSSSLVServer()
__sslvserver.get_resource(nitro, sslvserver.get_vservername())
return __sslvserver
@staticmethod
def get_all(nitro):
"""
Use this API to fetch all configured sslvserver resources.
"""
__url = nitro.get_url() + NSSSLVServer.get_resourcetype()
__json_sslvservers = nitro.get(__url).get_response_field(NSSSLVServer.get_resourcetype())
__sslvservers = []
for json_sslvserver in __json_sslvservers:
__sslvservers.append(NSSSLVServer(json_sslvserver))
return __sslvservers
@staticmethod
def update(nitro, sslvserver):
"""
Use this API to update sslvserver of a given name.
"""
__sslvserver = NSSSLVServer()
__sslvserver.set_vservername(sslvserver.get_vservername())
__sslvserver.set_cleartextport(sslvserver.get_cleartextport())
__sslvserver.set_dh(sslvserver.get_dh())
__sslvserver.set_dhfile(sslvserver.get_dhfile())
__sslvserver.set_dhcount(sslvserver.get_dhcount())
__sslvserver.set_ersa(sslvserver.get_ersa())
__sslvserver.set_ersacount(sslvserver.get_ersacount())
__sslvserver.set_sessreuse(sslvserver.get_sessreuse())
__sslvserver.set_sesstimeout(sslvserver.get_sesstimeout())
__sslvserver.set_cipherredirect(sslvserver.get_cipherredirect())
__sslvserver.set_cipherurl(sslvserver.get_cipherurl())
__sslvserver.set_sslv2redirect(sslvserver.get_sslv2redirect())
		__sslvserver.set_sslv2url(sslvserver.get_sslv2url())
__sslvserver.set_clientauth(sslvserver.get_clientauth())
__sslvserver.set_clientcert(sslvserver.get_clientcert())
__sslvserver.set_sslredirect(sslvserver.get_sslredirect())
__sslvserver.set_redirectportrewrite(sslvserver.get_redirectportrewrite())
__sslvserver.set_nonfipsciphers(sslvserver.get_nonfipsciphers())
__sslvserver.set_ssl2(sslvserver.get_ssl2())
__sslvserver.set_ssl3(sslvserver.get_ssl3())
__sslvserver.set_tls1(sslvserver.get_tls1())
__sslvserver.set_snienable(sslvserver.get_snienable())
__sslvserver.set_pushenctrigger(sslvserver.get_pushenctrigger())
return __sslvserver.update_resource(nitro)
# No unset functionality for now.
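	# Illustrative usage sketch (added comment; the session setup is an
	# assumption -- only get()/get_all()/update() defined above come from this class):
	#
	#   vservers = NSSSLVServer.get_all(nitro)   # 'nitro' is an authenticated NSNitro session
	#   for vs in vservers:
	#       print vs.get_vservername(), vs.get_ssl3(), vs.get_tls1()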
| apache-2.0 | -8,071,013,944,966,436,000 | 41.041293 | 105 | 0.503737 | false |
benvcarr/algorithms_course | hw2/carr-hw2.py | 1 | 5854 | #!/usr/bin/python
"""
Benjamin Carr
Homework #2 - MPCS 55001
Answers:
(1) Program below.
(2) My program is correct for all cases where both the numbers and the distances from the median
are unique. I spent a lot of time (30+ hrs) trying to find an approach other than using a dictionary
to store key,val pairs, but didn't come up with anything that would work for all situations. So, it
works for inputs that meet that criterion. It also assumes the median is always the floor(n/2)-th
smallest element, which is a bit of a mathematical compromise.
(3) It should run in O(n) time - the worst running time is dominated by the O(n) call to select()
that is initially used to find the median. Both of the core FOR loops (lines 66 & 71) take O(n) as well.
"""
import sys
import math
from random import randint
def main():
startFindClosest()
def startFindClosest():
"""Begins the closest search process by reading in the stdin file.
Args:
None. Reads from stdin for file.
Returns:
No value. Prints closest k values to median to stdout."""
f = sys.stdin
line1 = f.readline()
while line1 != '':
k = int(f.readline())
array = form_array_from_string_line(line1)
print findClosestKValues(array, 0, len(array)-1, k)
line1 = f.readline()
if not line1:
break
return
def findClosestKValues(array, l_index, r_index, k):
"""Finds the closest K values to the median.
Args:
array: List object containing unsorted list of values.
        l_index: Left index of the subarray we want to search in.
        r_index: Right index of the subarray we want to search in.
        k: The number of numbers closest to the median we wish to find.
Returns:
nums: a list object containing the closest k numbers to median."""
nums = []
temp_array = []
pairing = {}
"""
Note: This is code I tried to use to get it work for varying lengths to accurately output
the median value. It turned out to be more complex than imagined so I left it out.
if (len(array) % 2) == 0:
median_A = randomizedSelect(array, l_index, r_index, (len(array)/2))
median_B = randomizedSelect(array, l_index, r_index, ((len(array)-1)/2))
median = (median_A + median_B) / 2.0
else:
median = randomizedSelect(array, l_index, r_index, (len(array)/2))"""
median = randomizedSelect(array, l_index, r_index, math.floor(len(array)/2))
array.remove(median)
array.append(median)
for i in range(0,r_index+1):
pairing[abs(array[i]-median)] = array[i]
temp_array.append(abs(array[i] - median))
kth_element = randomizedSelect(temp_array, l_index, len(temp_array)-1, k)
for j in range(0,len(array)):
if temp_array[j] <= kth_element:
nums.append(pairing[temp_array[j]])
return nums
def form_array_from_string_line(line):
"""Begins the inversion count process by reading in the stdin file.
Args:
line: A string of input line (usually from a text file) with integers
contained within, separated by spaces.
Returns:
array: List object (Python's standard 'array' type) featuring each of
the integers as a separate item in the list."""
array = [int(n) for n in line.split()]
return array
def randomizedSelect(array, l_index, r_index, i):
"""Uses the randomizedPartion method to find the specified i-th value.
Args:
array: List object containing unsorted list of values.
l_index: Left index of the subarray we want to search in.
r_index: Right index of the subarray we want to search in.
i: The i-th sorted value we want to find.
Returns:
array: List object (Python's standard 'array' type) featuring each of
the integers as a separate item in the list."""
if l_index == r_index:
return array[l_index]
q = randomizedPartition(array, l_index, r_index)
k = q - l_index + 1
if i == k:
return array[q]
elif i < k:
return randomizedSelect(array, l_index, q-1, i)
else:
return randomizedSelect(array, q+1, r_index, i-k)
def randomizedPartition(array, l_index, r_index):
"""Randomizes the partion method.
Args:
array: List object containing unsorted list of values.
l_index: Left index of the subarray we want to search in.
r_index: Right index of the subarray we want to search in.
Returns:
i+1: Integer value of the index of the partition."""
i = randint(l_index, r_index)
array = valueSwap(array, i, r_index)
return partition(array, l_index, r_index)
def partition(array, l_index, r_index):
"""Identifies the partion index.
Args:
array: List object containing unsorted list of values.
l_index: Left index of the subarray we want to search in.
r_index: Right index of the subarray we want to search in.
Returns:
i+1: Integer value of the index of the partition."""
pivot = array[r_index]
i = l_index - 1
j = l_index
for j in range(l_index, r_index):
if array[j] <= pivot:
i += 1
array = valueSwap(array, i, j)
array = valueSwap(array, i+1, r_index)
return i+1
def valueSwap(array, index_one, index_two):
"""Swaps two values in a given array.
Args:
array: List object containing unsorted list of values.
index_one: Index of first item we want to swap.
index_two: Index of second item we want to swap.
Returns:
array: List with the desired values swapped."""
if len(array) <= 1:
return array
else:
try:
temp = array[index_one]
array[index_one] = array[index_two]
array[index_two] = temp
except IndexError, e:
print e
print "Tried to swap index: " + str(index_one) + ' with index: ' + str(index_two)
return array
if __name__ == '__main__':
main()
| apache-2.0 | -1,535,948,675,559,064,000 | 32.233918 | 116 | 0.640588 | false |
alorenzo175/pvlib-python | pvlib/test/test_forecast.py | 1 | 5733 | from datetime import datetime, timedelta
from pytz import timezone
import warnings
import pandas as pd
import pytest
from numpy.testing import assert_allclose
from conftest import requires_siphon, has_siphon, skip_windows
pytestmark = pytest.mark.skipif(not has_siphon, reason='requires siphon')
if has_siphon:
with warnings.catch_warnings():
# don't emit import warning
warnings.simplefilter("ignore")
from pvlib.forecast import GFS, HRRR_ESRL, HRRR, NAM, NDFD, RAP
# setup times and location to be tested. Tucson, AZ
_latitude = 32.2
_longitude = -110.9
_tz = 'US/Arizona'
_start = pd.Timestamp.now(tz=_tz)
_end = _start + pd.Timedelta(days=1)
_modelclasses = [
GFS, NAM, HRRR, NDFD, RAP,
pytest.param(
HRRR_ESRL, marks=[
skip_windows,
pytest.mark.xfail(reason="HRRR_ESRL is unreliable"),
pytest.mark.timeout(timeout=60),
pytest.mark.filterwarnings('ignore:.*experimental')])]
_working_models = []
_variables = ['temp_air', 'wind_speed', 'total_clouds', 'low_clouds',
'mid_clouds', 'high_clouds', 'dni', 'dhi', 'ghi']
_nonnan_variables = ['temp_air', 'wind_speed', 'total_clouds', 'dni',
'dhi', 'ghi']
else:
_modelclasses = []
# make a model object for each model class
# get the data for that model and store it in an
# attribute for further testing
@requires_siphon
@pytest.fixture(scope='module', params=_modelclasses)
def model(request):
amodel = request.param()
try:
raw_data = amodel.get_data(_latitude, _longitude, _start, _end)
except Exception as e:
warnings.warn('Exception getting data for {}.\n'
'latitude, longitude, start, end = {} {} {} {}\n{}'
.format(amodel, _latitude, _longitude, _start, _end, e))
raw_data = pd.DataFrame() # raw_data.empty will be used later
amodel.raw_data = raw_data
return amodel
@requires_siphon
def test_process_data(model):
for how in ['liujordan', 'clearsky_scaling']:
if model.raw_data.empty:
warnings.warn('Could not test {} process_data with how={} '
'because raw_data was empty'.format(model, how))
continue
data = model.process_data(model.raw_data, how=how)
for variable in _nonnan_variables:
try:
assert not data[variable].isnull().values.any()
except AssertionError:
warnings.warn('{}, {}, data contained null values'
.format(model, variable))
@requires_siphon
def test_bad_kwarg_get_data():
# For more information on why you would want to pass an unknown keyword
# argument, see Github issue #745.
amodel = NAM()
data = amodel.get_data(_latitude, _longitude, _start, _end,
bad_kwarg=False)
assert not data.empty
@requires_siphon
def test_bad_kwarg_get_processed_data():
# For more information on why you would want to pass an unknown keyword
# argument, see Github issue #745.
amodel = NAM()
data = amodel.get_processed_data(_latitude, _longitude, _start, _end,
bad_kwarg=False)
assert not data.empty
@requires_siphon
def test_how_kwarg_get_processed_data():
amodel = NAM()
data = amodel.get_processed_data(_latitude, _longitude, _start, _end,
how='clearsky_scaling')
assert not data.empty
@requires_siphon
def test_vert_level():
amodel = NAM()
vert_level = 5000
amodel.get_processed_data(_latitude, _longitude, _start, _end,
vert_level=vert_level)
@requires_siphon
def test_datetime():
amodel = NAM()
start = datetime.now()
end = start + timedelta(days=1)
amodel.get_processed_data(_latitude, _longitude, start, end)
@requires_siphon
def test_queryvariables():
amodel = GFS()
new_variables = ['u-component_of_wind_height_above_ground']
data = amodel.get_data(_latitude, _longitude, _start, _end,
query_variables=new_variables)
data['u-component_of_wind_height_above_ground']
@requires_siphon
def test_latest():
GFS(set_type='latest')
@requires_siphon
def test_full():
GFS(set_type='full')
@requires_siphon
def test_temp_convert():
amodel = GFS()
data = pd.DataFrame({'temp_air': [273.15]})
data['temp_air'] = amodel.kelvin_to_celsius(data['temp_air'])
assert_allclose(data['temp_air'].values, 0.0)
# @requires_siphon
# def test_bounding_box():
# amodel = GFS()
# latitude = [31.2,32.2]
# longitude = [-111.9,-110.9]
# new_variables = {'temperature':'Temperature_surface'}
# data = amodel.get_query_data(latitude, longitude, _start, _end,
# variables=new_variables)
@requires_siphon
def test_set_location():
amodel = GFS()
latitude, longitude = 32.2, -110.9
time = datetime.now(timezone('UTC'))
amodel.set_location(time, latitude, longitude)
def test_cloud_cover_to_transmittance_linear():
amodel = GFS()
assert_allclose(amodel.cloud_cover_to_transmittance_linear(0), 0.75)
assert_allclose(amodel.cloud_cover_to_transmittance_linear(100), 0.0)
assert_allclose(amodel.cloud_cover_to_transmittance_linear(0, 0.5), 0.5)
def test_cloud_cover_to_ghi_linear():
amodel = GFS()
ghi_clear = 1000
offset = 25
out = amodel.cloud_cover_to_ghi_linear(0, ghi_clear, offset=offset)
assert_allclose(out, 1000)
out = amodel.cloud_cover_to_ghi_linear(100, ghi_clear, offset=offset)
assert_allclose(out, 250)
| bsd-3-clause | 4,192,149,592,717,997,000 | 30.5 | 78 | 0.618873 | false |
USGSDenverPychron/pychron | pychron/spectrometer/base_magnet.py | 1 | 12604 | # ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
import os
import time
import yaml
from math import pi
from numpy import arange, sin
from traits.api import Property, Float, Event, Instance
from traitsui.api import View, Item, VGroup, HGroup, Spring, RangeEditor
from pychron.loggable import Loggable
from pychron.paths import paths
def get_float(func):
def dec(*args, **kw):
try:
return float(func(*args, **kw))
except (TypeError, ValueError):
return 0.0
return dec
import threading
import time
class BaseMagnet(Loggable):
dac = Property(Float, depends_on='_dac')
mass = Float
_dac = Float
dacmin = Float(0.0)
dacmax = Float(10.0)
massmin = Property(Float, depends_on='_massmin')
massmax = Property(Float, depends_on='_massmax')
_massmin = Float(0.0)
_massmax = Float(200.0)
settling_time = 0.5
detector = Instance('pychron.spectrometer.base_detector.BaseDetector')
dac_changed = Event
mftable = Instance('pychron.spectrometer.mftable.MagnetFieldTable', ())
confirmation_threshold_mass = Float
use_deflection_correction = True
use_af_demagnetization = False
_suppress_mass_update = False
def __init__(self, *args, **kw):
super(BaseMagnet, self).__init__(*args, **kw)
self._lock = threading.Lock()
        self._cond = threading.Condition(threading.Lock())
def reload_mftable(self):
self.mftable.load_mftable()
def set_dac(self, *args, **kw):
raise NotImplementedError
def set_mftable(self, name):
self.mftable.set_path_name(name)
def update_field_table(self, *args, **kw):
self.mftable.update_field_table(*args, **kw)
# ===============================================================================
# persistence
# ===============================================================================
def load(self):
pass
def finish_loading(self):
"""
initialize the mftable
read DAC from device
:return:
"""
if self.spectrometer:
molweights = self.spectrometer.molecular_weights
name = self.spectrometer.name
else:
from pychron.spectrometer.molecular_weights import MOLECULAR_WEIGHTS as molweights
name = ''
self.mftable.initialize(molweights)
self.mftable.spectrometer_name = name.lower()
d = self.read_dac()
if d is not None:
self._dac = d
# load af demag
self._load_af_demag_configuration()
# ===============================================================================
# mapping
# ===============================================================================
def map_dac_to_mass(self, dac, detname):
"""
convert a DAC value (voltage) to mass for a given detector
use the mftable
:param dac: float, voltage (0-10V)
        :param detname: str, name of a detector, e.g. H1
:return: float, mass
"""
return self.mftable.map_dac_to_mass(dac, detname)
def map_mass_to_dac(self, mass, detname):
"""
convert a mass value from amu to dac for a given detector
:param mass: float, amu
        :param detname: str, name of a detector, e.g. H1
:return: float, dac voltage
"""
dac = self.mftable.map_mass_to_dac(mass, detname)
self.debug('{} map mass to dac {} >> {}'.format(detname, mass, dac))
if dac is None:
self.warning('Could not map mass to dac. Returning current DAC {}'.format(self._dac))
dac = self._dac
return dac
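    # Illustrative sketch (not from the original file): how the two mapping
    # helpers above are typically paired. ``magnet`` is assumed to be an
    # initialised BaseMagnet subclass and 'H1' a detector present in the
    # magnetic field table; the mass value is only an example.
    #
    #   dac = magnet.map_mass_to_dac(39.962, 'H1')    # amu -> DAC voltage
    #   mass = magnet.map_dac_to_mass(dac, 'H1')      # DAC voltage -> amu
    #   # round-trips only approximately, via the mftable interpolation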
def map_dac_to_isotope(self, dac=None, det=None, current=True):
"""
convert a dac voltage to isotope name for a given detector
:param dac: float, voltage
:param det: str, detector name
:param current: bool, get current hv
:return: str, e.g Ar40
"""
if dac is None:
dac = self._dac
if det is None:
det = self.detector
if det:
dac = self.spectrometer.uncorrect_dac(det, dac, current=current)
m = self.map_dac_to_mass(dac, det.name)
if m is not None:
return self.spectrometer.map_isotope(m)
def mass_change(self, m):
"""
set the self.mass attribute
suppress mass change handler
:param m: float
:return:
"""
self._suppress_mass_update = True
self.trait_set(mass=m)
self._suppress_mass_update = False
# ===============================================================================
# private
# ===============================================================================
def _wait_release(self):
self._lock.release()
# self._cond.notify()
def _wait_lock(self, timeout):
"""
http://stackoverflow.com/questions/8392640/how-to-implement-a-lock-with-a-timeout-in-python-2-7
@param timeout:
@return:
"""
with self._cond:
current_time = start_time = time.time()
while current_time < start_time + timeout:
if self._lock.acquire(False):
return True
else:
self._cond.wait(timeout - current_time + start_time)
current_time = time.time()
return False
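    # Hypothetical usage of the timeout lock above (caller code is an
    # assumption, not taken from this file): acquire with a bounded wait,
    # perform the move, always release.
    #
    #   if self._wait_lock(timeout=2.0):
    #       try:
    #           self._set_mass(mass)
    #       finally:
    #           self._wait_release()
    #   else:
    #       self.warning('timed out waiting for magnet lock')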
def _load_af_demag_configuration(self):
self.use_af_demagnetization = False
p = paths.af_demagnetization
if os.path.isfile(p):
with open(p, 'r') as rfile:
try:
yd = yaml.load(rfile)
except BaseException, e:
self.warning_dialog('AF Demagnetization unavailable. Syntax error in file. Error: {}'.format(e))
return
if not isinstance(yd, dict):
self.warning_dialog('AF Demagnetization unavailable. Syntax error in file')
return
self.use_af_demagnetization = yd.get('enabled', True)
self.af_demag_threshold = yd.get('threshold', 1)
def _do_af_demagnetization(self, target, setfunc):
p = paths.af_demagnetization
if os.path.isfile(p):
with open(p, 'r') as rfile:
try:
yd = yaml.load(rfile)
except BaseException, e:
self.warning('AF Demagnetization unavailable. Syntax error in file. Error: {}'.format(e))
return
period = yd.get('period', None)
if period is None:
frequency = yd.get('frequency')
if frequency is None:
self.warning('AF Demagnetization unavailable. '
'Need to specify "period" or "frequency" in "{}"'.format(p))
return
else:
period = 1 / float(frequency)
else:
frequency = 1 / float(period)
duration = yd.get('duration')
if duration is None:
duration = 5
self.debug('defaulting to duration={}'.format(duration))
start_amplitude = yd.get('start_amplitude')
if start_amplitude is None:
self.warning('AF Demagnetization unavailable. '
'Need to specify "start_amplitude" in "{}"'.format(p))
return
sx = arange(0.5 * period, duration, period)
slope = start_amplitude / float(duration)
dacs = slope * sx * sin(frequency * pi * sx)
self.info('Doing AF Demagnetization around target={}. '
'duration={}, start_amplitude={}, period={}'.format(target, duration, start_amplitude, period))
for dac in reversed(dacs):
self.debug('set af dac raw:{} dac:{}'.format(dac, target + dac))
setfunc(target + dac)
time.sleep(period)
else:
self.warning('AF Demagnetization unavailable. {} not a valid file'.format(p))
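    # Worked sketch of the ramp computed above, using assumed example values
    # (period=0.5 s, duration=5 s, start_amplitude=1.0, so frequency=2):
    #
    #   sx    = arange(0.25, 5, 0.5)                 -> 0.25, 0.75, ..., 4.75
    #   slope = 1.0 / 5.0                            -> 0.2
    #   dacs  = slope * sx * sin(frequency * pi * sx) = 0.2 * sx * sin(2 * pi * sx)
    #
    # The setpoints are applied in reverse, so the oscillation superimposed on
    # ``target`` starts with the largest envelope (about start_amplitude) and
    # decays towards zero before the method returns.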
def _validate_mass_change(self, cm, m):
ct = self.confirmation_threshold_mass
move_ok = True
if abs(cm - m) > ct:
move_ok = False
self.info('Requested move greater than threshold. Current={}, Request={}, Threshold={}'.format(cm, m, ct))
if self.confirmation_dialog('Requested magnet move is greater than threshold.\n'
'Current Mass={}\n'
'Requested Mass={}\n'
'Threshold={}\n'
'Are you sure you want to continue?'.format(cm, m, ct)):
move_ok = True
return move_ok
def _mass_changed(self, old, new):
if self._suppress_mass_update:
return
if self._validate_mass_change(old, new):
self._set_mass(new)
else:
self.mass_change(old)
def _set_mass(self, m):
if self.detector:
self.debug('setting mass {}'.format(m))
dac = self.map_mass_to_dac(m, self.detector.name)
dac = self.spectrometer.correct_dac(self.detector, dac)
self.dac = dac
# ===============================================================================
# property get/set
# ===============================================================================
def _validate_dac(self, d):
return self._validate_float(d)
def _get_dac(self):
return self._dac
def _set_dac(self, v):
if v is not None:
self.set_dac(v)
def _validate_float(self, d):
try:
return float(d)
except (ValueError, TypeError):
return d
def _validate_massmin(self, d):
d = self._validate_float(d)
if isinstance(d, float):
if d > self.massmax:
d = str(d)
return d
def _get_massmin(self):
return self._massmin
def _set_massmin(self, v):
self._massmin = v
def _validate_massmax(self, d):
d = self._validate_float(d)
if isinstance(d, float):
if d < self.massmin:
d = str(d)
return d
def _get_massmax(self):
return self._massmax
def _set_massmax(self, v):
self._massmax = v
# ===============================================================================
# views
# ===============================================================================
def traits_view(self):
v = View(VGroup(VGroup(Item('dac', editor=RangeEditor(low_name='dacmin',
high_name='dacmax',
format='%0.5f')),
Item('mass', editor=RangeEditor(mode='slider', low_name='massmin',
high_name='massmax',
format='%0.3f')),
HGroup(Spring(springy=False,
width=48),
Item('massmin', width=-40), Spring(springy=False,
width=138),
Item('massmax', width=-55),
show_labels=False),
show_border=True,
label='Control')))
return v
# ============= EOF =============================================
| apache-2.0 | 4,398,844,861,938,323,000 | 33.157182 | 118 | 0.485401 | false |
jptomo/rpython-lang-scheme | rpython/translator/test/snippet.py | 1 | 19453 | """Snippets for translation
This module holds various snippets, to be used by translator
unittests.
We define argument types as default arguments to the snippet
functions.
"""
numtype = (int, float)
anytype = (int, float, str)
seqtype = (list, tuple)
def if_then_else(cond=anytype, x=anytype, y=anytype):
if cond:
return x
else:
return y
def my_gcd(a=numtype, b=numtype):
r = a % b
while r:
a = b
b = r
r = a % b
return b
def is_perfect_number(n=int):
div = 1
sum = 0
while div < n:
if n % div == 0:
sum += div
div += 1
return n == sum
def my_bool(x=int):
return not not x
def my_contains(seq=seqtype, elem=anytype):
return elem in seq
def is_one_or_two(n=int):
return n in [1, 2]
def two_plus_two():
"""Array test"""
array = [0] * 3
array[0] = 2
array[1] = 2
array[2] = array[0] + array[1]
return array[2]
def get_set_del_slice(l=list):
del l[:1]
del l[-1:]
del l[2:4]
l[:1] = [3]
l[-1:] = [9]
l[2:4] = [8,11]
return l[:2], l[5:], l[3:5]
def sieve_of_eratosthenes():
"""Sieve of Eratosthenes
This one is from an infamous benchmark, "The Great Computer
Language Shootout".
URL is: http://www.bagley.org/~doug/shootout/
"""
flags = [True] * (8192+1)
count = 0
i = 2
while i <= 8192:
if flags[i]:
k = i + i
while k <= 8192:
flags[k] = False
k = k + i
count = count + 1
i = i + 1
return count
def simple_func(i=numtype):
return i + 1
def while_func(i=numtype):
total = 0
while i > 0:
total = total + i
i = i - 1
return total
def nested_whiles(i=int, j=int):
s = ''
z = 5
while z > 0:
z = z - 1
u = i
while u < j:
u = u + 1
s = s + '.'
s = s + '!'
return s
def poor_man_range(i=int):
lst = []
while i > 0:
i = i - 1
lst.append(i)
lst.reverse()
return lst
def poor_man_rev_range(i=int):
lst = []
while i > 0:
i = i - 1
lst += [i]
return lst
def simple_id(x=anytype):
return x
def branch_id(cond=anytype, a=anytype, b=anytype):
while 1:
if cond:
return a
else:
return b
def builtinusage():
return pow(2, 2)
def yast(lst=seqtype):
total = 0
for z in lst:
total = total + z
return total
def time_waster(n=int):
"""Arbitrary test function"""
i = 0
x = 1
while i < n:
j = 0
while j <= i:
j = j + 1
x = x + (i & j)
i = i + 1
return x
def half_of_n(n=int):
"""Slice test"""
i = 0
lst = range(n)
while lst:
lst = lst[1:-1]
i = i + 1
return i
def int_id(x=int):
i = 0
while i < x:
i = i + 1
return i
def greet(target=str):
"""String test"""
hello = "hello"
return hello + target
def choose_last():
"""For loop test"""
set = ["foo", "bar", "spam", "egg", "python"]
choice = ""
for choice in set:
pass
return choice
def poly_branch(x=int):
if x:
y = [1,2,3]
else:
y = ['a','b','c']
z = y
return z*2
def s_and(x=anytype, y=anytype):
if x and y:
return 'yes'
else:
return 'no'
def break_continue(x=numtype):
result = []
i = 0
while 1:
i = i + 1
try:
if i&1:
continue
if i >= x:
break
finally:
result.append(i)
i = i + 1
return result
def reverse_3(lst=seqtype):
try:
a, b, c = lst
except:
return 0, 0, 0
return c, b, a
def finallys(lst=seqtype):
x = 1
try:
x = 2
try:
x = 3
a, = lst
x = 4
except KeyError:
return 5
except ValueError:
return 6
b, = lst
x = 7
finally:
x = 8
return x
def finally2(o, k):
try:
o[k] += 1
finally:
o[-1] = 'done'
def bare_raise(o, ignore):
try:
return o[5]
except:
if not ignore:
raise
def factorial(n=int):
if n <= 1:
return 1
else:
return n * factorial(n-1)
def factorial2(n=int): # analysed in a different order
if n > 1:
return n * factorial2(n-1)
else:
return 1
def _append_five(lst):
lst += [5]
def call_five():
a = []
_append_five(a)
return a
def _append_six(lst):
lst += [6]
def call_five_six():
a = []
_append_five(a)
_append_six(a)
return a
def call_unpack_56():
a = call_five_six()
return len(a), a[0], a[1]
def forty_two():
return 42
def never_called():
return "booo"
def constant_result():
if forty_two():
return "yadda"
else:
return never_called()
class CallablePrebuiltConstant(object):
def __call__(self):
return 42
callable_prebuilt_constant = CallablePrebuiltConstant()
def call_cpbc():
return callable_prebuilt_constant()
class E1(Exception):
pass
class E2(Exception):
pass
def raise_choose(n):
if n == 1:
raise E1
elif n == 2:
raise E2
elif n == -1:
raise Exception
return 0
def try_raise_choose(n=int):
try:
raise_choose(n)
except E1:
return 1
except E2:
return 2
except Exception:
return -1
return 0
def do_try_raise_choose():
r = []
for n in [-1,0,1,2]:
r.append(try_raise_choose(n))
return r
# INHERITANCE / CLASS TESTS
class C(object): pass
def build_instance():
c = C()
return c
def set_attr():
c = C()
c.a = 1
c.a = 2
return c.a
def merge_setattr(x):
if x:
c = C()
c.a = 1
else:
c = C()
return c.a
class D(C): pass
class E(C): pass
def inheritance1():
d = D()
d.stuff = ()
e = E()
e.stuff = -12
e.stuff = 3
lst = [d, e]
return d.stuff, e.stuff
def inheritance2():
d = D()
d.stuff = (-12, -12)
e = E()
e.stuff = (3, 12.3)
return _getstuff(d), _getstuff(e)
class F:
pass
class G(F):
def m(self, x):
return self.m2(x)
def m2(self, x):
return D(), x
class H(F):
def m(self, y):
self.attr = 1
return E(), y
def knownkeysdict(b=anytype):
if b:
d = {'a': 0}
d['b'] = b
d['c'] = 'world'
else:
d = {'b': -123}
return d['b']
def generaldict(key=str, value=int, key2=str, value2=int):
d = {key: value}
d[key2] = value2
return d[key or key2]
def prime(n=int):
return len([i for i in range(1,n+1) if n%i==0]) == 2
class A0:
pass
class A1(A0):
clsattr = 123
class A2(A1):
clsattr = 456
class A3(A2):
clsattr = 789
class A4(A3):
pass
class A5(A0):
clsattr = 101112
def classattribute(flag=int):
if flag == 1:
x = A1()
elif flag == 2:
x = A2()
elif flag == 3:
x = A3()
elif flag == 4:
x = A4()
else:
x = A5()
return x.clsattr
class Z:
def my_method(self):
return self.my_attribute
class WithInit:
def __init__(self, n):
self.a = n
class WithMoreInit(WithInit):
def __init__(self, n, m):
WithInit.__init__(self, n)
self.b = m
def simple_method(v=anytype):
z = Z()
z.my_attribute = v
return z.my_method()
def with_init(v=int):
z = WithInit(v)
return z.a
def with_more_init(v=int, w=bool):
z = WithMoreInit(v, w)
if z.b:
return z.a
else:
return -z.a
global_z = Z()
global_z.my_attribute = 42
def global_instance():
return global_z.my_method()
def call_Z_my_method(z):
return z.my_method
def somepbc_simplify():
z = Z()
call_Z_my_method(global_z)
call_Z_my_method(z)
class ClassWithMethods:
def cm(cls, x):
return x
cm = classmethod(cm)
def sm(x):
return x
sm = staticmethod(sm)
global_c = C()
global_c.a = 1
def global_newstyle_instance():
return global_c
global_rl = []
global_rl.append(global_rl)
def global_recursive_list():
return global_rl
class MI_A(object):
a = 1
class MI_B(MI_A):
b = 2
class MI_C(MI_A):
c = 3
class MI_D(MI_B, MI_C):
d = 4
def multiple_inheritance():
i = MI_D()
return i.a + i.b + i.c + i.d
class CBase(object):
pass
class CSub1(CBase):
def m(self):
self.x = 42
return self.x
class CSub2(CBase):
def m(self):
self.x = 'world'
return self.x
def methodcall_is_precise(cond):
if cond:
x = CSub1()
x.m()
else:
x = CSub2()
x.m()
return CSub1().m()
def flow_type_info(i):
if isinstance(i, int):
a = i + 1
else:
a = len(str(i))
return a
def flow_usertype_info(ob):
if isinstance(ob, WithInit):
return ob
else:
return WithMoreInit(1, 2)
def star_args0(*args):
return args[0] / 2
def call_star_args0(z):
return star_args0(z)
def star_args1(a, *args):
return a + args[0] / 2
def call_star_args1(z):
return star_args1(z, 20)
def star_args1def(a=4, *args):
if args:
return a + args[0] / 2
else:
return a*3
def call_star_args1def(z):
a = star_args1def(z, 22)
b = star_args1def(5)
c = star_args1def()
return a+b+c
def star_args(x, y, *args):
return x + args[0]
def call_star_args(z):
return star_args(z, 5, 10, 15, 20)
def call_star_args_multiple(z):
a = star_args(z, 5, 10)
b = star_args(z, 5, 10, 15)
c = star_args(z, 5, 10, 15, 20)
return a+b+c
def default_args(x, y=2, z=3L):
return x+y+z
def call_default_args(u):
return default_args(111, u)
def default_and_star_args(x, y=2, z=3, *more):
return x+y+z+len(more)
def call_default_and_star_args(u):
return (default_and_star_args(111, u),
default_and_star_args(-1000, -2000, -3000, -4000, -5000))
def call_with_star(z):
return default_args(-20, *z)
def call_with_keyword(z):
return default_args(-20, z=z)
def call_very_complex(z, args, kwds):
return default_args(-20, z=z, *args, **kwds)
def powerset(setsize=int):
"""Powerset
    This one is from a Philippine Pythonista Hangout, a modified
version of Andy Sy's code.
list.append is modified to list concatenation, and powerset
is pre-allocated and stored, instead of printed.
URL is: http://lists.free.net.ph/pipermail/python/2002-November/
"""
set = range(setsize)
maxcardinality = pow(2, setsize)
bitmask = 0L
powerset = [None] * maxcardinality
ptr = 0
while bitmask < maxcardinality:
bitpos = 1L
index = 0
subset = []
while bitpos < maxcardinality:
if bitpos & bitmask:
subset = subset + [set[index]]
index += 1
bitpos <<= 1
powerset[ptr] = subset
ptr += 1
bitmask += 1
return powerset
def harmonic(n):
result = 0.0
for i in range(n, 0, -1):
        result += 1.0 / i
return result
# --------------------(Currently) Non runnable Functions ---------------------
def _somebug1(n=int):
l = []
v = l.append
while n:
l[7] = 5 # raises an exception
break
return v
def _getstuff(x):
return x.stuff
# --------------------(Currently) Non compilable Functions ---------------------
class BadInit(object):
def update(self, k):
self.k = 1
def __init__(self, v):
return
self.update(**{'k':v})
def read(self):
return self.k
global_bi = BadInit(1)
def global_badinit():
return global_bi.read()
def _attrs():
def b(): pass
b.f = 4
b.g = 5
return b.f + b.g
def _methodcall1(cond):
if cond:
x = G()
else:
x = H()
return x.m(42)
def func1():
pass
def func2():
pass
def mergefunctions(cond):
if cond:
x = func1
else:
x = func2
return x
def func_producing_exception():
raise ValueError, "this might e.g. block the caller"
def funccallsex():
return func_producing_exception()
def func_arg_unpack():
a,b = 3, "hello"
return a
class APBC:
def __init__(self):
self.answer = 42
apbc = APBC()
apbc.answer = 7
def preserve_pbc_attr_on_instance(cond):
if cond:
x = APBC()
else:
x = apbc
return x.answer
class APBCS(object):
__slots__ = ['answer']
def __init__(self):
self.answer = 42
apbcs = APBCS()
apbcs.answer = 7
def preserve_pbc_attr_on_instance_with_slots(cond):
if cond:
x = APBCS()
else:
x = apbcs
return x.answer
def is_and_knowntype(x):
if x is None:
return x
else:
return None
def isinstance_and_knowntype(x):
if isinstance(x, APBC):
return x
else:
return apbc
def simple_slice(x):
return x[:10]
def simple_iter(x):
return iter(x)
def simple_zip(x,y):
return zip(x,y)
def dict_copy(d):
return d.copy()
def dict_update(x):
d = {x:x}
d.update({1:2})
return d
def dict_keys():
d = {"a" : 1}
return d.keys()
def dict_keys2():
d = {"a" : 1}
keys = d.keys()
d["123"] = 12
return keys
def dict_values():
d = {"a" : "a"}
return d.values()
def dict_values2():
d = {54312 : "a"}
values = d.values()
d[1] = "12"
return values
def dict_items():
d = {'a' : 1}
return d.items()
class Exc(Exception):
pass
def exception_deduction0(x):
pass
def exception_deduction():
try:
exception_deduction0(2)
except Exc, e:
return e
return Exc()
def always_raising(x):
raise ValueError
def witness(x):
pass
def exception_deduction_with_raise1(x):
try:
exception_deduction0(2)
if x:
raise Exc()
except Exc, e:
witness(e)
return e
return Exc()
def exception_deduction_with_raise2(x):
try:
exception_deduction0(2)
if x:
raise Exc
except Exc, e:
witness(e)
return e
return Exc()
def exception_deduction_with_raise3(x):
try:
exception_deduction0(2)
if x:
raise Exc, Exc()
except Exc, e:
witness(e)
return e
return Exc()
def slice_union(x):
if x:
return slice(1)
else:
return slice(0, 10, 2)
def exception_deduction_we_are_dumb():
a = 1
try:
exception_deduction0(2)
except Exc, e:
a += 1
return e
return Exc()
class Exc2(Exception):
pass
def nested_exception_deduction():
try:
exception_deduction0(1)
except Exc, e:
try:
exception_deduction0(2)
except Exc2, f:
return (e, f)
return (e, Exc2())
return (Exc(), Exc2())
class Exc3(Exception):
def m(self):
return 1
class Exc4(Exc3):
def m(self):
return 1
class Sp:
def o(self):
raise Exc3
class Mod:
def __init__(self, s):
self.s = s
def p(self):
s = self.s
try:
s.o()
except Exc3, e:
return e.m()
return 0
class Mod3:
def __init__(self, s):
self.s = s
def p(self):
s = self.s
try:
s.o()
except Exc4, e1:
return e1.m()
except Exc3, e2:
try:
return e2.m()
except Exc4, e3:
return e3.m()
return 0
mod = Mod(Sp())
mod3 = Mod3(Sp())
def exc_deduction_our_exc_plus_others():
return mod.p()
def exc_deduction_our_excs_plus_others():
return mod3.p()
def call_two_funcs_but_one_can_only_raise(n):
fn = [witness, always_raising][n]
return fn(n)
# constant instances with __init__ vs. __new__
class Thing1:
def __init__(self):
self.thingness = 1
thing1 = Thing1()
def one_thing1():
return thing1
class Thing2(long):
def __new__(t, v):
return long.__new__(t, v * 2)
thing2 = Thing2(2)
def one_thing2():
return thing2
# propagation of fresh instances through attributes
class Stk:
def __init__(self):
self.itms = []
def push(self, v):
self.itms.append(v)
class EC:
def __init__(self):
self.stk = Stk()
def enter(self, f):
self.stk.push(f)
def propagation_of_fresh_instances_through_attrs(x):
e = EC()
e.enter(x)
# same involving recursion
class R:
def __init__(self, n):
if n > 0:
self.r = R(n-1)
else:
self.r = None
self.n = n
if self.r:
self.m = self.r.n
else:
self.m = -1
def make_r(n):
return R(n)
class B:
pass
class Even(B):
def __init__(self, n):
if n > 0:
self.x = [Odd(n-1)]
self.y = self.x[0].x
else:
self.x = []
self.y = []
class Odd(B):
def __init__(self, n):
self.x = [Even(n-1)]
self.y = self.x[0].x
def make_eo(n):
if n % 2 == 0:
return Even(n)
else:
return Odd(n)
# shows that we care about the expanded structure in front of changes to attributes involving only
# instances rev numbers
class Box:
pass
class Box2:
pass
class Box3(Box2):
pass
def flow_rev_numbers(n):
bx3 = Box3()
bx3.x = 1
bx = Box()
bx.bx3 = bx3
if n > 0:
z = bx.bx3.x
if n > 0:
bx2 = Box2()
bx2.x = 3
return z
raise Exception
# class specialization
class PolyStk:
_annspecialcase_ = "specialize:ctr_location"
def __init__(self):
self.itms = []
def push(self, v):
self.itms.append(v)
def top(self):
return self.itms[-1]
def class_spec():
istk = PolyStk()
istk.push(1)
sstk = PolyStk()
sstk.push("a")
istk.push(2)
sstk.push("b")
#if not isinstance(istk, PolyStk):
# return "confused"
return istk.top(), sstk.top()
from rpython.rlib.rarithmetic import ovfcheck
def add_func(i=numtype):
try:
return ovfcheck(i + 1)
except OverflowError:
raise
from sys import maxint
def div_func(i=numtype):
try:
return ovfcheck((-maxint-1) // i)
except (OverflowError, ZeroDivisionError):
raise
def mul_func(x=numtype, y=numtype):
try:
return ovfcheck(x * y)
except OverflowError:
raise
def mod_func(i=numtype):
try:
return ovfcheck((-maxint-1) % i)
except OverflowError:
raise
except ZeroDivisionError:
raise
def rshift_func(i=numtype):
try:
return (-maxint-1) >> i
except ValueError:
raise
class hugelmugel(OverflowError):
pass
def hugo(a, b, c):pass
def lshift_func(i=numtype):
try:
hugo(2, 3, 5)
return ovfcheck((-maxint-1) << i)
except (hugelmugel, OverflowError, StandardError, ValueError):
raise
def unary_func(i=numtype):
try:
return ovfcheck(-i), ovfcheck(abs(i-1))
except:
raise
# XXX it would be nice to get it right without an exception
# handler at all, but then we need to do much harder parsing
| mit | 713,307,751,522,421,600 | 16.384272 | 98 | 0.522027 | false |
cu-csc/automaton | tests/deployment_tests.py | 1 | 2068 | """
Module that tests various deployment functionality
To run me from command line:
cd automaton/tests
export PYTHONPATH=$PYTHONPATH:../
python -m unittest -v deployment_tests
unset PYTHONPATH
I should have used nose but
"""
import unittest
from lib import util
from deployment import common
class test_deployment_functions(unittest.TestCase):
def setUp(self):
self.testing_machine = "vm-148-120.uc.futuregrid.org"
self.bad_machine_name = "Idonotexistwallah.wrong"
self.key_filename = "/Users/ali/.ssh/ali_alzabarah_fg.priv"
def test_port_status_check(self):
# ssh port
self.assertFalse(util.check_port_status("google.com"))
# ssh port
self.assertTrue(util.check_port_status("research.cs.colorado.edu"))
# http port
self.assertTrue(util.check_port_status("google.com", 80, 2))
# wrong domain
self.assertFalse(util.check_port_status("Idonotexistwallah.wrong"))
# wrong ip
self.assertFalse(util.check_port_status("256.256.256.256"))
def test_run_remote_command(self):
result = util.RemoteCommand(self.testing_machine,
self.key_filename, "grep "
"ewrqwerasdfqewr /etc/passwd").execute()
self.assertNotEqual(result, 0)
result = util.RemoteCommand(self.testing_machine, self.key_filename,
"ls -al /etc/passwd").execute()
self.assertEqual(result, 0)
def test_clone_git_repo(self):
self.assertIsNotNone(util.clone_git_repo("https://github.com/"
"alal3177/automaton.git"))
def test_is_executable(self):
self.assertFalse(util.is_executable_file("wrong/path"))
self.assertTrue(util.is_executable_file("/bin/echo"))
self.assertFalse(util.is_executable_file("/tmp"))
def test_get_executable_files(self):
self.assertIsNotNone(common.get_executable_files("/bin"))
if __name__ == '__main__':
unittest.main()
| mit | 3,975,989,049,178,258,000 | 32.901639 | 76 | 0.62911 | false |
agirardeaudale/nbawebstats | docs/generaterequestrst.py | 1 | 2103 | #!/usr/bin/env python
from jinja2 import Environment, FileSystemLoader
import json
import os
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_PATH = os.path.abspath(os.path.join(THIS_DIR, '../nbawebstats/requests.json'))
RST_PATH = os.path.abspath(os.path.join(THIS_DIR, 'requests.rst'))
def format_internal_link(name, domain):
return ":ref:`{0} <{1}-{2}>`".format(name, domain, name.lower())
def with_default(request_params, global_param_map):
return [x for x in request_params if 'default' in global_param_map[x]]
def without_default(request_params, global_param_map):
return [x for x in request_params if 'default' not in global_param_map[x]]
def format_string_literals(strings):
return ["``'{0}'``".format(x) for x in strings]
def format_param_links(param_names):
return [format_internal_link(x, 'param') for x in param_names]
def format_param_type_link(param_type):
param_type_name = {'int': 'Integer',
'boolean-yn': 'Boolean',
'boolean-01': 'Boolean',
'enum': 'Enumerated',
'enum-mapped': 'Enumerated',
'date': 'Date',
'season': 'Season',
'season-id': 'Season'}[param_type]
return format_internal_link(param_type_name, 'type')
def update_request_rst():
with open(DATA_PATH, 'r') as f:
data = json.load(f)
jinja_env = Environment(loader=FileSystemLoader(THIS_DIR),
trim_blocks=True,
lstrip_blocks=True)
jinja_env.filters['with_default'] = with_default
jinja_env.filters['without_default'] = without_default
jinja_env.filters['format_string_literals'] = format_string_literals
jinja_env.filters['format_param_links'] = format_param_links
jinja_env.filters['format_param_type_link'] = format_param_type_link
rst_contents = jinja_env.get_template('requests.template').render(data)
with open(RST_PATH, 'w') as f:
f.write(rst_contents)
if __name__ == '__main__':
update_request_rst()
| mit | -3,689,226,585,242,646,500 | 35.258621 | 83 | 0.61864 | false |
OpenAcademy-OpenStack/nova-scheduler | nova/api/openstack/compute/plugins/v3/admin_password.py | 1 | 2734 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute.schemas.v3 import admin_password
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import compute
from nova import exception
from nova.openstack.common.gettextutils import _
ALIAS = "os-admin-password"
authorize = extensions.extension_authorizer('compute', 'v3:%s' % ALIAS)
class AdminPasswordController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(AdminPasswordController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
@wsgi.action('change_password')
@wsgi.response(204)
@extensions.expected_errors((400, 404, 409, 501))
@validation.schema(admin_password.change_password)
def change_password(self, req, id, body):
context = req.environ['nova.context']
authorize(context)
password = body['change_password']['admin_password']
try:
instance = self.compute_api.get(context, id, want_objects=True)
except exception.InstanceNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
try:
self.compute_api.set_admin_password(context, instance, password)
except exception.InstancePasswordSetFailed as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as e:
raise common.raise_http_conflict_for_instance_invalid_state(
e, 'change_password')
except NotImplementedError:
msg = _("Unable to set password on instance")
raise exc.HTTPNotImplemented(explanation=msg)
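    # Illustrative request body handled by change_password() above; the key
    # names come from the parsing code in this method, while the URL is an
    # assumption based on the wsgi.action decorator and the usual servers
    # action route:
    #
    #   POST /v3/servers/{server_id}/action
    #   {"change_password": {"admin_password": "new-secret"}}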
class AdminPassword(extensions.V3APIExtensionBase):
"""Admin password management support."""
name = "AdminPassword"
alias = ALIAS
version = 1
def get_resources(self):
return []
def get_controller_extensions(self):
controller = AdminPasswordController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
| apache-2.0 | 1,025,928,680,341,808,000 | 35.453333 | 79 | 0.696781 | false |
syrusakbary/Djinja | setup.py | 1 | 1097 | from setuptools import setup, find_packages
setup(
name='Djinja',
version=".".join(map(str, __import__("djinja").__version__)),
    description='A package that makes it possible to integrate Jinja2 into Django in a clean way.',
long_description=open('README.rst').read(),
author='Syrus Akbary Nieto',
author_email='[email protected]',
url='http://github.com/syrusakbary/djinja',
license='BSD',
packages=find_packages(exclude=['ez_setup']),
install_requires=[
'Django',
'Jinja2',
],
include_package_data=True,
zip_safe=False, # because we're including media that Django needs
classifiers=[
"Framework :: Django",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"Operating System :: OS Independent",
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| bsd-3-clause | 165,900,132,703,426,240 | 35.566667 | 101 | 0.631723 | false |
wpjesus/codematch | ietf/group/mails.py | 1 | 2488 | # generation of mails
import re
from django.utils.html import strip_tags
from django.utils.text import wrap
from django.conf import settings
from django.core.urlresolvers import reverse as urlreverse
from ietf.utils.mail import send_mail, send_mail_text
from ietf.mailtrigger.utils import gather_address_lists
def email_admin_re_charter(request, group, subject, text, mailtrigger):
(to,cc) = gather_address_lists(mailtrigger,group=group)
full_subject = u"Regarding %s %s: %s" % (group.type.name, group.acronym, subject)
text = strip_tags(text)
send_mail(request, to, None, full_subject,
"group/email_iesg_secretary_re_charter.txt",
dict(text=text,
group=group,
group_url=settings.IDTRACKER_BASE_URL + group.about_url(),
charter_url=settings.IDTRACKER_BASE_URL + urlreverse('doc_view', kwargs=dict(name=group.charter.name)) if group.charter else "[no charter]",
),
cc=cc,
)
def email_personnel_change(request, group, text, changed_personnel):
(to, cc) = gather_address_lists('group_personnel_change',group=group,changed_personnel=changed_personnel)
full_subject = u"Personnel change for %s %s" % (group.acronym,group.type.name)
send_mail_text(request, to, None, full_subject, text, cc=cc)
def email_milestones_changed(request, group, changes, states):
def wrap_up_email(addrs, text):
subject = u"Milestones changed for %s %s" % (group.acronym, group.type.name)
if re.search("Added .* for review, due",text):
subject = u"Review Required - " + subject
text = wrap(strip_tags(text), 70)
text += "\n\n"
text += u"URL: %s" % (settings.IDTRACKER_BASE_URL + group.about_url())
send_mail_text(request, addrs.to, None, subject, text, cc=addrs.cc)
# first send to those who should see any edits (such as management and chairs)
addrs = gather_address_lists('group_milestones_edited',group=group)
if addrs.to or addrs.cc:
wrap_up_email(addrs, u"\n\n".join(c + "." for c in changes))
# then send only the approved milestones to those who shouldn't be
# bothered with milestones pending approval
addrs = gather_address_lists('group_approved_milestones_edited',group=group)
msg = u"\n\n".join(c + "." for c,s in zip(changes,states) if not s == "review")
if (addrs.to or addrs.cc) and msg:
wrap_up_email(addrs, msg)
| bsd-3-clause | -7,655,907,386,327,128,000 | 41.169492 | 159 | 0.659164 | false |
amaozhao/basecms | mptt/managers.py | 1 | 46897 | """
A custom manager for working with trees of objects.
"""
from __future__ import unicode_literals
import contextlib
from django.db import models, connections, router
from django.db.models import F, ManyToManyField, Max, Q
from django.utils.translation import ugettext as _
from mptt.exceptions import CantDisableUpdates, InvalidMove
__all__ = ('TreeManager',)
COUNT_SUBQUERY = """(
SELECT COUNT(*)
FROM %(rel_table)s
WHERE %(mptt_fk)s = %(mptt_table)s.%(mptt_pk)s
)"""
CUMULATIVE_COUNT_SUBQUERY = """(
SELECT COUNT(*)
FROM %(rel_table)s
WHERE %(mptt_fk)s IN
(
SELECT m2.%(mptt_pk)s
FROM %(mptt_table)s m2
WHERE m2.%(tree_id)s = %(mptt_table)s.%(tree_id)s
AND m2.%(left)s BETWEEN %(mptt_table)s.%(left)s
AND %(mptt_table)s.%(right)s
)
)"""
COUNT_SUBQUERY_M2M = """(
SELECT COUNT(*)
FROM %(rel_table)s j
INNER JOIN %(rel_m2m_table)s k ON j.%(rel_pk)s = k.%(rel_m2m_column)s
WHERE k.%(mptt_fk)s = %(mptt_table)s.%(mptt_pk)s
)"""
CUMULATIVE_COUNT_SUBQUERY_M2M = """(
SELECT COUNT(*)
FROM %(rel_table)s j
INNER JOIN %(rel_m2m_table)s k ON j.%(rel_pk)s = k.%(rel_m2m_column)s
WHERE k.%(mptt_fk)s IN
(
SELECT m2.%(mptt_pk)s
FROM %(mptt_table)s m2
WHERE m2.%(tree_id)s = %(mptt_table)s.%(tree_id)s
AND m2.%(left)s BETWEEN %(mptt_table)s.%(left)s
AND %(mptt_table)s.%(right)s
)
)"""
class TreeManager(models.Manager):
"""
A manager for working with trees of objects.
"""
def init_from_model(self, model):
"""
Sets things up. This would normally be done in contribute_to_class(),
but Django calls that before we've created our extra tree fields on the
model (which we need). So it's done here instead, after field setup.
"""
# Avoid calling "get_field_by_name()", which populates the related
# models cache and can cause circular imports in complex projects.
# Instead, find the tree_id field using "get_fields_with_model()".
[tree_field] = [
fld
for fld in model._meta.get_fields_with_model()
if fld[0].name == self.tree_id_attr]
if tree_field[1]:
# tree_model is the model that contains the tree fields.
# This is usually just the same as model, but not for derived
# models.
self.tree_model = tree_field[1]
else:
self.tree_model = model
self._base_manager = None
if self.tree_model is not model:
# _base_manager is the treemanager on tree_model
self._base_manager = self.tree_model._tree_manager
def get_query_set(self, *args, **kwargs):
"""
Ensures that this manager always returns nodes in tree order.
This method can be removed when support for Django < 1.6 is dropped.
"""
return super(TreeManager, self).get_query_set(*args, **kwargs).order_by(
self.tree_id_attr, self.left_attr)
def get_queryset(self, *args, **kwargs):
"""
Ensures that this manager always returns nodes in tree order.
"""
return super(TreeManager, self).get_queryset(*args, **kwargs).order_by(
self.tree_id_attr, self.left_attr)
def _get_queryset_relatives(self, queryset, direction, include_self):
"""
        Returns a queryset containing either the descendants
        (``direction == 'desc'``) or the ancestors (``direction == 'asc'``)
        of all nodes in a given queryset.
This function is not meant to be called directly, although there is no
harm in doing so.
Instead, it should be used via ``get_queryset_descendants()`` and/or
``get_queryset_ancestors()``.
This function exists mainly to consolidate the nearly duplicate code
that exists between the two aforementioned functions.
"""
assert self.model is queryset.model
opts = queryset.model._mptt_meta
if not queryset:
return self.none()
filters = None
for node in queryset:
            lft, rght = getattr(node, opts.left_attr), getattr(node, opts.right_attr)
if direction == 'asc':
if include_self:
lft += 1
rght -= 1
lft_op = 'lt'
rght_op = 'gt'
elif direction == 'desc':
if include_self:
lft -= 1
rght += 1
lft_op = 'gt'
rght_op = 'lt'
q = Q(**{
opts.tree_id_attr: getattr(node, opts.tree_id_attr),
'%s__%s' % (opts.left_attr, lft_op): lft,
'%s__%s' % (opts.right_attr, rght_op): rght,
})
if filters is None:
filters = q
else:
filters |= q
return self.filter(filters)
def get_queryset_descendants(self, queryset, include_self=False):
"""
Returns a queryset containing the descendants of all nodes in the
given queryset.
If ``include_self=True``, nodes in ``queryset`` will also
be included in the result.
"""
return self._get_queryset_relatives(queryset, 'desc', include_self)
def get_queryset_ancestors(self, queryset, include_self=False):
"""
Returns a queryset containing the ancestors
of all nodes in the given queryset.
If ``include_self=True``, nodes in ``queryset`` will also
be included in the result.
"""
return self._get_queryset_relatives(queryset, 'asc', include_self)
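    # Hypothetical usage sketch; ``Category`` stands in for any MPTTModel and
    # is not defined in this module.
    #
    #   roots = Category.objects.filter(parent=None)
    #   subtree = Category.objects.get_queryset_descendants(
    #       roots, include_self=True)
    #   ancestors = Category.objects.get_queryset_ancestors(subtree)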
@contextlib.contextmanager
def disable_mptt_updates(self):
"""
Context manager. Disables mptt updates.
NOTE that this context manager causes inconsistencies! MPTT model
methods are not guaranteed to return the correct results.
When to use this method:
If used correctly, this method can be used to speed up bulk
updates.
This doesn't do anything clever. It *will* mess up your tree. You
should follow this method with a call to ``TreeManager.rebuild()``
to ensure your tree stays sane, and you should wrap both calls in a
transaction.
This is best for updates that span a large part of the table. If
you are doing localised changes (one tree, or a few trees) consider
using ``delay_mptt_updates``.
If you are making only minor changes to your tree, just let the
updates happen.
Transactions:
This doesn't enforce any transactional behavior. You should wrap
this in a transaction to ensure database consistency.
If updates are already disabled on the model, this is a noop.
Usage::
with transaction.atomic():
with MyNode.objects.disable_mptt_updates():
## bulk updates.
MyNode.objects.rebuild()
"""
# Error cases:
if self.model._meta.abstract:
# an abstract model. Design decision needed - do we disable
# updates for all concrete models that derive from this model? I
# vote no - that's a bit implicit and it's a weird use-case
# anyway. Open to further discussion :)
raise CantDisableUpdates(
"You can't disable/delay mptt updates on %s,"
" it's an abstract model" % self.model.__name__
)
elif self.model._meta.proxy:
# a proxy model. disabling updates would implicitly affect other
# models using the db table. Caller should call this on the
# manager for the concrete model instead, to make the behavior
# explicit.
raise CantDisableUpdates(
"You can't disable/delay mptt updates on %s, it's a proxy"
" model. Call the concrete model instead."
% self.model.__name__
)
elif self.tree_model is not self.model:
# a multiple-inheritance child of an MPTTModel. Disabling
# updates may affect instances of other models in the tree.
raise CantDisableUpdates(
"You can't disable/delay mptt updates on %s, it doesn't"
" contain the mptt fields."
% self.model.__name__
)
if not self.model._mptt_updates_enabled:
# already disabled, noop.
yield
else:
self.model._set_mptt_updates_enabled(False)
try:
yield
finally:
self.model._set_mptt_updates_enabled(True)
@contextlib.contextmanager
def delay_mptt_updates(self):
"""
Context manager. Delays mptt updates until the end of a block of bulk
processing.
NOTE that this context manager causes inconsistencies! MPTT model
methods are not guaranteed to return the correct results until the end
of the context block.
When to use this method:
If used correctly, this method can be used to speed up bulk
updates. This is best for updates in a localised area of the db
table, especially if all the updates happen in a single tree and
the rest of the forest is left untouched. No subsequent rebuild is
necessary.
``delay_mptt_updates`` does a partial rebuild of the modified trees
(not the whole table). If used indiscriminately, this can actually
be much slower than just letting the updates occur when they're
required.
The worst case occurs when every tree in the table is modified just
once. That results in a full rebuild of the table, which can be
*very* slow.
If your updates will modify most of the trees in the table (not a
small number of trees), you should consider using
``TreeManager.disable_mptt_updates``, as it does much fewer
queries.
Transactions:
This doesn't enforce any transactional behavior. You should wrap
this in a transaction to ensure database consistency.
Exceptions:
If an exception occurs before the processing of the block, delayed
updates will not be applied.
Usage::
with transaction.atomic():
with MyNode.objects.delay_mptt_updates():
## bulk updates.
"""
with self.disable_mptt_updates():
if self.model._mptt_is_tracking:
# already tracking, noop.
yield
else:
self.model._mptt_start_tracking()
try:
yield
except Exception:
# stop tracking, but discard results
self.model._mptt_stop_tracking()
raise
results = self.model._mptt_stop_tracking()
partial_rebuild = self.partial_rebuild
for tree_id in results:
partial_rebuild(tree_id)
@property
def parent_attr(self):
return self.model._mptt_meta.parent_attr
@property
def left_attr(self):
return self.model._mptt_meta.left_attr
@property
def right_attr(self):
return self.model._mptt_meta.right_attr
@property
def tree_id_attr(self):
return self.model._mptt_meta.tree_id_attr
@property
def level_attr(self):
return self.model._mptt_meta.level_attr
def _translate_lookups(self, **lookups):
new_lookups = {}
join_parts = '__'.join
for k, v in lookups.items():
parts = k.split('__')
new_parts = []
new_parts__append = new_parts.append
for part in parts:
new_parts__append(getattr(self, part + '_attr', part))
new_lookups[join_parts(new_parts)] = v
return new_lookups
def _mptt_filter(self, qs=None, **filters):
"""
Like ``self.filter()``, but translates name-agnostic filters for MPTT
fields.
"""
if self._base_manager:
return self._base_manager._mptt_filter(qs=qs, **filters)
if qs is None:
qs = self
return qs.filter(**self._translate_lookups(**filters))
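    # Illustrative note (assuming the default MPTT field names): the lookups
    # passed to _mptt_filter()/_mptt_update() are translated by
    # _translate_lookups(), e.g.
    #
    #   self._translate_lookups(parent=None, left__gt=5)
    #   -> {'parent': None, 'lft__gt': 5}
    #
    # so internal code can say "left"/"right"/"tree_id" without knowing the
    # concrete column names configured on the model.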
def _mptt_update(self, qs=None, **items):
"""
Like ``self.update()``, but translates name-agnostic MPTT fields.
"""
if self._base_manager:
return self._base_manager._mptt_update(qs=qs, **items)
if qs is None:
qs = self
return qs.update(**self._translate_lookups(**items))
def _get_connection(self, **hints):
return connections[router.db_for_write(self.model, **hints)]
def add_related_count(self, queryset, rel_model, rel_field, count_attr,
cumulative=False):
"""
Adds a related item count to a given ``QuerySet`` using its
``extra`` method, for a ``Model`` class which has a relation to
this ``Manager``'s ``Model`` class.
Arguments:
``rel_model``
A ``Model`` class which has a relation to this `Manager``'s
``Model`` class.
``rel_field``
The name of the field in ``rel_model`` which holds the
relation.
``count_attr``
The name of an attribute which should be added to each item in
this ``QuerySet``, containing a count of how many instances
of ``rel_model`` are related to it through ``rel_field``.
``cumulative``
If ``True``, the count will be for each item and all of its
descendants, otherwise it will be for each item itself.
"""
connection = self._get_connection()
qn = connection.ops.quote_name
meta = self.model._meta
mptt_field = rel_model._meta.get_field(rel_field)
if isinstance(mptt_field, ManyToManyField):
if cumulative:
subquery = CUMULATIVE_COUNT_SUBQUERY_M2M % {
'rel_table': qn(rel_model._meta.db_table),
'rel_pk': qn(rel_model._meta.pk.column),
'rel_m2m_table': qn(mptt_field.m2m_db_table()),
'rel_m2m_column': qn(mptt_field.m2m_column_name()),
'mptt_fk': qn(mptt_field.m2m_reverse_name()),
'mptt_table': qn(self.tree_model._meta.db_table),
'mptt_pk': qn(meta.pk.column),
'tree_id': qn(meta.get_field(self.tree_id_attr).column),
'left': qn(meta.get_field(self.left_attr).column),
'right': qn(meta.get_field(self.right_attr).column),
}
else:
subquery = COUNT_SUBQUERY_M2M % {
'rel_table': qn(rel_model._meta.db_table),
'rel_pk': qn(rel_model._meta.pk.column),
'rel_m2m_table': qn(mptt_field.m2m_db_table()),
'rel_m2m_column': qn(mptt_field.m2m_column_name()),
'mptt_fk': qn(mptt_field.m2m_reverse_name()),
'mptt_table': qn(self.tree_model._meta.db_table),
'mptt_pk': qn(meta.pk.column),
}
else:
if cumulative:
subquery = CUMULATIVE_COUNT_SUBQUERY % {
'rel_table': qn(rel_model._meta.db_table),
'mptt_fk': qn(rel_model._meta.get_field(rel_field).column),
'mptt_table': qn(self.tree_model._meta.db_table),
'mptt_pk': qn(meta.pk.column),
'tree_id': qn(meta.get_field(self.tree_id_attr).column),
'left': qn(meta.get_field(self.left_attr).column),
'right': qn(meta.get_field(self.right_attr).column),
}
else:
subquery = COUNT_SUBQUERY % {
'rel_table': qn(rel_model._meta.db_table),
'mptt_fk': qn(rel_model._meta.get_field(rel_field).column),
'mptt_table': qn(self.tree_model._meta.db_table),
'mptt_pk': qn(meta.pk.column),
}
return queryset.extra(select={count_attr: subquery})
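    # Hypothetical usage sketch; ``Category`` and ``Article`` are illustrative
    # models (Article having a ForeignKey named 'category'), not part of this
    # module.
    #
    #   categories = Category.objects.add_related_count(
    #       Category.objects.all(), Article, 'category',
    #       'article_count', cumulative=True)
    #   # each item now carries .article_count, including descendant articles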
def insert_node(self, node, target, position='last-child', save=False,
allow_existing_pk=False):
"""
        Sets up the tree state for ``node`` (which has not yet been
        inserted into the database) so it will be positioned relative
        to a given ``target`` node as specified by ``position`` (when
        appropriate) when it is inserted, with any necessary space
        already having been made for it.
A ``target`` of ``None`` indicates that ``node`` should be
the last root node.
If ``save`` is ``True``, ``node``'s ``save()`` method will be
called before it is returned.
NOTE: This is a low-level method; it does NOT respect
``MPTTMeta.order_insertion_by``. In most cases you should just
set the node's parent and let mptt call this during save.
"""
if self._base_manager:
return self._base_manager.insert_node(
node, target, position=position, save=save)
if node.pk and not allow_existing_pk and self.filter(pk=node.pk).exists():
raise ValueError(_('Cannot insert a node which has already been saved.'))
if target is None:
tree_id = self._get_next_tree_id()
setattr(node, self.left_attr, 1)
setattr(node, self.right_attr, 2)
setattr(node, self.level_attr, 0)
setattr(node, self.tree_id_attr, tree_id)
setattr(node, self.parent_attr, None)
elif target.is_root_node() and position in ['left', 'right']:
target_tree_id = getattr(target, self.tree_id_attr)
if position == 'left':
tree_id = target_tree_id
space_target = target_tree_id - 1
else:
tree_id = target_tree_id + 1
space_target = target_tree_id
self._create_tree_space(space_target)
setattr(node, self.left_attr, 1)
setattr(node, self.right_attr, 2)
setattr(node, self.level_attr, 0)
setattr(node, self.tree_id_attr, tree_id)
setattr(node, self.parent_attr, None)
else:
setattr(node, self.left_attr, 0)
setattr(node, self.level_attr, 0)
space_target, level, left, parent, right_shift = \
self._calculate_inter_tree_move_values(node, target, position)
tree_id = getattr(parent, self.tree_id_attr)
self._create_space(2, space_target, tree_id)
setattr(node, self.left_attr, -left)
setattr(node, self.right_attr, -left + 1)
setattr(node, self.level_attr, -level)
setattr(node, self.tree_id_attr, tree_id)
setattr(node, self.parent_attr, parent)
if parent:
self._post_insert_update_cached_parent_right(parent, right_shift)
if save:
node.save()
return node
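    # Hypothetical usage sketch; ``Category`` is an illustrative MPTTModel.
    # As the docstring warns, this is a low-level call that ignores
    # MPTTMeta.order_insertion_by - normal code just sets .parent and saves.
    #
    #   root = Category.objects.get(pk=1)
    #   node = Category(name='child')        # not yet saved
    #   Category.objects.insert_node(node, root,
    #                                position='last-child', save=True)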
def _move_node(self, node, target, position='last-child', save=True):
if self._base_manager:
return self._base_manager.move_node(node, target, position=position)
if self.tree_model._mptt_is_tracking:
# delegate to insert_node and clean up the gaps later.
return self.insert_node(node, target, position=position, save=save,
allow_existing_pk=True)
else:
if target is None:
if node.is_child_node():
self._make_child_root_node(node)
elif target.is_root_node() and position in ('left', 'right'):
self._make_sibling_of_root_node(node, target, position)
else:
if node.is_root_node():
self._move_root_node(node, target, position)
else:
self._move_child_node(node, target, position)
def move_node(self, node, target, position='last-child'):
"""
Moves ``node`` relative to a given ``target`` node as specified
by ``position`` (when appropriate), by examining both nodes and
calling the appropriate method to perform the move.
A ``target`` of ``None`` indicates that ``node`` should be
turned into a root node.
Valid values for ``position`` are ``'first-child'``,
``'last-child'``, ``'left'`` or ``'right'``.
``node`` will be modified to reflect its new tree state in the
database.
This method explicitly checks for ``node`` being made a sibling
of a root node, as this is a special case due to our use of tree
ids to order root nodes.
NOTE: This is a low-level method; it does NOT respect
``MPTTMeta.order_insertion_by``. In most cases you should just
move the node yourself by setting node.parent.
"""
self._move_node(node, target, position=position)
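    # Hypothetical usage sketch; ``Category`` is an illustrative MPTTModel.
    #
    #   node = Category.objects.get(name='child')
    #   target = Category.objects.get(name='new-parent')
    #   Category.objects.move_node(node, target, position='first-child')
    #   # a target of None would turn ``node`` into a root node instead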
def root_node(self, tree_id):
"""
Returns the root node of the tree with the given id.
"""
if self._base_manager:
return self._base_manager.root_node(tree_id)
return self._mptt_filter(tree_id=tree_id, parent=None).get()
def root_nodes(self):
"""
Creates a ``QuerySet`` containing root nodes.
"""
if self._base_manager:
return self._base_manager.root_nodes()
return self._mptt_filter(parent=None)
def rebuild(self):
"""
Rebuilds all trees in the database table using `parent` link.
"""
if self._base_manager:
return self._base_manager.rebuild()
opts = self.model._mptt_meta
qs = self._mptt_filter(parent=None)
if opts.order_insertion_by:
qs = qs.order_by(*opts.order_insertion_by)
pks = qs.values_list('pk', flat=True)
rebuild_helper = self._rebuild_helper
idx = 0
for pk in pks:
idx += 1
rebuild_helper(pk, 1, idx)
rebuild.alters_data = True
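    # Hypothetical usage sketch combining rebuild() with the bulk-update
    # context manager defined earlier in this class; ``Category`` is an
    # illustrative MPTTModel and ``transaction`` is django.db.transaction.
    #
    #   with transaction.atomic():
    #       with Category.objects.disable_mptt_updates():
    #           ...  # bulk changes to Category.parent
    #       Category.objects.rebuild()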
def partial_rebuild(self, tree_id):
"""
        Partially rebuilds a tree, i.e. it rebuilds only the tree with the
        given ``tree_id`` in the database table, using the ``parent`` link.
"""
if self._base_manager:
return self._base_manager.partial_rebuild(tree_id)
opts = self.model._mptt_meta
qs = self._mptt_filter(parent=None, tree_id=tree_id)
if opts.order_insertion_by:
qs = qs.order_by(*opts.order_insertion_by)
pks = qs.values_list('pk', flat=True)
if not pks:
return
if len(pks) > 1:
raise RuntimeError(
"More than one root node with tree_id %d. That's invalid,"
" do a full rebuild." % tree_id)
self._rebuild_helper(pks[0], 1, tree_id)
def _rebuild_helper(self, pk, left, tree_id, level=0):
opts = self.model._mptt_meta
right = left + 1
qs = self._mptt_filter(parent__pk=pk)
if opts.order_insertion_by:
qs = qs.order_by(*opts.order_insertion_by)
child_ids = qs.values_list('pk', flat=True)
rebuild_helper = self._rebuild_helper
for child_id in child_ids:
right = rebuild_helper(child_id, right, tree_id, level + 1)
qs = self.model._default_manager.filter(pk=pk)
self._mptt_update(
qs,
left=left,
right=right,
level=level,
tree_id=tree_id
)
return right + 1
def _post_insert_update_cached_parent_right(self, instance, right_shift, seen=None):
setattr(instance, self.right_attr, getattr(instance, self.right_attr) + right_shift)
attr = '_%s_cache' % self.parent_attr
if hasattr(instance, attr):
parent = getattr(instance, attr)
if parent:
if not seen:
seen = set()
seen.add(instance)
if parent in seen:
# detect infinite recursion and throw an error
raise InvalidMove
self._post_insert_update_cached_parent_right(parent, right_shift, seen=seen)
def _calculate_inter_tree_move_values(self, node, target, position):
"""
Calculates values required when moving ``node`` relative to
``target`` as specified by ``position``.
"""
left = getattr(node, self.left_attr)
level = getattr(node, self.level_attr)
target_left = getattr(target, self.left_attr)
target_right = getattr(target, self.right_attr)
target_level = getattr(target, self.level_attr)
if position == 'last-child' or position == 'first-child':
if position == 'last-child':
space_target = target_right - 1
else:
space_target = target_left
level_change = level - target_level - 1
parent = target
elif position == 'left' or position == 'right':
if position == 'left':
space_target = target_left - 1
else:
space_target = target_right
level_change = level - target_level
parent = getattr(target, self.parent_attr)
else:
raise ValueError(_('An invalid position was given: %s.') % position)
left_right_change = left - space_target - 1
right_shift = 0
if parent:
right_shift = 2 * (node.get_descendant_count() + 1)
return space_target, level_change, left_right_change, parent, right_shift
def _close_gap(self, size, target, tree_id):
"""
Closes a gap of a certain ``size`` after the given ``target``
point in the tree identified by ``tree_id``.
"""
self._manage_space(-size, target, tree_id)
def _create_space(self, size, target, tree_id):
"""
Creates a space of a certain ``size`` after the given ``target``
point in the tree identified by ``tree_id``.
"""
self._manage_space(size, target, tree_id)
def _create_tree_space(self, target_tree_id, num_trees=1):
"""
Creates space for a new tree by incrementing all tree ids
greater than ``target_tree_id``.
"""
qs = self._mptt_filter(tree_id__gt=target_tree_id)
self._mptt_update(qs, tree_id=F(self.tree_id_attr) + num_trees)
self.tree_model._mptt_track_tree_insertions(target_tree_id + 1, num_trees)
def _get_next_tree_id(self):
"""
Determines the next largest unused tree id for the tree managed
by this manager.
"""
max_tree_id = list(self.aggregate(Max(self.tree_id_attr)).values())[0]
max_tree_id = max_tree_id or 0
return max_tree_id + 1
def _inter_tree_move_and_close_gap(
self, node, level_change,
left_right_change, new_tree_id, parent_pk=None):
"""
Removes ``node`` from its current tree, with the given set of
changes being applied to ``node`` and its descendants, closing
the gap left by moving ``node`` as it does so.
If ``parent_pk`` is ``None``, this indicates that ``node`` is
being moved to a brand new tree as its root node, and will thus
have its parent field set to ``NULL``. Otherwise, ``node`` will
have ``parent_pk`` set for its parent field.
"""
connection = self._get_connection(instance=node)
qn = connection.ops.quote_name
opts = self.model._meta
inter_tree_move_query = """
UPDATE %(table)s
SET %(level)s = CASE
WHEN %(left)s >= %%s AND %(left)s <= %%s
THEN %(level)s - %%s
ELSE %(level)s END,
%(tree_id)s = CASE
WHEN %(left)s >= %%s AND %(left)s <= %%s
THEN %%s
ELSE %(tree_id)s END,
%(left)s = CASE
WHEN %(left)s >= %%s AND %(left)s <= %%s
THEN %(left)s - %%s
WHEN %(left)s > %%s
THEN %(left)s - %%s
ELSE %(left)s END,
%(right)s = CASE
WHEN %(right)s >= %%s AND %(right)s <= %%s
THEN %(right)s - %%s
WHEN %(right)s > %%s
THEN %(right)s - %%s
ELSE %(right)s END,
%(parent)s = CASE
WHEN %(pk)s = %%s
THEN %(new_parent)s
ELSE %(parent)s END
WHERE %(tree_id)s = %%s""" % {
'table': qn(self.tree_model._meta.db_table),
'level': qn(opts.get_field(self.level_attr).column),
'left': qn(opts.get_field(self.left_attr).column),
'tree_id': qn(opts.get_field(self.tree_id_attr).column),
'right': qn(opts.get_field(self.right_attr).column),
'parent': qn(opts.get_field(self.parent_attr).column),
'pk': qn(opts.pk.column),
'new_parent': parent_pk is None and 'NULL' or '%s',
}
left = getattr(node, self.left_attr)
right = getattr(node, self.right_attr)
gap_size = right - left + 1
gap_target_left = left - 1
params = [
left, right, level_change,
left, right, new_tree_id,
left, right, left_right_change,
gap_target_left, gap_size,
left, right, left_right_change,
gap_target_left, gap_size,
node.pk,
getattr(node, self.tree_id_attr)
]
if parent_pk is not None:
params.insert(-1, parent_pk)
cursor = connection.cursor()
cursor.execute(inter_tree_move_query, params)
def _make_child_root_node(self, node, new_tree_id=None):
"""
Removes ``node`` from its tree, making it the root node of a new
tree.
If ``new_tree_id`` is not specified a new tree id will be
generated.
``node`` will be modified to reflect its new tree state in the
database.
"""
left = getattr(node, self.left_attr)
right = getattr(node, self.right_attr)
level = getattr(node, self.level_attr)
if not new_tree_id:
new_tree_id = self._get_next_tree_id()
left_right_change = left - 1
self._inter_tree_move_and_close_gap(node, level, left_right_change, new_tree_id)
# Update the node to be consistent with the updated
# tree in the database.
setattr(node, self.left_attr, left - left_right_change)
setattr(node, self.right_attr, right - left_right_change)
setattr(node, self.level_attr, 0)
setattr(node, self.tree_id_attr, new_tree_id)
setattr(node, self.parent_attr, None)
node._mptt_cached_fields[self.parent_attr] = None
def _make_sibling_of_root_node(self, node, target, position):
"""
Moves ``node``, making it a sibling of the given ``target`` root
node as specified by ``position``.
``node`` will be modified to reflect its new tree state in the
database.
Since we use tree ids to reduce the number of rows affected by
        tree management during insertion and deletion, root nodes are not
true siblings; thus, making an item a sibling of a root node is
a special case which involves shuffling tree ids around.
"""
if node == target:
raise InvalidMove(_('A node may not be made a sibling of itself.'))
opts = self.model._meta
tree_id = getattr(node, self.tree_id_attr)
target_tree_id = getattr(target, self.tree_id_attr)
if node.is_child_node():
if position == 'left':
space_target = target_tree_id - 1
new_tree_id = target_tree_id
elif position == 'right':
space_target = target_tree_id
new_tree_id = target_tree_id + 1
else:
raise ValueError(_('An invalid position was given: %s.') % position)
self._create_tree_space(space_target)
if tree_id > space_target:
# The node's tree id has been incremented in the
# database - this change must be reflected in the node
# object for the method call below to operate on the
# correct tree.
setattr(node, self.tree_id_attr, tree_id + 1)
self._make_child_root_node(node, new_tree_id)
else:
if position == 'left':
if target_tree_id > tree_id:
left_sibling = target.get_previous_sibling()
if node == left_sibling:
return
new_tree_id = getattr(left_sibling, self.tree_id_attr)
lower_bound, upper_bound = tree_id, new_tree_id
shift = -1
else:
new_tree_id = target_tree_id
lower_bound, upper_bound = new_tree_id, tree_id
shift = 1
elif position == 'right':
if target_tree_id > tree_id:
new_tree_id = target_tree_id
lower_bound, upper_bound = tree_id, target_tree_id
shift = -1
else:
right_sibling = target.get_next_sibling()
if node == right_sibling:
return
new_tree_id = getattr(right_sibling, self.tree_id_attr)
lower_bound, upper_bound = new_tree_id, tree_id
shift = 1
else:
raise ValueError(_('An invalid position was given: %s.') % position)
connection = self._get_connection(instance=node)
qn = connection.ops.quote_name
root_sibling_query = """
UPDATE %(table)s
SET %(tree_id)s = CASE
WHEN %(tree_id)s = %%s
THEN %%s
ELSE %(tree_id)s + %%s END
WHERE %(tree_id)s >= %%s AND %(tree_id)s <= %%s""" % {
'table': qn(self.tree_model._meta.db_table),
'tree_id': qn(opts.get_field(self.tree_id_attr).column),
}
cursor = connection.cursor()
cursor.execute(root_sibling_query, [tree_id, new_tree_id, shift,
lower_bound, upper_bound])
setattr(node, self.tree_id_attr, new_tree_id)
def _manage_space(self, size, target, tree_id):
"""
Manages spaces in the tree identified by ``tree_id`` by changing
the values of the left and right columns by ``size`` after the
given ``target`` point.
"""
if self.tree_model._mptt_is_tracking:
self.tree_model._mptt_track_tree_modified(tree_id)
else:
connection = self._get_connection()
qn = connection.ops.quote_name
opts = self.model._meta
space_query = """
UPDATE %(table)s
SET %(left)s = CASE
WHEN %(left)s > %%s
THEN %(left)s + %%s
ELSE %(left)s END,
%(right)s = CASE
WHEN %(right)s > %%s
THEN %(right)s + %%s
ELSE %(right)s END
WHERE %(tree_id)s = %%s
AND (%(left)s > %%s OR %(right)s > %%s)""" % {
'table': qn(self.tree_model._meta.db_table),
'left': qn(opts.get_field(self.left_attr).column),
'right': qn(opts.get_field(self.right_attr).column),
'tree_id': qn(opts.get_field(self.tree_id_attr).column),
}
cursor = connection.cursor()
cursor.execute(space_query, [target, size, target, size, tree_id,
target, target])
def _move_child_node(self, node, target, position):
"""
Calls the appropriate method to move child node ``node``
relative to the given ``target`` node as specified by
``position``.
"""
tree_id = getattr(node, self.tree_id_attr)
target_tree_id = getattr(target, self.tree_id_attr)
if tree_id == target_tree_id:
self._move_child_within_tree(node, target, position)
else:
self._move_child_to_new_tree(node, target, position)
def _move_child_to_new_tree(self, node, target, position):
"""
Moves child node ``node`` to a different tree, inserting it
relative to the given ``target`` node in the new tree as
specified by ``position``.
``node`` will be modified to reflect its new tree state in the
database.
"""
left = getattr(node, self.left_attr)
right = getattr(node, self.right_attr)
level = getattr(node, self.level_attr)
new_tree_id = getattr(target, self.tree_id_attr)
space_target, level_change, left_right_change, parent, new_parent_right = \
self._calculate_inter_tree_move_values(node, target, position)
tree_width = right - left + 1
# Make space for the subtree which will be moved
self._create_space(tree_width, space_target, new_tree_id)
# Move the subtree
self._inter_tree_move_and_close_gap(
node, level_change, left_right_change, new_tree_id, parent.pk)
# Update the node to be consistent with the updated
# tree in the database.
setattr(node, self.left_attr, left - left_right_change)
setattr(node, self.right_attr, right - left_right_change)
setattr(node, self.level_attr, level - level_change)
setattr(node, self.tree_id_attr, new_tree_id)
setattr(node, self.parent_attr, parent)
node._mptt_cached_fields[self.parent_attr] = parent.pk
def _move_child_within_tree(self, node, target, position):
"""
Moves child node ``node`` within its current tree relative to
the given ``target`` node as specified by ``position``.
``node`` will be modified to reflect its new tree state in the
database.
"""
left = getattr(node, self.left_attr)
right = getattr(node, self.right_attr)
level = getattr(node, self.level_attr)
width = right - left + 1
tree_id = getattr(node, self.tree_id_attr)
target_left = getattr(target, self.left_attr)
target_right = getattr(target, self.right_attr)
target_level = getattr(target, self.level_attr)
if position == 'last-child' or position == 'first-child':
if node == target:
raise InvalidMove(_('A node may not be made a child of itself.'))
elif left < target_left < right:
raise InvalidMove(_('A node may not be made a child of any of its descendants.'))
if position == 'last-child':
if target_right > right:
new_left = target_right - width
new_right = target_right - 1
else:
new_left = target_right
new_right = target_right + width - 1
else:
if target_left > left:
new_left = target_left - width + 1
new_right = target_left
else:
new_left = target_left + 1
new_right = target_left + width
level_change = level - target_level - 1
parent = target
elif position == 'left' or position == 'right':
if node == target:
raise InvalidMove(_('A node may not be made a sibling of itself.'))
elif left < target_left < right:
raise InvalidMove(_('A node may not be made a sibling of any of its descendants.'))
if position == 'left':
if target_left > left:
new_left = target_left - width
new_right = target_left - 1
else:
new_left = target_left
new_right = target_left + width - 1
else:
if target_right > right:
new_left = target_right - width + 1
new_right = target_right
else:
new_left = target_right + 1
new_right = target_right + width
level_change = level - target_level
parent = getattr(target, self.parent_attr)
else:
raise ValueError(_('An invalid position was given: %s.') % position)
left_boundary = min(left, new_left)
right_boundary = max(right, new_right)
left_right_change = new_left - left
gap_size = width
if left_right_change > 0:
gap_size = -gap_size
connection = self._get_connection(instance=node)
qn = connection.ops.quote_name
opts = self.model._meta
# The level update must come before the left update to keep
# MySQL happy - left seems to refer to the updated value
# immediately after its update has been specified in the query
# with MySQL, but not with SQLite or Postgres.
move_subtree_query = """
UPDATE %(table)s
SET %(level)s = CASE
WHEN %(left)s >= %%s AND %(left)s <= %%s
THEN %(level)s - %%s
ELSE %(level)s END,
%(left)s = CASE
WHEN %(left)s >= %%s AND %(left)s <= %%s
THEN %(left)s + %%s
WHEN %(left)s >= %%s AND %(left)s <= %%s
THEN %(left)s + %%s
ELSE %(left)s END,
%(right)s = CASE
WHEN %(right)s >= %%s AND %(right)s <= %%s
THEN %(right)s + %%s
WHEN %(right)s >= %%s AND %(right)s <= %%s
THEN %(right)s + %%s
ELSE %(right)s END,
%(parent)s = CASE
WHEN %(pk)s = %%s
THEN %%s
ELSE %(parent)s END
WHERE %(tree_id)s = %%s""" % {
'table': qn(self.tree_model._meta.db_table),
'level': qn(opts.get_field(self.level_attr).column),
'left': qn(opts.get_field(self.left_attr).column),
'right': qn(opts.get_field(self.right_attr).column),
'parent': qn(opts.get_field(self.parent_attr).column),
'pk': qn(opts.pk.column),
'tree_id': qn(opts.get_field(self.tree_id_attr).column),
}
cursor = connection.cursor()
cursor.execute(move_subtree_query, [
left, right, level_change,
left, right, left_right_change,
left_boundary, right_boundary, gap_size,
left, right, left_right_change,
left_boundary, right_boundary, gap_size,
node.pk, parent.pk,
tree_id])
# Update the node to be consistent with the updated
# tree in the database.
setattr(node, self.left_attr, new_left)
setattr(node, self.right_attr, new_right)
setattr(node, self.level_attr, level - level_change)
setattr(node, self.parent_attr, parent)
node._mptt_cached_fields[self.parent_attr] = parent.pk
def _move_root_node(self, node, target, position):
"""
        Moves root node ``node`` to a different tree, inserting it
relative to the given ``target`` node as specified by
``position``.
``node`` will be modified to reflect its new tree state in the
database.
"""
left = getattr(node, self.left_attr)
right = getattr(node, self.right_attr)
level = getattr(node, self.level_attr)
tree_id = getattr(node, self.tree_id_attr)
new_tree_id = getattr(target, self.tree_id_attr)
width = right - left + 1
if node == target:
raise InvalidMove(_('A node may not be made a child of itself.'))
elif tree_id == new_tree_id:
raise InvalidMove(_('A node may not be made a child of any of its descendants.'))
space_target, level_change, left_right_change, parent, right_shift = \
self._calculate_inter_tree_move_values(node, target, position)
# Create space for the tree which will be inserted
self._create_space(width, space_target, new_tree_id)
# Move the root node, making it a child node
connection = self._get_connection(instance=node)
qn = connection.ops.quote_name
opts = self.model._meta
move_tree_query = """
UPDATE %(table)s
SET %(level)s = %(level)s - %%s,
%(left)s = %(left)s - %%s,
%(right)s = %(right)s - %%s,
%(tree_id)s = %%s,
%(parent)s = CASE
WHEN %(pk)s = %%s
THEN %%s
ELSE %(parent)s END
WHERE %(left)s >= %%s AND %(left)s <= %%s
AND %(tree_id)s = %%s""" % {
'table': qn(self.tree_model._meta.db_table),
'level': qn(opts.get_field(self.level_attr).column),
'left': qn(opts.get_field(self.left_attr).column),
'right': qn(opts.get_field(self.right_attr).column),
'tree_id': qn(opts.get_field(self.tree_id_attr).column),
'parent': qn(opts.get_field(self.parent_attr).column),
'pk': qn(opts.pk.column),
}
cursor = connection.cursor()
cursor.execute(move_tree_query, [
level_change, left_right_change, left_right_change,
new_tree_id, node.pk, parent.pk, left, right, tree_id])
# Update the former root node to be consistent with the updated
# tree in the database.
setattr(node, self.left_attr, left - left_right_change)
setattr(node, self.right_attr, right - left_right_change)
setattr(node, self.level_attr, level - level_change)
setattr(node, self.tree_id_attr, new_tree_id)
setattr(node, self.parent_attr, parent)
node._mptt_cached_fields[self.parent_attr] = parent.pk
| mit | -7,849,403,533,287,064,000 | 38.376154 | 99 | 0.543766 | false |
pickettd/SerialPort-RealTime-Data-Plotter | com_monitor.py | 1 | 5133 |
import Queue, threading, timeit, serial
from globals import *
class ComMonitorThread(threading.Thread):
""" A thread for monitoring a COM port. The COM port is
opened when the thread is started.
data_q:
Queue for received data. Items in the queue are
(data, timestamp) pairs, where data is a binary
string representing the received data, and timestamp
is the time elapsed from the thread's start (in
seconds).
error_q:
Queue for error messages. In particular, if the
serial port fails to open for some reason, an error
is placed into this queue.
port:
The COM port to open. Must be recognized by the
system.
port_baud/stopbits/parity:
Serial communication parameters
port_timeout:
The timeout used for reading the COM port. If this
value is low, the thread will return data in finer
grained chunks, with more accurate timestamps, but
it will also consume more CPU.
"""
def __init__( self,
data_q, error_q,
port_num,
port_baud,
port_stopbits = serial.STOPBITS_ONE,
port_parity = serial.PARITY_NONE,
port_timeout = 0.01):
threading.Thread.__init__(self)
self.serial_port = None
self.serial_arg = dict( port = port_num,
baudrate = port_baud,
stopbits = port_stopbits,
parity = port_parity,
timeout = port_timeout)
self.data_q = data_q
self.error_q = error_q
self.alive = threading.Event()
self.alive.set()
#------------------------------------------------------
def getAxes(self, bytes, gforce = True):
x = bytes[0] | (bytes[1] << 8)
if(x & (1 << 16 - 1)):
x = x - (1<<16)
y = bytes[2] | (bytes[3] << 8)
if(y & (1 << 16 - 1)):
y = y - (1<<16)
z = bytes[4] | (bytes[5] << 8)
if(z & (1 << 16 - 1)):
z = z - (1<<16)
x = x * SCALE_MULTIPLIER
y = y * SCALE_MULTIPLIER
z = z * SCALE_MULTIPLIER
if gforce == False:
x = x * EARTH_GRAVITY_MS2
y = y * EARTH_GRAVITY_MS2
z = z * EARTH_GRAVITY_MS2
x = round(x, 3)
y = round(y, 3)
z = round(z, 3)
return {"x": x, "y": y, "z": z}
#------------------------------------------------------
def run(self):
try:
if self.serial_port:
self.serial_port.close()
setTimeout = self.serial_arg['timeout']
self.serial_arg['timeout'] = 100
self.serial_port = serial.Serial(**self.serial_arg)
print(self.serial_port.readline())
self.serial_port.write("A")
print(self.serial_port.readline())
print(self.serial_port.readline())
self.serial_port.timeout = setTimeout
except serial.SerialException, e:
self.error_q.put(e.message)
return
# Restart the clock
startTime = timeit.default_timer()
while self.alive.isSet():
Line = self.serial_port.readline()
bytes = Line.split()
#print bytes
#use map(int) for simulation
#data = map(ord, bytes)
data = bytes
qdata = [0,0,0]
if len(data) == 0:
#print "zero data"
timestamp = timeit.default_timer() - startTime
self.data_q.put((qdata, timestamp))
if len(data) > 0:
print "got data"
timestamp = timeit.default_timer() - startTime
qdata = [4,4,4]
self.data_q.put((qdata, timestamp))
'''
if len(data) == 6:
timestamp = timeit.default_timer() - startTime
#data = list(map(ord, list(Line)))
print "Line", Line
print "bytes", bytes
print "data", data
axes = self.getAxes(data)
print " x = %.3fG" % ( axes['x'] )
print " y = %.3fG" % ( axes['y'] )
print " z = %.3fG" % ( axes['z'] )
qdata[0] = axes['x']
qdata[1] = axes['y']
qdata[2] = axes['z']
print "qdata :", qdata
#timestamp = timeit.default_timer()
self.data_q.put((qdata, timestamp))
'''
# clean up
if self.serial_port:
self.serial_port.close()
def join(self, timeout=None):
self.alive.clear()
threading.Thread.join(self, timeout)
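# Example usage (illustrative sketch only, not part of the original module;
# the port name and baud rate are assumptions):
#
#   data_q, error_q = Queue.Queue(), Queue.Queue()
#   monitor = ComMonitorThread(data_q, error_q, '/dev/ttyUSB0', 9600)
#   monitor.start()
#   if not error_q.empty():
#       print(error_q.get())          # e.g. the serial port failed to open
#   else:
#       qdata, timestamp = data_q.get()
#   monitor.join(0.01)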
| mit | 5,029,071,917,656,844,000 | 31.283019 | 64 | 0.447691 | false |
fintech-circle/edx-platform | lms/djangoapps/gating/api.py | 1 | 4680 | """
API for the gating djangoapp
"""
from collections import defaultdict
import json
import logging
from lms.djangoapps.courseware.entrance_exams import get_entrance_exam_content
from openedx.core.lib.gating import api as gating_api
from opaque_keys.edx.keys import UsageKey
from util import milestones_helpers
log = logging.getLogger(__name__)
@gating_api.gating_enabled(default=False)
def evaluate_prerequisite(course, subsection_grade, user):
"""
Evaluates any gating milestone relationships attached to the given
subsection. If the subsection_grade meets the minimum score required
by dependent subsections, the related milestone will be marked
fulfilled for the user.
"""
prereq_milestone = gating_api.get_gating_milestone(course.id, subsection_grade.location, 'fulfills')
if prereq_milestone:
gated_content_milestones = defaultdict(list)
for milestone in gating_api.find_gating_milestones(course.id, content_key=None, relationship='requires'):
gated_content_milestones[milestone['id']].append(milestone)
gated_content = gated_content_milestones.get(prereq_milestone['id'])
if gated_content:
for milestone in gated_content:
min_percentage = _get_minimum_required_percentage(milestone)
subsection_percentage = _get_subsection_percentage(subsection_grade)
if subsection_percentage >= min_percentage:
milestones_helpers.add_user_milestone({'id': user.id}, prereq_milestone)
else:
milestones_helpers.remove_user_milestone({'id': user.id}, prereq_milestone)
def _get_minimum_required_percentage(milestone):
"""
Returns the minimum percentage requirement for the given milestone.
"""
# Default minimum score to 100
min_score = 100
requirements = milestone.get('requirements')
if requirements:
try:
min_score = int(requirements.get('min_score'))
except (ValueError, TypeError):
log.warning(
u'Gating: Failed to find minimum score for gating milestone %s, defaulting to 100',
json.dumps(milestone)
)
return min_score
def _get_subsection_percentage(subsection_grade):
"""
Returns the percentage value of the given subsection_grade.
"""
return _calculate_ratio(subsection_grade.graded_total.earned, subsection_grade.graded_total.possible) * 100.0
def _calculate_ratio(earned, possible):
"""
Returns the percentage of the given earned and possible values.
"""
return float(earned) / float(possible) if possible else 0.0
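# Illustrative example (not part of the original module): a subsection grade of
# 3 earned out of 4 possible gives _calculate_ratio(3, 4) == 0.75, i.e. a
# subsection percentage of 75.0, which evaluate_prerequisite() compares against
# the gating milestone's minimum score (100 by default).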
def evaluate_entrance_exam(course_grade, user):
"""
Evaluates any entrance exam milestone relationships attached
to the given course. If the course_grade meets the
minimum score required, the dependent milestones will be marked
fulfilled for the user.
"""
course = course_grade.course_data.course
if milestones_helpers.is_entrance_exams_enabled() and getattr(course, 'entrance_exam_enabled', False):
if get_entrance_exam_content(user, course):
exam_chapter_key = get_entrance_exam_usage_key(course)
exam_score_ratio = get_entrance_exam_score_ratio(course_grade, exam_chapter_key)
if exam_score_ratio >= course.entrance_exam_minimum_score_pct:
relationship_types = milestones_helpers.get_milestone_relationship_types()
content_milestones = milestones_helpers.get_course_content_milestones(
course.id,
exam_chapter_key,
relationship=relationship_types['FULFILLS']
)
# Mark each entrance exam dependent milestone as fulfilled by the user.
for milestone in content_milestones:
milestones_helpers.add_user_milestone({'id': user.id}, milestone)
def get_entrance_exam_usage_key(course):
"""
Returns the UsageKey of the entrance exam for the course.
"""
return UsageKey.from_string(course.entrance_exam_id).replace(course_key=course.id)
def get_entrance_exam_score_ratio(course_grade, exam_chapter_key):
"""
Returns the score for the given chapter as a ratio of the
aggregated earned over the possible points, resulting in a
decimal value less than 1.
"""
try:
earned, possible = course_grade.score_for_chapter(exam_chapter_key)
except KeyError:
earned, possible = 0.0, 0.0
log.warning(u'Gating: Unexpectedly failed to find chapter_grade for %s.', exam_chapter_key)
return _calculate_ratio(earned, possible)
| agpl-3.0 | -109,539,272,815,895,200 | 39.344828 | 113 | 0.678846 | false |
starnose/liar | liarutils.py | 1 | 3600 | ###################################################################################
# liarutils.py, utility calls for liar
###################################################################################
#
# Copyright 2013 David Hicks (Starnose Ltd)
#
# This file is part of Liar
#
# Liar is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3
# as published by the Free Software Foundation,
#
# Liar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Liar. If not, see <http://www.gnu.org/licenses/>.
#
###################################################################################
#
# A couple of utility functions, both of which could be done a lot better.
# hexdump is used for output formatting and is a hacky translation of a C function
# createServerCert is a mess of os.system stuff for generating certificates from
# templates
#
###################################################################################
import string
import os
import random
def hexdump(data="", length=0):
output=""
p = 0
major = 0
minor = 0
printbuf = ""
plaintext=""
output+= " | 0 1 2 3 4 5 6 7 8 9 A B C D E F\n----+--------------------------------------------------"
while p < length:
if (minor % 16) == 0:
output+= " %s\n %3x|" % (printbuf,major)
plaintext = plaintext + printbuf
printbuf = ""
major += 1;
if (minor % 8) == 0:
if (minor % 16) != 0:
output+= " "
output+= " %2.2x" % ( ord(data[p]) )
if data[p] in string.letters or data[p] in string.digits or data[p] in string.punctuation:
if data[p] == '\x0a':
printbuf += '.'
else:
printbuf += data[p]
else:
printbuf +='.'
minor += 1
p += 1
plaintext = plaintext + printbuf
major = minor % 16
if major != 0:
major = (16 - major) * 3
if major > 24:
major += 1
while major != 0:
printbuf = " " + printbuf
major -= 1
output+= " %s\n\n" % (printbuf)
return output, plaintext
def createServerCert(servername, rootcert, rootkey, templatefile, outputdir):
random.seed()
if not (os.path.isfile(os.path.join(outputdir,"%s.key" % servername)) and os.path.isfile(os.path.join(outputdir,"%s.cert" % servername))):
#DIRTY, VERY VERY DIRTY INDEED - this should probably done with real, actual python.
os.system("sed s/SERVERNAME/%s/ %s > %s/%s.tmpl" % (servername, templatefile, outputdir, servername) )
os.system("sed s/SERVERSERIAL/%d/ %s/%s.tmpl > %s/%s.tmp" % (random.randint(0,32767), outputdir, servername, outputdir, servername) )
os.system("certtool --generate-privkey --bits 512 --outfile %s/%s.key" % (outputdir, servername) )
os.system("certtool --generate-request --load-privkey %s/%s.key --outfile %s/%s.req --template %s/%s.tmp" %
(outputdir, servername, outputdir, servername, outputdir, servername) )
os.system("certtool --generate-certificate --load-request %s/%s.req --outfile %s/%s.cert --load-ca-certificate %s --load-ca-privkey %s --template %s/%s.tmp" %
(outputdir, servername, outputdir, servername, rootcert, rootkey, outputdir, servername) )
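# Example usage (illustrative sketch only, not part of the original module):
#
#   formatted, plaintext = hexdump(received, len(received))
#   # 'formatted' holds the 16-bytes-per-row hex table printed by the proxy,
#   # 'plaintext' the printable characters with '.' for everything else.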
| gpl-3.0 | 8,115,426,600,985,354,000 | 40.37931 | 164 | 0.553611 | false |
Pinyto/cloud | keyserver/models.py | 1 | 4666 | # coding=utf-8
"""
Pinyto cloud - A secure cloud database for your personal data
Copyright (C) 2019 Pina Merkert <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from django.db import models
from hashlib import sha256
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
from keyserver.helper import create_salt
class Account(models.Model):
"""
A Pinyto account consists of a username, a password and
a pair of asymmetric keys. The keys are used for the
authentication with a pinyto server which stores the
data. Username and password are the credentials memorized
by the user which he can use to access his keys.
The password is not stored but a hash. If a password is
supplied the salt is added and the concatenation is hashed.
    The hash of the hash gets hashed until the password has been
    hashed hash_iterations times. The algorithm which is used
is SHA256. After the last iteration the hash can be compared
to the stored hash. If they match the password is correct.
"""
name = models.CharField(max_length=30, primary_key=True)
salt = models.CharField(max_length=10)
hash_iterations = models.IntegerField(default=10000)
hash = models.CharField(max_length=32)
N = models.CharField(max_length=1400)
e = models.BigIntegerField()
d = models.CharField(max_length=1400)
p = models.CharField(max_length=800)
q = models.CharField(max_length=800)
@staticmethod
def hash_password(password, salt, hash_iterations):
hash_string = password + salt
for i in range(hash_iterations):
hasher = sha256()
hasher.update(hash_string.encode('utf-8'))
hash_string = hasher.hexdigest()
return hash_string[:32]
@classmethod
def create(cls, name, password='', hash_iterations=420):
"""
Creates an account with hashed password, new random salt and 4096 bit RSA key pair.
:param name:
:type name: str
:param password: (technically this is an optional parameter but in reality you should not
use empty passwords)
:type password: str
:param hash_iterations: (optional)
:type hash_iterations: int
:return: An Account instance already saved to the database
:rtype: keyserver.models.Account
"""
salt = create_salt(10)
hash_string = cls.hash_password(password, salt, hash_iterations)
key = rsa.generate_private_key(public_exponent=65537, key_size=4096, backend=default_backend())
account = cls(name=name,
salt=salt,
hash_iterations=hash_iterations,
hash=hash_string,
N=str(key.public_key().public_numbers().n),
e=str(key.public_key().public_numbers().e),
d=str(key.private_numbers().d),
p=str(key.private_numbers().p),
q=str(key.private_numbers().q))
account.save()
return account
def check_password(self, password):
"""
This method checks if the given password is valid by comparing it to the stored hash.
:param password:
:type password: str
:rtype: boolean
"""
hash_string = self.hash_password(password, self.salt, self.hash_iterations)
return hash_string == self.hash
def change_password(self, password, hash_iterations=420):
"""
Changes the password to the supplied one.
hash_iterations are optional but can be used to upgrade the passwords to faster servers.
:param password:
:type password: str
:param hash_iterations: (optional)
:type hash_iterations: int
"""
self.salt = create_salt(10)
hash_string = self.hash_password(password, self.salt, hash_iterations)
self.hash_iterations = hash_iterations
        self.hash = hash_string
self.save()
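# Example usage (illustrative sketch only, not part of the original module;
# the username and passwords are made up):
#
#   account = Account.create('alice', 'correct horse battery staple')
#   assert account.check_password('correct horse battery staple')
#   account.change_password('new password', hash_iterations=10000)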
| gpl-3.0 | -5,410,453,920,801,267,000 | 39.224138 | 103 | 0.661166 | false |
digitie/magneto | core/util.py | 1 | 3603 | # -*- coding: UTF-8 -*-
from time import sleep
import logging
import serial
import sys
import binascii
from log import MainLogger
def enum(*sequential, **named):
enums = dict(zip(sequential, range(len(sequential))), **named)
reverse = dict((value, key) for key, value in enums.iteritems())
enums['reverse_mapping'] = reverse
return type('Enum', (), enums)
"""This file provides useful utilities for the wanglib package."""
class InstrumentError(Exception):
"""Raise this when talking to instruments fails."""
pass
def is_ascii(s):
return all(ord(c) < 128 for c in s)
def show_newlines(string):
"""
replace CR+LF with the words "CR" and "LF".
useful for debugging.
"""
if is_ascii(string):
try:
return string.replace('\r', '<CR>').replace('\n', '<LF>')
except AttributeError:
return binascii.hexlify(bytearray(string))
else:
try:
return binascii.hexlify(string)
except TypeError:
return binascii.hexlify(bytearray(string))
class Serial(serial.Serial):
"""
Extension of the standard serial class.
to log whatever's written or read, pass a filename into
the 'log' kwarg.
"""
def __init__(self, *args, **kwargs):
# make an event logger
self.logger = MainLogger
# take 'log' kwarg.
self.logfile = kwargs.pop('log', False)
if self.logfile:
self.start_logging(self.logfile)
# take default termination character
# by default, append empty string
self.term_chars = kwargs.pop('term_chars', '')
# hand off to standard serial init function
super(Serial, self).__init__(*args, **kwargs)
    def start_logging(self, logfile=None):
        """ start logging read/write data to a file (or stdout). """
        # log to the given file if a filename was provided, else to stdout
        if logfile:
            lfh = logging.FileHandler(logfile)
        else:
            lfh = logging.StreamHandler(sys.stdout)
        self.logger.addHandler(lfh)
        # make log formatter
        lff = logging.Formatter('%(asctime)s %(message)s')
        lfh.setFormatter(lff)
        # set level low to log everything
        self.logger.setLevel(1)
        self.logger.debug('opened serial port')
def write(self, data):
data += self.term_chars
super(Serial, self).write(data)
self.logger.debug('write: ' + show_newlines(data))
def read(self, size=1):
resp = super(Serial, self).read(size)
#self.logger.debug(' read: ' + show_newlines(resp))
return resp
def readall(self):
"""Automatically read all the bytes from the serial port."""
return self.read(self.inWaiting())
def ask(self, query, lag=0.05):
"""
Write to the bus, then read response.
This doesn't seem to work very well.
"""
self.write(query)
sleep(lag)
return self.readall()
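# Example usage (illustrative sketch only, not part of the original module;
# the port name, baud rate, termination characters and log file name are
# assumptions):
#
#   bus = Serial('/dev/ttyUSB0', baudrate=9600, term_chars='\r\n',
#                log='instrument.log')
#   reply = bus.ask('*IDN?')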
# ------------------------------------------------------
# up to this point, this file has dealt with customizing
# communication interfaces (GPIB / RS232). What follows
# are more random (though useful) utilities.
#
# The two halves of this file serve rather disparate
# needs, and should probably be broken in two pieces.
# Before I actually do that I'd like to trim down
# dependencies in the rest of the library - I think that
# will go a long way in reducing complexity.
# ------------------------------------------------------
def num(string):
"""
convert string to number. decide whether to convert to int or float.
"""
if '.' not in string:
return int(string)
else:
return float(string) | unlicense | -5,885,136,105,583,912,000 | 28.540984 | 72 | 0.600888 | false |
robmcmullen/pyatasm | pyatasm/assemble.py | 1 | 3007 | from .pyatasm_mac65 import mac65_assemble
class Assemble(object):
def __init__(self, source, verbose=False):
self.verbose = verbose
if isinstance(source, str):
source = source.encode("utf-8")
self.errors, text = mac65_assemble(source)
self.segments = []
self.transitory_equates = {}
self.equates = {}
self.labels = {}
self.current_parser = self.null_parser
self.first_addr = None
self.last_addr = None
self.current_bytes = []
if text:
self.parse(text)
def __len__(self):
return len(self.segments)
def null_parser(self, line, cleanup=False):
pass
def source_parser(self, line, cleanup=False):
if cleanup:
if self.verbose: print("Code block: %x-%x" % (self.first_addr, self.last_addr))
self.segments.append((self.first_addr, self.last_addr, self.current_bytes))
self.first_addr = None
self.last_addr = None
self.current_bytes = []
return
lineno, addr, data, text = line[0:5], line[6:10], line[12:30], line[31:]
addr = int(addr, 16)
b = [int(a,16) for a in data.split()]
#print hex(index), b
if b:
count = len(b)
if self.first_addr is None:
self.first_addr = self.last_addr = addr
elif addr != self.last_addr:
if self.verbose: print("Code block: %x-%x" % (self.first_addr, self.last_addr))
self.segments.append((self.first_addr, self.last_addr, self.current_bytes))
self.first_addr = self.last_addr = addr
self.current_bytes = []
self.current_bytes.extend(b)
self.last_addr += count
def equates_parser(self, line, cleanup=False):
if cleanup:
return
symbol, addr = line.split(": ")
if symbol[0] == "*":
self.transitory_equates[symbol[1:].lower()] = int(addr, 16)
else:
self.equates[symbol.lower()] = int(addr, 16)
def symbol_parser(self, line, cleanup=False):
if cleanup:
return
symbol, addr = line.split(": ")
self.labels[symbol.lower()] = int(addr, 16)
def parse(self, text):
for line in text.splitlines():
line = line.strip()
if not line:
continue
if self.verbose: print("parsing:", line)
if line.startswith("Source:"):
self.current_parser(None, cleanup=True)
self.current_parser = self.source_parser
elif line == "Equates:":
self.current_parser(None, cleanup=True)
self.current_parser = self.equates_parser
elif line == "Symbol table:":
self.current_parser(None, cleanup=True)
self.current_parser = self.symbol_parser
else:
self.current_parser(line)
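# Example usage (illustrative sketch only, not part of the original module;
# whether mac65_assemble expects raw source text or a filename depends on the
# compiled wrapper, so `source` below is just a placeholder):
#
#   asm = Assemble(source, verbose=False)
#   if asm.errors:
#       print(asm.errors)
#   for first, last, data in asm.segments:
#       print(hex(first), hex(last), len(data))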
| gpl-2.0 | -70,870,082,602,270,100 | 35.228916 | 95 | 0.536748 | false |
igorcoding/asynctnt | asynctnt/_testbase.py | 1 | 8329 | import asyncio
import contextlib
import functools
import inspect
import logging
import os
import time
import unittest
import sys
import asynctnt
from asynctnt.instance import \
TarantoolSyncInstance, TarantoolSyncDockerInstance
from asynctnt import TarantoolTuple
__all__ = (
'TestCase', 'TarantoolTestCase', 'ensure_version', 'check_version'
)
@contextlib.contextmanager
def silence_asyncio_long_exec_warning():
def flt(log_record):
msg = log_record.getMessage()
return not msg.startswith('Executing ')
logger = logging.getLogger('asyncio')
logger.addFilter(flt)
try:
yield
finally:
logger.removeFilter(flt)
class TestCaseMeta(type(unittest.TestCase)):
@staticmethod
def _iter_methods(bases, ns):
for base in bases:
for methname in dir(base):
if not methname.startswith('test_'):
continue
meth = getattr(base, methname)
if not inspect.iscoroutinefunction(meth):
continue
yield methname, meth
for methname, meth in ns.items():
if not methname.startswith('test_'):
continue
if not inspect.iscoroutinefunction(meth):
continue
yield methname, meth
def __new__(mcls, name, bases, ns):
for methname, meth in mcls._iter_methods(bases, ns):
@functools.wraps(meth)
def wrapper(self, *args, __meth__=meth, **kwargs):
self.loop.run_until_complete(__meth__(self, *args, **kwargs))
ns[methname] = wrapper
return super().__new__(mcls, name, bases, ns)
class TestCase(unittest.TestCase, metaclass=TestCaseMeta):
loop = None
@classmethod
def setUpClass(cls):
use_uvloop = os.environ.get('USE_UVLOOP')
if use_uvloop:
import uvloop
uvloop.install()
cls.loop = asyncio.get_event_loop()
if use_uvloop:
import uvloop
assert isinstance(cls.loop, uvloop.Loop)
@contextlib.contextmanager
def assertRunUnder(self, delta):
st = time.monotonic()
try:
yield
finally:
if time.monotonic() - st > delta:
raise AssertionError(
'running block took longer than {}'.format(delta))
@classmethod
def ensure_future(cls, coro_or_future):
return asyncio.ensure_future(coro_or_future)
@classmethod
def sleep(cls, delay, result=None):
return asyncio.sleep(delay, result)
class TarantoolTestCase(TestCase):
DO_CONNECT = True
LOGGING_LEVEL = logging.WARNING
LOGGING_STREAM = sys.stderr
TNT_APP_LUA_PATH = None
TNT_CLEANUP = True
tnt = None
in_docker = False
@classmethod
def read_applua(cls):
if cls.TNT_APP_LUA_PATH:
with open(cls.TNT_APP_LUA_PATH, 'r') as f:
return f.read()
@classmethod
def setUpClass(cls):
TestCase.setUpClass()
logging.basicConfig(level=cls.LOGGING_LEVEL,
stream=cls.LOGGING_STREAM)
tnt, in_docker = cls._make_instance()
tnt.start()
cls.tnt = tnt
cls.in_docker = in_docker
@classmethod
def make_instance(cls):
obj, _ = cls._make_instance()
return obj
@classmethod
def _make_instance(cls, *args, **kwargs):
tarantool_docker_image = os.getenv('TARANTOOL_DOCKER_IMAGE')
tarantool_docker_tag = os.getenv('TARANTOOL_DOCKER_VERSION')
in_docker = False
if tarantool_docker_tag:
print('Running tarantool in docker: {}:{}'.format(
tarantool_docker_image or 'tarantool/tarantool',
tarantool_docker_tag))
tnt = TarantoolSyncDockerInstance(
applua=cls.read_applua(),
docker_image=tarantool_docker_image,
docker_tag=tarantool_docker_tag
)
in_docker = True
else:
unix_path = os.getenv('TARANTOOL_LISTEN_UNIX_PATH')
if not unix_path:
tnt = TarantoolSyncInstance(
port=TarantoolSyncInstance.get_random_port(),
console_port=TarantoolSyncInstance.get_random_port(),
applua=cls.read_applua(),
cleanup=cls.TNT_CLEANUP
)
else:
tnt = TarantoolSyncInstance(
host='unix/',
port=unix_path,
console_host='127.0.0.1',
applua=cls.read_applua(),
cleanup=cls.TNT_CLEANUP
)
return tnt, in_docker
@classmethod
def tearDownClass(cls):
if cls.tnt:
cls.tnt.stop()
TestCase.tearDownClass()
def setUp(self):
super(TarantoolTestCase, self).setUp()
if self.DO_CONNECT:
self.loop.run_until_complete(self.tnt_connect())
def tearDown(self):
self.loop.run_until_complete(self.tnt_disconnect())
super(TarantoolTestCase, self).tearDown()
@property
def conn(self) -> asynctnt.Connection:
return self._conn
async def tnt_connect(self, *,
username=None, password=None,
fetch_schema=True,
auto_refetch_schema=False,
connect_timeout=None, reconnect_timeout=1/3,
ping_timeout=0,
request_timeout=None, encoding='utf-8',
initial_read_buffer_size=None):
self._conn = asynctnt.Connection(
host=self.tnt.host,
port=self.tnt.port,
username=username,
password=password,
fetch_schema=fetch_schema,
auto_refetch_schema=auto_refetch_schema,
connect_timeout=connect_timeout,
reconnect_timeout=reconnect_timeout,
request_timeout=request_timeout,
ping_timeout=ping_timeout,
encoding=encoding,
initial_read_buffer_size=initial_read_buffer_size)
await self._conn.connect()
return self._conn
async def tnt_disconnect(self):
if hasattr(self, 'conn') and self.conn is not None:
await self._conn.disconnect()
self._conn = None
async def tnt_reconnect(self, **kwargs):
await self.tnt_disconnect()
await self.tnt_connect(**kwargs)
def assertResponseEqual(self, resp, target, *args):
tuples = []
for item in resp:
if isinstance(item, TarantoolTuple):
item = list(item)
tuples.append(item)
return self.assertListEqual(tuples, target, *args)
def assertResponseEqualKV(self, resp, target, *args):
tuples = []
for item in resp:
if isinstance(item, TarantoolTuple):
item = dict(item)
tuples.append(item)
return self.assertListEqual(tuples, target, *args)
def ensure_version(*, min=None, max=None,
min_included=False, max_included=False):
def check_version_wrap(f):
@functools.wraps(f)
async def wrap(self, *args, **kwargs):
if check_version(self, self.conn.version,
min=min, max=max,
min_included=min_included,
max_included=max_included):
res = f(self, *args, **kwargs)
if inspect.isawaitable(res):
return await res
return res
return wrap
return check_version_wrap
def check_version(test, version, *, min=None, max=None,
min_included=False, max_included=False):
if min and (version < min or (min_included and version <= min)):
test.skipTest(
'version mismatch - required min={} got={}'.format(
min, version))
return False
if max and (version > max or (max_included and version >= max)):
test.skipTest(
'version mismatch - required max={} got={}'.format(
max, version))
return False
return True
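# Example usage (illustrative sketch only, not part of the original module):
#
#   class MyTestCase(TarantoolTestCase):
#       @ensure_version(min=(1, 7))
#       async def test_ping(self):
#           await self.conn.ping()
#
# TestCaseMeta wraps the coroutine in loop.run_until_complete(), and
# ensure_version skips the test when the connected Tarantool is too old.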
| apache-2.0 | -4,799,521,511,850,995,000 | 29.621324 | 77 | 0.559611 | false |
stcakova/Battle-city | world.py | 1 | 1375 | import pygame
from pygame import rect
from constants import *
from part import Wall, Brick
class World:
world = [[None for x in range(26)] for y in range(19)]
def __init__(self):
self.brick = pygame.image.load("pictures/brick.jpeg")
self.wall = pygame.image.load("pictures/wall.png")
self.screen = pygame.display.set_mode(SCREEN_SIZE)
self.phoenix = pygame.image.load("pictures/phoenix.jpg")
self.phoenix = pygame.transform.scale(
self.phoenix, (PART_SIZE * 2, PART_SIZE * 2))
def extract_world(self, multiplayer):
if not multiplayer:
maps = open(MAP)
else:
maps = open(MULTIPLAYER_MAP)
for height, line in enumerate(maps):
for width, element in enumerate(line):
if element == 'B' or element == 'F':
self.world[height][width] = Wall(
(width * PART_SIZE, height * PART_SIZE))
elif element == '#':
self.world[height][width] = Brick(
(width * PART_SIZE, height * PART_SIZE))
def draw_world(self, multiplayer):
for wall in walls:
if wall.existing:
wall.draw()
self.screen.blit(self.phoenix, (440, 640))
if multiplayer:
self.screen.blit(self.phoenix, (440, 0))
| gpl-2.0 | 8,877,047,811,533,497,000 | 33.375 | 64 | 0.555636 | false |
DTOcean/dtocean-core | test_data/inputs_wp2_wave.py | 1 | 3952 | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 09 10:39:38 2015
@author: 108630
"""
import os
from datetime import datetime, timedelta
import numpy as np
dir_path = os.path.dirname(__file__)
# Setup
x = np.linspace(0.,1000.,20.)
y = np.linspace(0.,300.,20.)
nx = len(x)
ny = len(y)
# Bathymetry?
X, Y = np.meshgrid(x,y)
Z = -X * 0.1 - 1
depths = Z.T[:, :, np.newaxis]
sediments = np.chararray((nx,ny,1), itemsize=20)
sediments[:] = "rock"
strata = {"values": {'depth': depths,
'sediment': sediments},
"coords": [x, y, ["layer 1"]]}
# Mannings
#geoxyz = np.vstack((X.ravel(),Y.ravel(),G.ravel())).T
G = np.zeros((nx, ny)) + 0.3
geo_raw = {"values": G,
"coords": [x, y]}
sample_size = 1000
dates = []
dt = datetime(2010, 12, 01)
step = timedelta(seconds=3600)
for _ in xrange(sample_size):
dates.append(dt)
dt += step
Hm0 = 9. * np.random.random_sample(sample_size)
direction = 360. * np.random.random_sample(sample_size)
Te = 15. * np.random.random_sample(sample_size)
wave_series = {"DateTime": dates,
"Te": Te,
"Hm0": Hm0,
"Dir": direction}
# Fixed array layout
pos = [(450., 100.),
(550., 100.),
(450., 150.),
(550., 150.)]
FixedArrayLayout = np.array(pos)
#wave_xgrid = None
#B= np.array([0.,270.])/180*np.pi
#H= np.array([1.])
#T= np.array([6.])
#p= 1.0/len(B)/len(H)/len(T)* np.ones((len(T),len(H),len(B)))
#
#occurrence_matrix_coords = [T,H,B]
#wave_xgrid = {"values": p,
# "coords": occurrence_matrix_coords}
lease_area = np.array([[50., 50.],[950., 50.],[950., 250.],[50., 250.]],dtype=float)
power_law_exponent = np.array([7.])
nogo_areas = {"a": np.array([[0, 0],[.1, 0],[.1, .1],[0, .1]])}
rated_array_power = 5
main_direction = None
blockage_ratio = 1.
spectrum_type_farm = 'JONSWAP'
spectrum_gamma_farm = 3.3
spectrum_dir_spreading_farm = 0.
point_SSH = 0.
#user_array_option = 'rectangular'
#user_array_layout = None
user_array_option = 'User Defined Fixed'
user_array_layout = FixedArrayLayout
wave_data_directory = os.path.abspath(os.path.join(dir_path, "nemoh"))
float_flag = False
min_install = -np.inf
max_install = 0.
min_dist_x = 40.
min_dist_y = 40.
yaw_angle = 0.
rated_power_device = 1
op_threshold = 0
landing_point = (0.,0.)
test_data = {'bathymetry.layers': strata,
'corridor.landing_point': landing_point,
'device.installation_depth_max': max_install,
'device.installation_depth_min': min_install,
'device.minimum_distance_x': min_dist_x,
'device.minimum_distance_y': min_dist_y,
'options.optimisation_threshold': op_threshold,
'device.power_rating': rated_power_device,
'device.wave_data_directory': wave_data_directory,
'device.yaw': yaw_angle,
'farm.blockage_ratio': blockage_ratio,
'bathymetry.mannings': geo_raw,
'site.lease_boundary': lease_area,
'project.main_direction': main_direction,
'farm.nogo_areas': nogo_areas,
# 'farm.point_sea_surface_height': point_SSH,
# 'farm.power_law_exponent': power_law_exponent,
'project.rated_power': rated_array_power,
'farm.spec_gamma': spectrum_gamma_farm,
'farm.spec_spread': spectrum_dir_spreading_farm,
'farm.spectrum_name': spectrum_type_farm,
# 'farm.wave_occurrence': wave_xgrid,
'farm.wave_series': wave_series,
'options.user_array_layout': user_array_layout,
'options.user_array_option': user_array_option}
if __name__ == "__main__":
from dtocean_core.utils.files import pickle_test_data
file_path = os.path.abspath(__file__)
pkl_path = pickle_test_data(file_path, test_data)
print "generate test data: {}".format(pkl_path)
| gpl-3.0 | 6,940,826,449,139,902,000 | 27.846715 | 84 | 0.584008 | false |
qsnake/gpaw | gpaw/test/xcatom.py | 1 | 1515 | import numpy as np
import numpy.random as ra
from gpaw.setup import create_setup
from gpaw.xc import XC
from gpaw.test import equal
x = 0.000001
ra.seed(8)
for xc in ['LDA', 'PBE']:
print xc
xc = XC(xc)
s = create_setup('N', xc)
ni = s.ni
nii = ni * (ni + 1) // 2
D_p = 0.1 * ra.random(nii) + 0.2
H_p = np.zeros(nii)
E = s.xc_correction.calculate(xc,D_p.reshape(1, -1),
H_p.reshape(1, -1))
dD_p = x * ra.random(nii)
dE = np.dot(H_p, dD_p) / x
D_p += dD_p
Ep = s.xc_correction.calculate(xc,D_p.reshape(1, -1),
H_p.reshape(1, -1))
D_p -= 2 * dD_p
Em = s.xc_correction.calculate(xc,D_p.reshape(1, -1),
H_p.reshape(1, -1))
print dE, dE - 0.5 * (Ep - Em) / x
equal(dE, 0.5 * (Ep - Em) / x, 1e-6)
Ems = s.xc_correction.calculate(xc,np.array(
[0.5 * D_p, 0.5 * D_p]), np.array([H_p, H_p]))
print Em - Ems
equal(Em, Ems, 1.0e-12)
D_sp = 0.1 * ra.random((2, nii)) + 0.2
H_sp = np.zeros((2, nii))
E = s.xc_correction.calculate(xc, D_sp, H_sp)
dD_sp = x * ra.random((2, nii))
dE = np.dot(H_sp.ravel(), dD_sp.ravel()) / x
D_sp += dD_sp
Ep = s.xc_correction.calculate(xc, D_sp, H_sp)
D_sp -= 2 * dD_sp
Em = s.xc_correction.calculate(xc, D_sp, H_sp)
print dE, dE - 0.5 * (Ep - Em) / x
equal(dE, 0.5 * (Ep - Em) / x, 1e-6)
| gpl-3.0 | -138,882,968,460,386,080 | 30.5625 | 77 | 0.473267 | false |
cabanm/project-euler | problem46.py | 1 | 1213 | # Project Euler - Problem 46
# --------------------------
# What is the smallest odd composite that cannot be written as the sum of a prime and twice a square?
from myMath import isPrime
from time import time
# Find primes up to a certain number and output a list of them
def primes(top):
seive = range(2, top+1)
for m in range(2, top+1):
for n in range(m, top//m+1):
p = m*n
if p<=top: seive[p-2] = 0
primes = []
for i in range(top-1):
if seive[i] != 0: primes.append(seive[i])
return primes
p_max = 10000 # These have to be high enough for the program to produce the correct answer
s_max = 1000 # => 10000, 100
start_tot = time()
prime_list = primes(p_max)[1:] # Remove the number 2
can_be_written = []
# Get a large list of composites that we can write in such a way
for p in prime_list:
for s in range(1,s_max+1):
n = p + 2*s**2
if not isPrime(n): can_be_written.append(n)
# Get large list of odd composites and check whether each element is in the "can_be_written" list
max_comp = p_max+2*s_max**2
for n in [n for n in range(3,max_comp,2) if not isPrime(n)]: # The [...] generates odd composites (start at 3: 1 is neither prime nor composite)
if n not in can_be_written:
print n
break
print "Time taken:", time()-start_tot
| gpl-2.0 | -4,701,724,964,660,752,000 | 29.325 | 101 | 0.663644 | false |
tdjordan/tortoisegit | gitgtk/addremove.py | 1 | 1219 | #
# Add/Remove dialog for TortoiseHg
#
# Copyright (C) 2007 TK Soh <[email protected]>
#
try:
import pygtk
pygtk.require("2.0")
except:
pass
import gtk
import gobject
from mercurial import ui, util, hg
from mercurial.i18n import _
from status import GStatus
def run(hgcmd='add', root='', cwd='', files=[], **opts):
u = ui.ui()
u.updateopts(debug=False, traceback=False)
repo = hg.repository(u, path=root)
cmdoptions = {
'all':False, 'clean':False, 'ignored':False, 'modified':False,
'added':True, 'removed':True, 'deleted':True, 'unknown':False, 'rev':[],
'exclude':[], 'include':[], 'debug':True,'verbose':True
}
if hgcmd == 'add':
cmdoptions['unknown'] = True
elif hgcmd == 'remove':
cmdoptions['clean'] = True
else:
raise "Invalid command '%s'" % hgcmd
dialog = GStatus(u, repo, cwd, files, cmdoptions, True)
gtk.gdk.threads_init()
gtk.gdk.threads_enter()
dialog.display()
gtk.main()
gtk.gdk.threads_leave()
if __name__ == "__main__":
import sys
opts = {}
opts['hgcmd'] = 'adda'
opts['root'] = len(sys.argv) > 1 and sys.argv[1] or ''
run(**opts)
| gpl-2.0 | 2,293,648,415,399,881,700 | 23.38 | 80 | 0.579984 | false |
vaimi/pydisambiguation | ui/qtgui.py | 1 | 6824 | from PyQt5.QtWidgets import (QWidget, QFrame, QLabel, QPushButton, QComboBox, QLineEdit,QTextEdit, QGridLayout, QApplication, QHBoxLayout, QRadioButton)
import logging
class AlgorithmRadioButton(QRadioButton):
def __init__(self, text, id=None, group=None):
super().__init__(text)
self.algorithmId = id
self.group = group
class DisambiquateApp(QApplication):
def __init__(self, sysargs, core):
super(DisambiquateApp, self).__init__(sysargs)
self.dw = DisambiquateWindow(core)
class DisambiquateWindow(QFrame):
def __init__(self, core):
super().__init__()
self.core = core
self.initUI()
def __makeLabel(self, text, tooltip):
label = QLabel(text)
label.setToolTip(tooltip)
        return label
def __makeEditBox(self):
return QLineEdit()
def __makeRadioButton(self, text, key=None, group=None):
radiobutton = AlgorithmRadioButton(text, key, group)
radiobutton.clicked.connect(self.selectionChanged)
return radiobutton
def __makeComboBox(self, items):
comboBox = QComboBox()
[comboBox.addItem(algorithm['name'], algorithm['key']) for algorithm in items]
return comboBox
def __makeHorizontalLine(self):
hLine = QFrame()
hLine.setFrameShape(QFrame.HLine)
hLine.setFrameShadow(QFrame.Sunken)
return hLine
def __initElements(self):
self.gridLayout = QGridLayout()
self.radioLayout = QHBoxLayout()
self.variantLayout = QHBoxLayout()
self.buttonLayout = QHBoxLayout()
# First row
self.wordsLabel = self.__makeLabel('Word', '')
self.wordsEdit = self.__makeEditBox()
# Second row
self.sentencesLabel = self.__makeLabel('Sentence(s)', '')
self.sentencesEdit = QTextEdit()
# Third row
self.methodLabel = self.__makeLabel('Method', '')
groupsList = [algorithm['parent'] for algorithm in self.core.getAlgorithmsInfo() if algorithm['parent'] is not None]
groupsDict = dict((group,groupsList.count(group)) for group in set(groupsList))
groups = [group for group in groupsDict if groupsDict[group] > 1]
self.algorithmsRadioButtons = []
for group in groups:
self.algorithmsRadioButtons += [self.__makeRadioButton(group.name + ' (+)', None, group)]
self.algorithmsRadioButtons += [self.__makeRadioButton(algorithm['name'], algorithm['key']) for algorithm in self.core.getAlgorithmsInfo() if algorithm['parent'] is None or algorithm['parent'] not in groups]
#
self.variantLabel = self.__makeLabel('Variant', '')
self.variantComboBox = QComboBox()
# Fourth row
self.disambiquateButton = QPushButton("Disambiquate")
# Fifth row
self.hLine = self.__makeHorizontalLine()
# Sixth row
self.outputLabel = self.__makeLabel('Sense', '')
self.outputEdit = QTextEdit()
def __setElementSettings(self):
self.outputEdit.setReadOnly(True)
self.algorithmsRadioButtons[0].setChecked(True)
self.selectionChanged()
self.disambiquateButton.clicked.connect(self.disambiquateButtonClicked)
self.gridLayout.setSpacing(10)
def __setLayout(self):
row = 1
labelColumn = 0
contentColumn = 1
self.setLayout(self.gridLayout)
self.gridLayout.addWidget(self.wordsLabel, row, labelColumn)
self.gridLayout.addWidget(self.wordsEdit, row, contentColumn)
row += 1
self.gridLayout.addWidget(self.sentencesLabel, row, labelColumn)
self.gridLayout.addWidget(self.sentencesEdit, row, contentColumn, 2, 1)
row += 2
self.gridLayout.addWidget(self.methodLabel, row, labelColumn)
self.gridLayout.addLayout(self.radioLayout, row, contentColumn)
[self.radioLayout.addWidget(button) for button in self.algorithmsRadioButtons]
self.radioLayout.addStretch(1)
row += 1
self.gridLayout.addWidget(self.variantLabel, row, labelColumn)
self.gridLayout.addLayout(self.variantLayout, row, contentColumn)
self.variantLayout.addWidget(self.variantComboBox)
#self.variantLayout.addStretch(1)
row += 1
self.gridLayout.addLayout(self.buttonLayout, row, contentColumn)
self.buttonLayout.addWidget(self.disambiquateButton)
self.buttonLayout.addStretch(1)
row += 1
self.gridLayout.addWidget(self.hLine, row, contentColumn)
row += 1
self.gridLayout.addWidget(self.outputLabel, row, labelColumn)
self.gridLayout.addWidget(self.outputEdit, row, contentColumn, 2, 1)
def initUI(self):
self.__initElements()
self.__setElementSettings()
self.__setLayout()
self.setGeometry(300, 300, 350, 300)
self.setWindowTitle('PyDisambiquate')
self.show()
def selectionChanged(self):
self.variantComboBox.clear()
for button in self.algorithmsRadioButtons:
if button.isChecked():
if button.algorithmId == None:
self.variantComboBox.setEnabled(True)
[self.variantComboBox.addItem(algorithm['name'], algorithm['key']) for algorithm in self.core.getAlgorithmsInfo() if algorithm['parent'] is button.group]
else:
self.variantComboBox.setDisabled(True)
def disambiquateButtonClicked(self):
logging.debug("Disambiquate button pressed")
self.disambiquateButton.setDisabled(True)
words = self.__getWord().strip()
sentences = self.__getSentence().strip()
if not words or not sentences:
self.disambiquateButton.setEnabled(True)
            return
logging.debug("Words content: " + str(words))
logging.debug("Sentences content: " + str(sentences))
sense = False
for button in self.algorithmsRadioButtons:
if button.isChecked():
if button.group is None:
sense = self.core.runAlgorithm(button.algorithmId, words, sentences)
break
else:
sense = self.core.runAlgorithm(self.variantComboBox.itemData(self.variantComboBox.currentIndex()), words, sentences)
break
if sense['sense']:
outText = "%s: %s" % (sense['sense'], sense['sense'].definition())
else:
outText = "Unable to make sense"
logging.debug("Made sense: " + outText)
self.outputEdit.setText(outText)
self.disambiquateButton.setEnabled(True)
def __getWord(self):
return self.wordsEdit.text()
def __getSentence(self):
return self.sentencesEdit.toPlainText()
| gpl-3.0 | -1,755,694,650,714,857,700 | 36.494505 | 215 | 0.637749 | false |
armstrong/armstrong.utils.backends | armstrong/utils/backends/base.py | 1 | 2522 | from django.conf import settings as default_settings
from django.core.exceptions import ImproperlyConfigured
try:
from importlib import import_module
except ImportError: # PY26 # pragma: no cover
from django.utils.importlib import import_module
# DEPRECATED: To be removed in Backends 2.0
import warnings
DID_NOT_HANDLE = object()
class BackendDidNotHandle(Exception):
"""The backend did not perform the expected action"""
pass
class Proxy(object):
def __init__(self, possibles, attr):
self.attr = attr
self.possibles = possibles
def __call__(self, *args, **kwargs):
for possible in self.possibles:
try:
ret = getattr(possible, self.attr, None)(*args, **kwargs)
except BackendDidNotHandle:
continue
# DEPRECATED: To be removed in Backends 2.0
if ret is DID_NOT_HANDLE:
errmsg = ("DID_NOT_HANDLE is deprecated and will be removed in "
"armstrong.utils.backends 2.0. "
"Use BackendDidNotHandle.")
warnings.warn(errmsg, DeprecationWarning, stacklevel=2)
continue
return ret
class MultipleBackendProxy(object):
def __init__(self, *others):
self.others = others
def __getattr__(self, key):
return Proxy(self.others, key)
class GenericBackend(object):
proxy_class = MultipleBackendProxy
def __init__(self, key, settings=None, defaults=None):
self.key = key
self.settings = settings or default_settings
self.defaults = defaults
@property
def configured_backend(self):
try:
return getattr(self.settings, self.key)
except AttributeError:
if self.defaults:
return self.defaults
msg = "Unable to find '%s' backend, " \
"please make sure it is in your settings" % self.key
raise ImproperlyConfigured(msg)
def get_backend(self, *args, **kwargs):
def to_backend(a):
module, backend_class = a.rsplit(".", 1)
backend_module = import_module(module)
return getattr(backend_module, backend_class)
if type(self.configured_backend) is str:
return to_backend(self.configured_backend)(*args, **kwargs)
else:
return self.proxy_class(*[to_backend(a)(*args, **kwargs)
for a in self.configured_backend])
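# Example usage (illustrative sketch only, not part of the original module;
# the setting name and backend paths are made up):
#
#   backend = GenericBackend(
#       "PAYMENT_BACKEND",
#       defaults="myapp.backends.DefaultBackend",
#   ).get_backend()
#
# When the setting holds a list/tuple of dotted paths instead of a single
# string, get_backend() returns a MultipleBackendProxy that tries each backend
# in turn until one does not raise BackendDidNotHandle.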
| apache-2.0 | -8,133,730,749,667,820,000 | 31.753247 | 80 | 0.599524 | false |
hroncok/python-brainfuck | test.py | 1 | 5155 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
# tested modules
import brainx
import image_png
#
# Class with temporary fake output
#
import sys
class FakeStdOut:
def write(self, *args, **kwargs):
pass
def flush(self):
pass
#
# Classes with tests
#
class TestBrainfuck(unittest.TestCase):
"""Tests the Brainfuck interpreter."""
def setUp(self):
self.BF = brainx.BrainFuck
        # hide output
self.out = sys.stdout
sys.stdout = FakeStdOut()
def tearDown(self):
sys.stdout = self.out
def test_bf_01(self):
"""zero current cell"""
program = self.BF('[-]', memory=b'\x03\x02', memory_pointer=1)
self.assertEqual(program.get_memory(), b'\x03\x00')
def test_bf_02(self):
"""zero all non-zero cells to left"""
program = self.BF('[[-]<]', memory=b'\x03\x03\x00\x02\x02', memory_pointer=4)
self.assertEqual(program.get_memory(), b'\x03\x03\x00\x00\x00')
def test_bf_03(self):
"""move to the first non-zero cell to left"""
program = self.BF('[<]', memory=b'\x03\x03\x00\x02\x02', memory_pointer=4)
self.assertEqual(program.memory_pointer, 2)
def test_bf_04(self):
"""move to the first non-zero cell to right"""
program = self.BF('[>]', memory=b'\x03\x03\x00\x02\x02')
self.assertEqual(program.memory_pointer, 2)
def test_bf_05(self):
"""destructive addition of the current cell to the next one"""
program = self.BF('[>+<-]', memory=b'\x03\x03')
self.assertEqual(program.get_memory(), b'\x00\x06')
def test_bf_06(self):
"""non-destructive addition of the current cell to the next one"""
program = self.BF('[>+>+<<-]>>[<<+>>-]', memory=b'\x03\x03')
self.assertEqual(program.get_memory(), b'\x03\x06\x00')
def test_bf_07(self):
"""destructive removeal of the current cell from the next one"""
program = self.BF('[>-<-]', memory=b'\x03\x05')
self.assertEqual(program.get_memory(), b'\x00\x02')
def test_bf_11(self):
r"""HelloWorld with \n"""
with open( 'test_data/hello1.b', encoding='ascii' ) as stream:
data = stream.read()
program = self.BF(data)
self.assertEqual(program.output, 'Hello World!\n')
def test_bf_12(self):
r"""HelloWorld without \n"""
with open( 'test_data/hello2.b', encoding='ascii' ) as stream:
data = stream.read()
program = self.BF(data)
self.assertEqual(program.output, 'Hello World!')
class TestBrainfuckWithInput(unittest.TestCase):
"""Tests the Brainfuck interpreter using programs with defined input."""
def setUp(self):
self.BF = brainx.BrainFuck
# hide output
self.out = sys.stdout
sys.stdout = FakeStdOut()
def tearDown(self):
sys.stdout = self.out
def test_bf_input_2(self):
"""numwarp.b with '123' input"""
with open( 'test_data/numwarp_input.b', encoding='ascii' ) as stream:
data = stream.read()
program = self.BF(data)
self.assertEqual(program.output, ' /\\\n /\\\n /\\ /\n / \n \\ \\/\n \\\n \n')
class TestPNG(unittest.TestCase):
"""Tests correct load of PNG images."""
def setUp(self):
self.png = image_png.PngReader
        # hide output
self.out = sys.stdout
sys.stdout = FakeStdOut()
def tearDown(self):
sys.stdout = self.out
def test_png_01(self):
"""only support PNGs"""
self.assertRaises( image_png.PNGWrongHeaderError, self.png, 'test_data/sachovnice.jpg' )
def test_png_02(self):
"""only support some PNGs"""
self.assertRaises( image_png.PNGNotImplementedError, self.png, 'test_data/sachovnice_paleta.png' )
def test_png_03(self):
"""simple PNG load"""
image = self.png('test_data/sachovnice.png')
self.assertEqual( image.rgb, [[(255, 0, 0), (0, 255, 0), (0, 0, 255)], [(255, 255, 255), (127, 127, 127), (0, 0, 0)], [(255, 255, 0), (255, 0, 255), (0, 255, 255)]] )
class TestBrainloller(unittest.TestCase):
"""Tests the BrainLoller interpreter."""
def setUp(self):
self.BF = brainx.BrainFuck
self.BL = brainx.BrainLoller
# hide output
self.out = sys.stdout
sys.stdout = FakeStdOut()
def tearDown(self):
sys.stdout = self.out
def test_bl_1a(self):
"""load data from HelloWorld.png image"""
obj = self.BL('test_data/HelloWorld.png')
self.assertEqual(obj.data, '>+++++++++[<++++++++>-]<.>+++++++[<++++>-]<+.+++++++..+++.>>>++++++++[<++++>-]<.>>>++++++++++[<+++++++++>-]<---.<<<<.+++.------.--------.>>+.')
def test_bl_1b(self):
"""run the program from HelloWorld.png image"""
obj = self.BL('test_data/HelloWorld.png')
self.assertEqual(obj.program.output, 'Hello World!')
#
# Run the tests when this script is run
#
if __name__ == '__main__':
unittest.main()
| isc | 4,899,451,979,865,395,000 | 29.844311 | 179 | 0.562027 | false |
piotrmaslanka/satella | satella/configuration/schema/basic.py | 1 | 6186 | import codecs
import os
import re
import typing as tp
from satella.exceptions import ConfigurationValidationError
from .base import Descriptor, ConfigDictValue
from .registry import register_custom_descriptor
@staticmethod
def _make_boolean(v: tp.Any) -> bool:
if isinstance(v, str):
if v.upper() == 'TRUE':
return True
elif v.upper() == 'FALSE':
return False
else:
raise ConfigurationValidationError('Unknown value of "%s" posing to be a bool'
% (v,))
else:
return bool(v)
@register_custom_descriptor('bool')
class Boolean(Descriptor):
"""
This value must be a boolean, or be converted to one
"""
BASIC_MAKER = _make_boolean
@register_custom_descriptor('int')
class Integer(Descriptor):
"""
This value must be an integer, or be converted to one
"""
BASIC_MAKER = int
@register_custom_descriptor('float')
class Float(Descriptor):
"""
This value must be a float, or be converted to one
"""
BASIC_MAKER = float
@register_custom_descriptor('str')
class String(Descriptor):
"""
This value must be a string, or be converted to one
"""
BASIC_MAKER = str
class FileObject:
"""
What you get for values in schema of :class:`~satella.configuration.schema.File`.
This object is comparable and hashable, and is equal to the string of it's path
"""
__slots__ = 'path',
def __init__(self, path: str):
self.path = path
def __repr__(self):
return '<File object %s>' % (self.path,)
def __str__(self):
return self.path
def __eq__(self, other) -> bool:
return self.path == str(other) and isinstance(other, FileObject)
def __hash__(self) -> int:
return hash(self.path)
def get_value(self, encoding: tp.Optional[str] = None) -> tp.Union[str, bytes]:
"""
Read in the entire file into memory
:param encoding: optional encoding to apply. If None given, bytes will be returned
:return: file contents
"""
with open(self.path, 'rb') as f_in:
data = f_in.read()
if encoding:
return data.decode(encoding)
else:
return data
def open(self, mode: str):
"""
Open the file in specified mode
:param mode: mode to open the file in
:return: file handle
"""
return open(self.path, mode)
class DirectoryObject:
"""
What you get for values in schema of :class:`~satella.configuration.schema.Directory`.
This object is comparable and hashable, and is equal to the string of it's path
"""
__slots__ = 'path',
def __init__(self, path: str):
self.path = path
def __repr__(self):
return '<Directory object %s>' % (self.path,)
def __str__(self):
return self.path
def __eq__(self, other) -> bool:
return self.path == str(other) and isinstance(other, DirectoryObject)
def __hash__(self) -> int:
return hash(self.path)
def get_files(self) -> tp.Iterable[str]:
"""
Return a list of files inside this directory
:return:
"""
return os.listdir(self.path)
@staticmethod
def _make_file(v: str) -> FileObject:
if not os.path.isfile(v):
raise ConfigurationValidationError('Expected to find a file under %s'
% (v,))
return FileObject(v)
@register_custom_descriptor('file')
class File(Descriptor):
"""
This value must be a valid path to a file. The value in your schema will be
an instance of :class:`~satella.configuration.schema.basic.FileObject`
"""
BASIC_MAKER = _make_file
@register_custom_descriptor('file_contents')
class FileContents(Descriptor):
"""
This value must be a valid path to a file. The value in your schema will be
the contents of this file, applied with encoding (if given). By default, bytes will be read in
"""
def __init__(self, encoding: tp.Optional[str] = None, strip_afterwards: bool = False):
super().__init__()
self.encoding = encoding
self.strip_afterwards = strip_afterwards
def BASIC_MAKER(self, c: str):
if not self.encoding:
with open(c, 'rb') as f_in:
y = f_in.read()
else:
with codecs.open(c, 'r', encoding=self.encoding) as f_in:
y = f_in.read()
if self.strip_afterwards:
y = y.strip()
return y
@staticmethod
def _make_directory(v: str) -> DirectoryObject:
if not os.path.isdir(v):
raise ConfigurationValidationError('Expected to find a directory under %s'
% (v,))
return DirectoryObject(v)
@register_custom_descriptor('dir')
class Directory(Descriptor):
"""
This value must be a valid path to a file. The value in your schema will be
an instance of :class:`~satella.configuration.schema.basic.FileObject`
"""
BASIC_MAKER = _make_directory
class Regexp(String):
"""
Base class for declaring regexp-based descriptors. Overload it's attribute REGEXP. Use as
following:
>>> class IPv6(Regexp):
>>> REGEXP = '(([0-9a-f]{1,4}:)' ...
"""
__slots__ = ('regexp',)
REGEXP = r'.*'
def __init__(self):
super().__init__()
if isinstance(self.REGEXP, str):
self.regexp = re.compile(self.REGEXP)
else:
self.regexp = self.REGEXP
def __call__(self, value: ConfigDictValue) -> str:
value = super(Regexp, self).__call__(value)
match = self.regexp.match(value)
if not match:
raise ConfigurationValidationError('value does not match %s' % (self.REGEXP.pattern,),
value)
return match.group(0)
def __str__(self):
return 'Regexp(%s)' % (self.REGEXP.pattern,)
@register_custom_descriptor('ipv4')
class IPv4(Regexp):
"""
This must be a valid IPv4 address (no hostnames allowed)
"""
REGEXP = r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}'
| bsd-3-clause | 4,943,166,276,343,785,000 | 25.435897 | 98 | 0.585031 | false |
citrix-openstack-build/python-cinderclient | cinderclient/v2/quotas.py | 1 | 1597 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinderclient import base
class QuotaSet(base.Resource):
@property
def id(self):
"""Needed by base.Resource to self-refresh and be indexed."""
return self.tenant_id
def update(self, *args, **kwargs):
self.manager.update(self.tenant_id, *args, **kwargs)
class QuotaSetManager(base.Manager):
resource_class = QuotaSet
def get(self, tenant_id):
if hasattr(tenant_id, 'tenant_id'):
tenant_id = tenant_id.tenant_id
return self._get("/os-quota-sets/%s" % (tenant_id), "quota_set")
def update(self, tenant_id, **updates):
body = {'quota_set': {'tenant_id': tenant_id}}
for update in updates.keys():
body['quota_set'][update] = updates[update]
self._update('/os-quota-sets/%s' % (tenant_id), body)
def defaults(self, tenant_id):
return self._get('/os-quota-sets/%s/defaults' % tenant_id,
'quota_set')
| apache-2.0 | 1,890,877,957,061,482,800 | 32.270833 | 78 | 0.647464 | false |
aaronmckinstry706/twitter-crime-prediction | src/jobs/crime_prediction/run.py | 1 | 13899 | import datetime
import logging
import operator
import os
import sys
import pyspark as pyspark
import pyspark.ml.feature as feature
import pyspark.ml.classification as classification
import pyspark.ml as ml
import pyspark.ml.clustering as clustering
import pyspark.sql as sql
import pyspark.sql.functions as functions
import pyspark.sql.types as types
import twokenize
import grid
LOGGER = logging.getLogger(__name__)
FORMATTER = logging.Formatter(
"[%(asctime)s] %(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s")
FILE_HANDLER = logging.FileHandler('script_log.txt')
FILE_HANDLER.setFormatter(FORMATTER)
LOGGER.addHandler(FILE_HANDLER)
LOGGER.setLevel(logging.DEBUG)
LOGGER.info("Starting run.")
sc = pyspark.SparkContext()
# From https://stackoverflow.com/a/36218558 .
def sparkImport(module_name, module_directory):
"""
Convenience function.
Tells the SparkContext sc (must already exist) to load
module module_name on every computational node before
executing an RDD.
Args:
module_name: the name of the module, without ".py".
module_directory: the path, absolute or relative, to
the directory containing module
module_Name.
Returns: none.
"""
module_path = os.path.abspath(
module_directory + "/" + module_name + ".py")
sc.addPyFile(module_path)
# --------------------------------------------------------------------------------------------------
# PART 0: Define some useful parameters that define our task.
# --------------------------------------------------------------------------------------------------
# We are only considering data between 1 and 31 (inclusive) days prior to the prediction date.
NUM_DAYS = 31
PREDICTION_DATE = datetime.datetime(2015, 3, 3)
HISTORICAL_CUTOFF_DATE = PREDICTION_DATE - datetime.timedelta(days=31)
# We're only considering tweets and complaints within the given grid square.
# Southwest corner of New York:
# lat = 40.488320, lon = -74.290739
# Northeast corner of New York:
# lat = 40.957189, lon = -73.635679
latlongrid = grid.LatLonGrid(
lat_min=40.488320,
lat_max=40.957189,
lon_min=-74.290739,
lon_max=-73.635679,
lat_step=grid.get_lon_delta(1000, (40.957189 - 40.488320)/2.0),
lon_step=grid.get_lat_delta(1000))
# PART 1: Get topic distributions.
sparkImport("twokenize", ".")
sparkImport('grid', '.')
ss = (sql.SparkSession.builder.appName("TwitterTokenizing")
.getOrCreate())
tweets_schema = types.StructType([
types.StructField('id', types.LongType()),
types.StructField('timestamp', types.LongType()),
types.StructField('postalCode', types.StringType()),
types.StructField('lon', types.DoubleType()),
types.StructField('lat', types.DoubleType()),
types.StructField('tweet', types.StringType()),
types.StructField('user_id', types.LongType()),
types.StructField('application', types.StringType()),
types.StructField('source', types.StringType())
])
tweets_df = ss.read.csv('tweets2.csv',
escape='"',
header='true',
schema=tweets_schema,
mode='DROPMALFORMED')
tweets_df = tweets_df.select(['timestamp', 'lon', 'lat', 'tweet'])
date_column = (tweets_df['timestamp'].cast(types.TimestampType())
.cast(types.DateType()))
tweets_df = (tweets_df.withColumn('date', date_column)
.drop('timestamp'))
date_to_column = functions.lit(PREDICTION_DATE)
date_from_column = functions.lit(HISTORICAL_CUTOFF_DATE)
tweets_df = tweets_df.filter(
~(tweets_df['date'] < date_from_column)
& (tweets_df['date'] < date_to_column))
sql_tokenize = functions.udf(
lambda tweet: twokenize.tokenize(tweet),
returnType=types.ArrayType(types.StringType()))
tweets_df = (tweets_df
.withColumn('tweet_tokens', sql_tokenize(tweets_df['tweet']))
.drop('tweet'))
# tweets_df now has Row(tweet_tokens, date, lon, lat)
# The only way to group elements and get a set of data (as far as I know) is by converting the
# DataFrame into an RDD. This is because I can't find the right operation on GroupedData in Pyspark.
row_to_gridsquare_tokens = lambda row: (
latlongrid.grid_square_index(lat=row['lat'], lon=row['lon']),
row['tweet_tokens'])
tokens_rdd = (tweets_df.rdd.map(row_to_gridsquare_tokens)
.reduceByKey(operator.concat))
tokens_df_schema = types.StructType([
types.StructField('grid_square', types.IntegerType()),
types.StructField('tokens', types.ArrayType(types.StringType()))
])
tokens_df = ss.createDataFrame(tokens_rdd, schema=tokens_df_schema)
hashing_tf = feature.HashingTF(
    numFeatures=(2**18)-1, inputCol='tokens', outputCol='token_frequencies')
lda = (clustering.LDA()
.setFeaturesCol('token_frequencies')
.setK(10)
.setTopicDistributionCol('topic_distribution'))
topic_distribution_pipeline = ml.Pipeline(stages=[hashing_tf, lda])
lda_model = topic_distribution_pipeline.fit(tokens_df)
topic_distributions = lda_model.transform(tokens_df).select(['grid_square', 'topic_distribution'])
# --------------------------------------------------------------------------------------------------
# PART 2: Get complaint counts per (grid square, date).
# --------------------------------------------------------------------------------------------------
complaints_df_schema = types.StructType([
types.StructField('CMPLNT_NUM', types.IntegerType(),
nullable=False),
types.StructField('CMPLNT_FR_DT', types.StringType()),
types.StructField('CMPLNT_FR_TM', types.StringType()),
types.StructField('CMPLNT_TO_DT', types.StringType()),
types.StructField('CMPLNT_TO_TM', types.StringType()),
types.StructField('RPT_DT', types.StringType(), nullable=False),
types.StructField('KY_CD', types.StringType()),
types.StructField('OFNS_DESC', types.StringType()),
types.StructField('PD_CD', types.IntegerType()),
types.StructField('PD_DESC', types.StringType()),
types.StructField('CRM_ATPT_CPTD_CD', types.StringType()),
types.StructField('LAW_CAT_CD', types.StringType()),
types.StructField('JURIS_DESC', types.StringType()),
types.StructField('BORO_NM', types.StringType()),
types.StructField('ADDR_PCT_CD', types.StringType()),
types.StructField('LOC_OF_OCCUR_DESC', types.StringType()),
types.StructField('PREM_TYP_DESC', types.StringType()),
types.StructField('PARKS_NM', types.StringType()),
types.StructField('HADEVELOPT', types.StringType()),
types.StructField('X_COORD_CD', types.FloatType()),
types.StructField('Y_COORD_CD', types.FloatType()),
types.StructField('Latitude', types.FloatType()),
types.StructField('Longitude', types.FloatType()),
types.StructField('Lat_Lon', types.StringType())])
complaints_df = ss.read.csv(
"crime_complaints_with_header.csv",
header=True,
schema=complaints_df_schema)
complaints_df = (complaints_df
.select(['CMPLNT_FR_DT', 'CMPLNT_TO_DT', 'Latitude', 'Longitude'])
.withColumnRenamed('CMPLNT_FR_DT', 'from_date_string')
.withColumnRenamed('CMPLNT_TO_DT', 'to_date_string')
.withColumnRenamed('Latitude', 'lat')
.withColumnRenamed('Longitude', 'lon'))
# Filter to find the complaints which have an exact date of occurrence
# or which have a start and end date.
complaints_df = complaints_df.filter(~complaints_df['from_date_string'].isNull())
# Now get the actual column dates.
def string_to_date(s):
if s == None:
return None
else:
return datetime.datetime.strptime(s, '%m/%d/%Y')
string_to_date_udf = functions.udf(string_to_date, types.DateType())
complaints_df = (complaints_df
.withColumn('from_date', string_to_date_udf(complaints_df['from_date_string']))
.withColumn('to_date', string_to_date_udf(complaints_df['to_date_string']))
.select(['from_date', 'to_date', 'lat', 'lon']))
# Now filter for complaints which occur on one day only.
complaints_df = (complaints_df
.filter(complaints_df['to_date'].isNull()
| (complaints_df['to_date'] == complaints_df['from_date']))
.withColumnRenamed('from_date', 'date'))
# Columns are now 'date', 'lat', and 'lon'.
# Compute grid square for each crime.
def grid_square_from_lat_lon(lat, lon):
return latlongrid.grid_square_index(lat=lat, lon=lon)
grid_square_from_lat_lon_udf = functions.udf(
grid_square_from_lat_lon, returnType=types.IntegerType())
complaints_df = (complaints_df
.withColumn('grid_square',
grid_square_from_lat_lon_udf(complaints_df['lat'], complaints_df['lon']))
.select('date', 'grid_square'))
# Now count by (GridSquare, Date).
complaint_counts_df = (complaints_df
.groupBy(complaints_df['grid_square'], complaints_df['date'])
.count()
.withColumnRenamed('count', 'complaint_count'))
complaint_counts_df = (complaint_counts_df
.withColumn(
'complaint_count',
complaint_counts_df['complaint_count'].cast(types.DoubleType())))
count_binarizer = feature.Binarizer(
threshold=0, inputCol='complaint_count', outputCol='binary_complaint_count')
complaint_counts_df = count_binarizer.transform(complaint_counts_df)
complaint_counts_df = (complaint_counts_df
    .drop('complaint_count')
    .withColumnRenamed('binary_complaint_count', 'complaint_count'))
# Columns are now 'date', 'grid_square' and the binarized 'complaint_count'.
# Filter for complaints occurring within the date range..
past_complaint_counts_df = complaint_counts_df.filter(
(complaint_counts_df['date'] < date_to_column)
& (complaint_counts_df['date'] >= date_from_column))
current_complaint_counts_df = complaint_counts_df.filter(
complaint_counts_df['date'] == date_to_column)
# --------------------------------------------------------------------------------------------------
# PART 3: Defining the data matrix.
# --------------------------------------------------------------------------------------------------
# Complaint count dataframes only have entries for nonzero counts. Fill in the nonzero entries.
all_dates_squares_df = ss.createDataFrame(
[(gridSquare, PREDICTION_DATE - datetime.timedelta(days=i))
for gridSquare in range(-1, latlongrid.grid_size)
for i in range(1, 1 + NUM_DAYS)],
schema=types.StructType([
types.StructField('grid_square', types.IntegerType()),
types.StructField('date', types.DateType())]))
all_squares_df = ss.createDataFrame(
[(gridSquare,) for gridSquare in range(-1, latlongrid.grid_size)],
schema=types.StructType([
types.StructField('grid_square', types.IntegerType())]))
past_complaint_counts_df = past_complaint_counts_df.join(
all_dates_squares_df,
on=['grid_square', 'date'],
how='right_outer')
past_complaint_counts_df = past_complaint_counts_df.fillna({'complaint_count': 0})
current_complaint_counts_df = current_complaint_counts_df.join(
all_squares_df,
on='grid_square',
how='right_outer')
current_complaint_counts_df = (current_complaint_counts_df
.fillna({'complaint_count': 0})
.withColumn('date',
functions.when(current_complaint_counts_df['date'].isNull(), PREDICTION_DATE)
.otherwise(current_complaint_counts_df['date'])))
# Do a left outer join on topic_distributions and past_complaint_counts_df to get our data matrix.
data_matrix = topic_distributions.join(
past_complaint_counts_df,
on='grid_square',
how='inner')
# So far, data_matrix contains Row(date, grid_square, topic_distributions, complaint_count).
# Get weekday from date.
get_weekday_udf = functions.udf(lambda d: d.weekday(), returnType=types.IntegerType())
data_matrix = data_matrix.withColumn('weekday', get_weekday_udf(data_matrix['date']))
# Assemble the feature vectors.
weekday_one_hot_encoder = feature.OneHotEncoder(inputCol='weekday', outputCol='weekday_vector')
feature_vector_assembler = feature.VectorAssembler(
inputCols=['weekday_vector', 'topic_distribution'], outputCol='final_feature_vector')
feature_assembly_pipeline = (
ml.Pipeline(stages=[weekday_one_hot_encoder, feature_vector_assembler]).fit(data_matrix))
data_matrix = (feature_assembly_pipeline.transform(data_matrix)
.select('date', 'grid_square', 'final_feature_vector', 'complaint_count'))
LOGGER.debug(str(data_matrix.count()) + " rows like " + str(data_matrix.take(1)))
#logistic_regression = classification.LogisticRegression(
# maxIter=10, regParam=0.3, elasticNetParam=0.8,
# featuresCol='final_feature_vector', labelCol='complaint_count',
# probabilityCol='predicted_probability')
#logistic_model = logistic_regression.fit(data_matrix)
#LOGGER.info(
# "coefficients: " + str(logistic_model.coefficientMatrix) + ", intercept: " + str(logistic_model.interceptVector))
prediction_data_matrix = topic_distributions.join(
current_complaint_counts_df,
on='grid_square',
how='inner')
prediction_data_matrix = (prediction_data_matrix
.withColumn('weekday', get_weekday_udf(prediction_data_matrix['date']))
.select('weekday', 'grid_square', 'date', 'topic_distribution', 'complaint_count'))
prediction_data_matrix = (feature_assembly_pipeline.transform(prediction_data_matrix)
.select('grid_square', 'date', 'final_feature_vector', 'complaint_count'))
LOGGER.debug(str(prediction_data_matrix.count()) + " rows like "
+ str(prediction_data_matrix.take(1)))
exit(0)
#predicted_complaint_counts = (logistic_model.transform(prediction_data_matrix)
# .select('grid_square', 'complaint_count', 'predicted_probability')
# .collect())
#
#LOGGER.debug(str(predicted_complaint_counts.count()) + " rows like "
# + str(predicted_complaint_counts.take(1)) + ".")
#exit(0)
| gpl-3.0 | 3,743,084,505,325,110,000 | 38.598291 | 118 | 0.66163 | false |
RuthAngus/LSST-max | code/GP_periodogram.py | 1 | 1066 | from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from GProtation import make_plot, lnprob, neglnlike
import emcee
import time
import george
from george.kernels import ExpSquaredKernel, ExpSine2Kernel
import scipy.optimize as spo
def GP_periodogram(x, y, yerr, p_init, plims, N):
"""
This function takes a light curves and attempts to produce a GP periodogram.
It returns the value of the highest peak.
The kernel hyperparameters are optimised over a grid of periods.
This is also a "profile likelihood".
x, y, yerr: the light curve.
p_init: the initial guess for the period.
plims: the (log) boundaries for the grid.
N: the number of grid points.
"""
# create the grid
    periods = np.linspace(np.exp(plims[0]), np.exp(plims[1]), N)
# initial hyperparameters
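    # A minimal sketch (not the author's implementation) of the grid
    # evaluation, assuming the george 0.2-style API imported above
    # (george.GP, gp.compute, gp.lnlikelihood); the hyperparameter values
    # are hypothetical placeholders:
    #
    #   theta = [1., 2., 2.]            # amplitude, length scale, gamma
    #   lnlikes = []
    #   for p in periods:
    #       k = theta[0] * ExpSquaredKernel(theta[1]) * ExpSine2Kernel(theta[2], p)
    #       gp = george.GP(k)
    #       gp.compute(x, yerr)
    #       lnlikes.append(gp.lnlikelihood(y))
    #   return periods[np.argmax(lnlikes)]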
if __name__ == "__main__":
# fake data
    x = np.linspace(0, 10, 100)
p = 2
err = .1
y = np.sin(2*np.pi*(1./p)*x) + np.random.randn(100)*err
yerr = np.ones_like(y) * err
    p_init, plims = 2, np.log([.1, 5])
GP_periodogram(x, y, yerr, p_init, plims, 10)
| mit | -6,016,110,204,443,567,000 | 27.052632 | 77 | 0.707317 | false |
cffex/cmdb | test/project/webci_attr_type.py | 1 | 10754 | #!/usr/bin/python
#coding: utf-8
#print "Content-type: text/html\n"
#Author: LIPH
import web
import demjson
import json
import string
import time
import urlparse
import webci_attr
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
render = web.template.render('templates/')
urls = ( '/ciattrtype/(.*)', 'ATTRTYPE',
'/ciattrtype', 'attrtype', )
db = web.database(dbn='oracle', user='cmdb', pw='cmdb123', db='cffexcmdb')
#Define time stamp 9999/12/31 23:59:59
ENDTIME = str(int('99991231235959'))
DELETETIME = str('00000000000000')
def fn_create_ci_attrtype(i_name,i_description,i_type_fid,i_mandatory,i_owner,i_family_id,i_change_log,i_displayname, i_value_type, i_endtime = ENDTIME):
#Function:create ci attribute type
v_cat = 'PCAT00000001'
v_fat = 'FCAT00000001'
ct_id = db.query('select max(id) cid, max(family_id) fid from t_ci_attribute_type ')
#Although there will be only one record, it also needs to iteratively generate the dict. It will raise an error if directly transforming ci_id[0] to json format
ci_as_dict = []
for ci in ct_id:
ci_as_dict.append(ci)
v_json = json.dumps(ci_as_dict).decode("GB2312")
v_djson = json.loads(v_json,encoding="gb2312")
v_num = len(v_djson)
#Take the default value when inserting the first record
if v_num <> 0 :
v_cat = v_djson[0]['CID']
v_fat = v_djson[0]['FID']
v_cat = 'PCAT' + str(string.atoi(v_cat[4:])+1).rjust(8,'0')
print v_cat
if i_family_id == None :
v_fat = 'FCAT' + str(string.atoi(v_fat[4:])+1).rjust(8,'0')
else:
v_fat = i_family_id
print v_fat
v_curtime = time.strftime("%Y%m%d%H%M%S", time.localtime())
n = db.insert('t_ci_attribute_type',id = v_cat,name = i_name, description = i_description, ci_type_fid = i_type_fid, mandatory= i_mandatory,
owner = i_owner, starttime = v_curtime, endtime = i_endtime, family_id = v_fat, change_log = i_change_log,displayname = i_displayname, value_type = i_value_type)
return v_fat
def fn_delete_ci_attrtype(i_family_id,i_curtime,i_change_log):
#Function: Delete ci attribute type
v_ca_fids = db.query('select distinct a.family_id from t_ci_attribute a where a.type_fid = $fid and a.endtime = $endtime',vars={'fid':i_family_id,'endtime':ENDTIME})
json_en = demjson.encode(v_ca_fids)
json_de = demjson.decode(json_en)
v_ca_fid_num = len(json_de)
if v_ca_fid_num <> 0:
for v_ca_fid in json_de:
n = webci_attr.fn_delete_ciattr(v_ca_fid['FAMILY_ID'], i_curtime, i_change_log)
v_ct_fids = db.query("select a.name ,convert(a.description,'utf8') description,a.ci_type_fid,a.mandatory, a.owner,a.family_id,convert(a.displayname,'utf8') displayname, a.value_type from t_ci_attribute_type a where a.endtime = $aendtime and a.family_id = $fid ",vars={'aendtime':ENDTIME,'fid':i_family_id})
ci_as_dict = []
for ci in v_ct_fids:
ci_as_dict.append(ci)
ci_type_json = json.dumps(ci_as_dict, indent = 4,ensure_ascii=False, separators = (',',':')).decode("GB2312")
ci_type_djson = json.loads(ci_type_json,encoding="gb2312")
#delete the record
n = db.update('t_ci_attribute_type', where='family_id = $fid and endtime = $endtime', vars={'fid':i_family_id,'endtime':ENDTIME}, endtime=i_curtime)
#insert a new record and set the endtime=deletetime
v_fid = fn_create_ci_attrtype(ci_type_djson[0]['NAME'], ci_type_djson[0]['DESCRIPTION'], ci_type_djson[0]['CI_TYPE_FID'],ci_type_djson[0]['MANDATORY'],ci_type_djson[0]['OWNER'],
ci_type_djson[0]['FAMILY_ID'], i_change_log,ci_type_djson[0]['DISPLAYNAME'], ci_type_djson[0]['VALUE_TYPE'], DELETETIME)
return n
class ATTRTYPE:
def GET(self,fid):
ci_attrtype = db.query("select b.name citype_name, a.name ,convert(a.description,'utf8') description,a.ci_type_fid,a.mandatory, a.owner,a.family_id,convert(a.displayname,'utf8') displayname,a.value_type,a.change_log from t_ci_attribute_type a ,t_ci_type b where a.ci_type_fid = b.family_id and a.endtime = $endtime and b.endtime = $endtime and a.family_id = $fid ",vars={'endtime':ENDTIME,'fid':fid})
ci_as_dict = []
for ci in ci_attrtype:
ci_as_dict.append(ci)
ci_type_json = json.dumps(ci_as_dict, indent = 4,ensure_ascii=False, separators = (',',':')).decode("GB2312")
print ci_type_json
return ci_type_json
class attrtype:
def GET(self):
all_col = ('name','description','ci_type_fid','mandatory','owner','family_id','time','change_log','citype_name','value_type')
citype_input = web.input()
condition = " "
for col in range(len(all_col)):
col_name = all_col[col]
value = citype_input.get(col_name,None)
if value <> None:
if col_name == 'time' :
condition = condition + "cat.starttime <= '" + value + "' and cat.endtime > '" + value + "' and b.starttime <= '" + value + "' and b.endtime > '" + value + "' and "
elif col_name == 'citype_name':
condition = condition + "b.name = '" + value + "' and "
else :
condition = condition + "cat." + col_name + " = '" + value + "' and "
if value == None and col_name == 'time':
condition = condition + "cat.endtime = '" + ENDTIME + "' and b.endtime = '" + ENDTIME + "' and "
print condition
v_sql = "select b.name citype_name, cat.name ,convert(cat.description,'utf8') description,cat.ci_type_fid, cat.mandatory, cat.owner,cat.family_id,convert(cat.displayname,'utf8') displayname, cat.value_type, cat.change_log from t_ci_attribute_type cat, t_ci_type b where " + condition + " cat.ci_type_fid = b.family_id "
ci_type = db.query(v_sql)
ci_as_dict = []
for ci in ci_type:
ci_as_dict.append(ci)
ci_type_json = json.dumps(ci_as_dict, indent = 4,ensure_ascii=False, separators = (',',':')).decode("GB2312")
print ci_type_json
# import sys,httplib, urllib
# params = urllib.urlencode({'fid':'FCAT00000027','change_log':'test'})
# headers = {'Content-type': 'application/x-www-form-urlencoded', 'Accept': 'text/plain'}
# con2 = httplib.HTTPConnection("localhost:8080")
# con2.request("DELETE","/ciattrtype",params,headers)
# con2.close()
return ci_type_json
def POST(self):
citype_input = web.input()
#Besides some fields in t_ci_attribute_type, input parameters also include the "name" field in t_ci_type
v_ct_fids = db.query('SELECT distinct ct.family_id FROM t_ci_type ct WHERE ct.endtime = $endtime and ct.family_id = $ctfid',vars={'endtime':ENDTIME,'ctfid':citype_input.get('ci_type_fid',None)})
json_en = demjson.encode(v_ct_fids)
json_de = demjson.decode(json_en)
v_ct_fid_num = len(json_de)
if v_ct_fid_num == 0:
return 2 #there is no relative family_id in table T_CI_TYPE
elif v_ct_fid_num > 1:
return 3 #there are more than one relative family_ids in table T_CI_TYPE
v_ct_fid = json_de[0]['FAMILY_ID']
print v_ct_fid
#Users don't need to input the family_id . The afferent parameter for the function is null
v_fid = fn_create_ci_attrtype(citype_input.get('name',None), citype_input.get('description',None), v_ct_fid,citype_input.get('mandatory',None),
citype_input.get('owner',None), None, 'initialization', citype_input.get('displayname',None),citype_input.get('value_type',None))
return v_fid
def DELETE(self):
input_data = web.data()
data = urlparse.parse_qs(input_data)
v_ct_fids = db.query("SELECT distinct c.name FROM t_ci_attribute_type c WHERE c.family_id = $fid and c.endtime = $endtime",vars={'fid':data['fid'][0],'endtime':ENDTIME})
json_en = demjson.encode(v_ct_fids)
json_de = demjson.decode(json_en)
v_ct_fid_num = len(json_de)
if v_ct_fid_num == 0:
return 2 #There are no records to delete in table t_ci_attribute_type
elif v_ct_fid_num > 1:
return 3 #There are more than one records to delete in table t_ci_attribute_type
v_curtime = time.strftime("%Y%m%d%H%M%S", time.localtime())
#When deleting t_ci_attribute_type, we should delete all the relative attribute
n = fn_delete_ci_attrtype(data['fid'][0],v_curtime,data['change_log'][0])
return n
def PUT(self):
citype_input = web.input()
v_ct_fids = db.query("select a.name ,convert(a.description,'utf8') description,a.ci_type_fid,a.mandatory, a.owner,a.family_id,convert(a.displayname,'utf8') displayname,a.value_type, a.change_log from t_ci_attribute_type a where a.endtime = $aendtime and a.family_id = $fid ",vars={'aendtime':ENDTIME,'fid':citype_input.get('fid',None)})
ci_as_dict = []
for ci in v_ct_fids:
ci_as_dict.append(ci)
ci_type_json = json.dumps(ci_as_dict, indent = 4,ensure_ascii=False, separators = (',',':')).decode("GB2312")
ci_type_djson = json.loads(ci_type_json,encoding="gb2312")
v_ct_fid_num = len(ci_type_djson)
if v_ct_fid_num == 0:
return 2 #There are no records to modify in table t_ci_attribute_type
elif v_ct_fid_num > 1:
return 3 #There are more than one records to modify in table t_ci_attribute_type
v_curtime = time.strftime("%Y%m%d%H%M%S", time.localtime())
n = db.update('t_ci_attribute_type', where='family_id = $fid and endtime = $endtime', vars={'fid':citype_input.get('fid'),'endtime':ENDTIME}, endtime=v_curtime)
v_fid = fn_create_ci_attrtype(citype_input.get('name',ci_type_djson[0]['NAME']), citype_input.get('description',ci_type_djson[0]['DESCRIPTION']),
ci_type_djson[0]['CI_TYPE_FID'],citype_input.get('mandatory',ci_type_djson[0]['MANDATORY']),citype_input.get('owner',ci_type_djson[0]['OWNER']),
ci_type_djson[0]['FAMILY_ID'], citype_input.get('change_log',ci_type_djson[0]['CHANGE_LOG']),citype_input.get('displayname',ci_type_djson[0]['DISPLAYNAME']),citype_input.get('value_type',ci_type_djson[0]['VALUE_TYPE']))
return v_fid
if __name__ == "__main__":
app = web.application(urls, globals())
app.run()
| apache-2.0 | 268,012,882,790,708,770 | 56.118919 | 407 | 0.603981 | false |
egemsoft/esef-yawd-translation | translations/utils.py | 1 | 5003 | import os
import sys
from django.conf import settings
from django.utils.encoding import smart_str
from django.utils.translation import check_for_language
from django.utils.translation.trans_real import get_language_from_path
_default = None
_supported = []
def get_default_language():
"""
Detects the default language from the database.
If no default language is present, the default
settings.LANGUAGE_CODE is used.
This will reload its values in the context of a new thread.
"""
global _default
if _default is None:
try:
from models import Language
_default = smart_str(Language.objects.get(default=True).name)
except:
_default = settings.LANGUAGE_CODE
return _default
def get_supported_languages():
"""
Retrieve the supported languages.
"""
global _supported
if not _supported:
from models import Language
_supported = [smart_str(l) for l in Language.objects.values_list('name', flat=True)]
# if no languages are set use the default language
if not _supported:
_supported = [settings.LANGUAGE_CODE]
return _supported
def get_language_from_request(request, check_path=False):
"""
This method is used as a replacement to the original django language
detection algorithm. It takes the db default language into
consideration and does not deal with the Accept-Language header.
Analyzes the request to find what language the user wants the system to
show. Only languages listed in settings.LANGUAGES are taken into account.
If the user requests a sublanguage where we have a main language, we send
out the main language.
If check_path is True, the URL path prefix will be checked for a language
code, otherwise this is skipped for backwards compatibility.
"""
# retrieve list of supported languages
supported = get_supported_languages()
if check_path:
        lang_code = get_language_from_path(request.path_info, [settings.LANGUAGE_CODE] + supported)
if lang_code is not None:
return lang_code
if hasattr(request, 'session'):
lang_code = request.session.get('django_language', None)
if lang_code in supported and lang_code is not None and check_for_language(lang_code):
return lang_code
lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
if lang_code and lang_code not in supported:
lang_code = lang_code.split('-')[0] # e.g. if fr-ca is not supported fallback to fr
if lang_code and lang_code in supported and check_for_language(lang_code):
return lang_code
# original Django middleware used to look for the Accept-Language
# HTTP header and extract the language. This is replaced in our
# mechanism
return get_default_language()
def compile_message_file(fn):
"""
Accepts a .po file path as argument and generates an appropriate .mo file.
This copies the needed functionality from the original compilemessages command
"""
pf = os.path.splitext(fn)[0]
# Store the names of the .mo and .po files in an environment
# variable, rather than doing a string replacement into the
# command, so that we can take advantage of shell quoting, to
# quote any malicious characters/escaping.
# See http://cyberelk.net/tim/articles/cmdline/ar01s02.html
os.environ['djangocompilemo'] = pf + '.mo'
os.environ['djangocompilepo'] = pf + '.po'
if sys.platform == 'win32': # Different shell-variable syntax
cmd = 'msgfmt --check-format -o "%djangocompilemo%" "%djangocompilepo%"'
else:
cmd = 'msgfmt --check-format -o "$djangocompilemo" "$djangocompilepo"'
os.system(cmd)
os.chmod(pf + '.mo', 0664)
def concat_message_files(files, fn):
"""
Accepts a list of po files and a target file and uses the
msgcat command to concat the files.
"""
files_str = ' '.join(files)
os.environ['djangosourcepo'] = files_str
os.environ['djangotargetpo'] = fn
if sys.platform == 'win32': # Different shell-variable syntax
cmd = 'msgcat --use-first -o "%djangotargetpo%" %djangosourcepo%'
else:
cmd = 'msgcat --use-first -o "$djangotargetpo" $djangosourcepo'
os.system(cmd)
os.chmod(fn, 0664)
def reset_translations(lang):
"""
Empty django's internal translations dictionary when a message translation
changes or the translations list is regenerated.
"""
from django.utils import translation
from django.utils.translation import trans_real
import gettext
if lang in trans_real._translations:
del trans_real._translations[lang]
gettext._translations = {}
if settings.LANGUAGE_CODE == lang:
trans_real._default = None
# force current thread translations reload
current_lang = translation.get_language()
if current_lang == lang:
translation.activate(current_lang)
| bsd-3-clause | -1,003,490,242,642,508,300 | 31.914474 | 105 | 0.681991 | false |
DBeath/flask-feedrsub | feedrsub/feeds/feedfinder/feedfinder4.py | 1 | 7270 | import logging
import time
from typing import Tuple
from urllib.parse import urlsplit, urljoin
from bs4 import BeautifulSoup
from feedrsub.feeds.feedfinder.feedinfo import FeedInfo
from feedrsub.utils.requests_session import RequestsSession, requests_session
logger = logging.getLogger("feedfinder4")
def coerce_url(url: str) -> str:
url = url.strip()
if url.startswith("feed://"):
return "http://{0}".format(url[7:])
for proto in ["http://", "https://"]:
if url.startswith(proto):
return url
return "https://{0}".format(url)
def get_site_root(url: str) -> str:
"""
Find the root domain of a url
"""
url = coerce_url(url)
parsed = urlsplit(url)
print(parsed)
return parsed.netloc
class FeedFinder:
def __init__(self, session, get_feed_info=False, timeout=(3.05, 10)):
self.session = session
self.get_feed_info = get_feed_info
self.timeout = timeout
def get_url(self, url: str):
try:
r = self.session.get(url, timeout=self.timeout)
except Exception as e:
logger.warning(u"Error while getting URL: {0}, {1}".format(url, str(e)))
return None
return r
@staticmethod
def is_feed_data(text: str) -> bool:
data = text.lower()
if data.count("<html"):
return False
return bool(data.count("<rss") + data.count("<rdf") + data.count("<feed"))
def is_feed(self, url: str) -> str:
response = self.get_url(url)
if not response or not response.text or not self.is_feed_data(response.text):
return ""
return response.text
@staticmethod
def is_feed_url(url: str) -> bool:
return any(map(url.lower().endswith, [".rss", ".rdf", ".xml", ".atom"]))
@staticmethod
def is_feedlike_url(url: str) -> bool:
return any(map(url.lower().count, ["rss", "rdf", "xml", "atom", "feed"]))
def check_urls(self, urls: list) -> list:
feeds = []
for url in urls:
url_text = self.is_feed(url)
if url_text:
feed = self.create_feed_info(url, url_text)
feeds.append(feed)
return feeds
def create_feed_info(self, url: str, text: str) -> FeedInfo:
info = FeedInfo(url)
if self.get_feed_info:
logger.info(u"Getting FeedInfo for {0}".format(url))
info.get_info(text=text, soup=self.soup, finder=self)
return info
@property
def soup(self) -> BeautifulSoup:
return self.parsed_soup
def create_soup(self, text: str) -> None:
self.parsed_soup = BeautifulSoup(text, "html.parser")
def search_links(self, url: str) -> list:
links = []
for link in self.soup.find_all("link"):
if link.get("type") in [
"application/rss+xml",
"text/xml",
"application/atom+xml",
"application/x.atom+xml",
"application/x-atom+xml",
]:
links.append(urljoin(url, link.get("href", "")))
return self.check_urls(links)
def search_a_tags(self, url: str) -> Tuple[list, list]:
logger.info("Looking for <a> tags.")
local, remote = [], []
for a in self.soup.find_all("a"):
href = a.get("href", None)
if href is None:
continue
if "://" not in href and self.is_feed_url(href):
local.append(href)
if self.is_feedlike_url(href):
remote.append(href)
return local, remote
@requests_session()
def find_feeds(
url: str,
check_all: bool = False,
get_feed_info: bool = False,
timeout: tuple = (3.05, 10),
**kwargs
) -> list:
finder = FeedFinder(
kwargs.get("session"), get_feed_info=get_feed_info, timeout=timeout
)
# Format the URL properly.
url = coerce_url(url)
feeds = []
start_time = time.perf_counter()
# Download the requested URL
logger.info("Finding feeds at URL: {0}".format(url))
response = finder.get_url(url)
search_time = int((time.perf_counter() - start_time) * 1000)
logger.debug("Searched url in {0}ms".format(search_time))
if not response or not response.text:
return []
text = response.text
# Parse text with BeautifulSoup
finder.create_soup(text)
# Check if it is already a feed.
if finder.is_feed_data(text):
found = finder.create_feed_info(url, text)
feeds.append(found)
return feeds
# Search for <link> tags
logger.info("Looking for <link> tags.")
found_links = finder.search_links(url)
feeds.extend(found_links)
logger.info("Found {0} feed <link> tags.".format(len(found_links)))
search_time = int((time.perf_counter() - start_time) * 1000)
logger.debug("Searched <link> tags in {0}ms".format(search_time))
if len(feeds) and not check_all:
return sort_urls(feeds, url)
# Look for <a> tags.
logger.info("Looking for <a> tags.")
local, remote = finder.search_a_tags(url)
# Check the local URLs.
local = [urljoin(url, l) for l in local]
found_local = finder.check_urls(local)
feeds.extend(found_local)
logger.info("Found {0} local <a> links to feeds.".format(len(found_local)))
# Check the remote URLs.
remote = [urljoin(url, l) for l in remote]
found_remote = finder.check_urls(remote)
feeds.extend(found_remote)
logger.info("Found {0} remote <a> links to feeds.".format(len(found_remote)))
search_time = int((time.perf_counter() - start_time) * 1000)
logger.debug("Searched <a> links in {0}ms".format(search_time))
if len(feeds) and not check_all:
return sort_urls(feeds, url)
# Guessing potential URLs.
fns = ["atom.xml", "index.atom", "index.rdf", "rss.xml", "index.xml", "index.rss"]
urls = list(urljoin(url, f) for f in fns)
found_guessed = finder.check_urls(urls)
feeds.extend(found_guessed)
logger.info("Found {0} guessed links to feeds.".format(len(found_guessed)))
search_time = int((time.perf_counter() - start_time) * 1000)
logger.debug("Searched guessed urls in {0}ms".format(search_time))
return sort_urls(feeds, url)
def url_feed_prob(url: str, original_url: str = None) -> int:
score = 0
if original_url:
url_domain = get_site_root(url)
original_domain = get_site_root(original_url)
if url_domain not in original_domain:
score -= 17
if "comments" in url:
score -= 15
if "georss" in url:
score -= 9
if "alt" in url:
score -= 7
kw = ["rss", "atom", ".xml", "feed", "rdf"]
for p, t in zip(range(len(kw) * 2, 0, -2), kw):
if t in url:
score += p
if url.startswith("https"):
score += 9
print("Url: {0}, Score: {1}".format(url, score))
return score
def sort_urls(feeds, original_url=None):
print("Sorting feeds: {0}".format(feeds))
sorted_urls = sorted(
list(set(feeds)), key=lambda x: url_feed_prob(x.url, original_url), reverse=True
)
logger.info(u"Returning sorted URLs: {0}".format(sorted_urls))
return sorted_urls
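# Illustrative usage sketch (hypothetical URL, not part of the original module):
#
#   feeds = find_feeds("https://example.com/blog", get_feed_info=True)
#   for feed in feeds:      # FeedInfo objects, best-scoring URL first
#       print(feed.url)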
| mit | -221,718,999,646,376,000 | 28.552846 | 88 | 0.587895 | false |
Badg/hwiopy | hwiopy/platforms/beagle.py | 1 | 6484 | ''' Beaglebone/Beagleboard/Etc hardware-specific operations.
LICENSING
-------------------------------------------------
hwiopy: A common API for hardware input/output access.
Copyright (C) 2014-2015 Nicholas Badger
[email protected]
nickbadger.com
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
USA
------------------------------------------------------
Something something sooooomething goes here.
'''
# Global dependencies
import io
import struct
import mmap
import json
from warnings import warn
from pkg_resources import resource_string
# from os import listdir
# from os.path import isfile, join, split
# Intrapackage dependencies
from . import __path__
from .. import core
from .. import systems
# from . import generic
# from .generic import device
class _header_map():
''' Callable class that resolves the header pins into their connections,
as well as providing several utility functions to describe the device.
_header_map():
======================================================
Returns the header connection, be it a hardwired one (ex 5VDC) or a SoC
terminal.
*args
------------------------------------------------------
pin_num: str 'pin number'
return
-------------------------------------------------------
str 'SoC terminal or other'
_header_map.list_system_headers():
========================================================
Returns all of the header pins that connect to the sitara SoC.
return
--------------------------------------------------------
dict {'pin num':
_memory_map.list_all_headers():
=========================================================
*args
---------------------------------------------------------
register: str 'name of register'
return
-------------------------------------------------------
str 'description of register'
'''
def __init__(self):
# Load the corresponding json file and create a map dict
self._sys_map = json.loads(
resource_string('hwiopy', 'maps/bbb_sysmap.json').\
decode('utf-8'))
self._header_pins = tuple(self._sys_map.keys())
# Predeclare
self._hardwired = {}
self._connected = {}
self._all_headers = {}
# Separate any hardwired (5VDC, GND, etc) pins from SoC connections
# Need way to collapse dict list into single item for _all_headers
for pin_num, pin_dict in self._sys_map.items():
if pin_dict['connections']:
self._hardwired[pin_num] = pin_dict['connections']
self._all_headers[pin_num] = pin_dict['connections']
elif pin_dict['terminals']:
self._connected[pin_num] = pin_dict['terminals']
self._all_headers[pin_num] = pin_dict['terminals']
def __call__(self, pin_num, pin_941=None, pin_942=None, *args, **kwargs):
# Grab the start and convert it to int (aka long)
# NOTE THAT HERE IS THE PLACE TO DEAL WITH THE TWO HEADER PINS THAT
# ARE CONNECTED TO TWO SOC PINS!! (pin 9_41 and pin 9_42)
# Don't necessarily want to error trap out declaring pin_941 and/or
# pin_942 with each other, or with a different pin number
which_connection = 0
if pin_num == '9_41':
if pin_941:
which_connection = pin_941
else:
warn(RuntimeWarning('Lookup on pin 9_41 without specifying '
'which mode to connect to. Defaulting to Sitara pin D14. '
'Consult the BBB system reference manual for details.'))
if pin_num == '9_42':
if pin_942:
which_connection = pin_942
else:
warn(RuntimeWarning('Lookup on pin 9_42 without specifying '
'which mode to connect to. Defaulting to Sitara pin C18. '
'Consult the BBB system reference manual for details.'))
# Now use whatever information we have to output the connection
return self._all_headers[pin_num][which_connection]
# Returns all header pins that are configurable
def list_system_headers(self):
return self._connected
# Very simply return a description of the queried register
def list_all_headers(self):
return self._all_headers
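    # Illustrative sketch (return values depend on maps/bbb_sysmap.json, so the
    # exact strings below are hypothetical):
    #
    #   hmap = _header_map()
    #   hmap('8_3')                  # -> SoC terminal wired to header pin 8_3
    #   hmap('9_1')                  # -> hardwired connection such as 'GND'
    #   hmap.list_system_headers()   # -> only the pins that reach the Sitara SoC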
class BBB(core.Device):
''' A beaglebone black. Must have kernel version >=3.8, use overlays, etc.
'''
# Where is the memory mapping stored to?
# mem_reg_loc = '/dev/mem'
# What pins correspond to what possible mappings?
def __init__(self, mem_filename='/dev/mem'):
''' Creates the device and begins setting it up.
'''
# Call super, initializing all of the abstract base class attributes
super().__init__(systems.Sitara335(mem_filename), _header_map())
def create_pin(self, pin_num, mode, name=None):
''' Gets a pin object from the self.chipset object and connects it to
a pin on the self.pinout dict.
which_terminal is redundant with mode?
'''
# NOTE THAT DUE TO THE ODDITY OF THE BBB, pins 9_41 and 9_42 need to
# be specially configured, as they each connect to two SoC terminals.
super().create_pin(pin_num, mode, name)
# pin = self.pinout[pin_num]
# return pin
return self.pinout[pin_num]
def validate(self):
''' Checks the device setup for conflicting pins, etc.
Actually this is probably unnecessary (?), as individual pin
assignments should error out with conflicting setups.
'''
pass | lgpl-2.1 | -8,934,214,126,180,577,000 | 34.244565 | 78 | 0.571869 | false |
TalwalkarLab/paleo | paleo/layers/conv.py | 1 | 11804 | """The module estimates 2D convolution layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import six
from paleo.layers import base
class Deconv2D(base.BaseLayer):
"""Deconv2D"""
def __init__(self,
name,
inputs,
filters,
strides,
padding,
output_shape,
use_cudnn=False,
backprop=True,
activation_fn='relu',
percent_holes=0.0):
super(Deconv2D, self).__init__(name, 'deconv2d')
self._inputs = inputs
self._filters = filters
self._strides = strides
self._padding = padding
self._backprop = backprop
# The deconv2d is implemented with conv2d.
self._transposed = Conv2d(
name + '_reverse',
output_shape,
filters,
strides,
padding,
use_cudnn=use_cudnn,
backprop=backprop,
activation_fn=activation_fn)
self._pad_h = self._transposed._pad_h
self._pad_w = self._transposed._pad_w
self._equivalent_conv = self._transposed.gradients(wrt='inputs')
self._outputs = output_shape
assert self._equivalent_conv.outputs == output_shape, (
'Output {} does not match the desired shape {}'.format(
str(self._equivalent_conv.outputs), str(output_shape)))
# Verify the backprop will get the correct gradient shapes.
self._back_filters = self._equivalent_conv.gradients(wrt='filters')
self._back_filters._percent_holes = (
self._equivalent_conv._percent_holes)
self._back_filters._hole_position = 'filters'
assert self._back_filters.outputs[1:3] == filters[:2], (
'Back filters {} does not match the desired shape {}'.format(
str(self._back_filters.outputs[1:3]), str(filters[:2])))
# Back wrt to input is a regular conv2d op.
self._back_inputs = self._transposed
assert self._back_inputs.outputs == inputs, (
'Back inputs {} does not match the desired shape {}'.format(
str(self._back_inputs.outputs), str(inputs)))
def gradients(self, wrt='inputs'):
"""Returns a conv layer that is equivalent to calculating the gradient
on this layer.
Args:
wrt: inputs or filters
"""
if wrt == 'inputs':
return self._back_inputs
elif wrt == 'filters':
return self._back_filters
def additional_summary(self):
return "Filters: {} Params: {:,}".format(self._filters,
self.num_params)
@property
def filters(self):
return self._filters
@property
def strides(self):
return self._strides
@property
def padding(self):
return self._padding
@property
def backprop(self):
return self._backprop
@property
def weights_in_bytes(self):
"""Returns weights."""
_BYTES_FLOAT = 4
kernel_h, kernel_w, in_channel, out_channel = self._filters
filters_in_bytes = (kernel_h * kernel_w * in_channel * out_channel *
_BYTES_FLOAT)
bias_in_bytes = out_channel * _BYTES_FLOAT
return filters_in_bytes + bias_in_bytes
@property
def num_params(self):
weights = six.moves.reduce(lambda x, y: x * y, self._filters, 1)
bias = self._filters[-1]
return weights + bias
class Conv2d(base.BaseLayer):
"""Estimator for 2D Convolutional layers. """
def __init__(self,
name,
inputs,
filters,
strides,
padding,
use_cudnn=False,
backprop=True,
activation_fn='relu',
percent_holes=0.0,
hole_position='filters',
splits=None):
"""Initialize estimator. """
super(Conv2d, self).__init__(name, 'conv2d')
self._inputs = list(inputs)
self._filters = list(filters)
if self._filters[2] == -1:
self._filters[2] = self._inputs[3]
self._strides = list(strides)
self._padding = padding
if splits is not None:
self.split_model(splits)
self._outputs = self._calculate_output_shape()
self._use_cudnn = use_cudnn
self._backprop = backprop
self._activation_fn = activation_fn
# Percent of holes in astrous convolution.
self._percent_holes = percent_holes
self._hole_position = hole_position
@property
def percent_holes(self):
return self._percent_holes
@property
def percent_holes_in_inputs(self):
if self._hole_position == 'inputs':
return self.percent_holes
else:
return 0.0
@property
def percent_holes_in_filters(self):
if self._hole_position == 'filters':
return self.percent_holes
else:
return 0.0
@property
def activation_fn(self):
return self._activation_fn
@property
def bias(self):
return self._filters[-1]
@property
def filters(self):
return self._filters
@property
def backprop(self):
return self._backprop
@property
def strides(self):
return self._strides
@property
def padding(self):
return self._padding
def split_model(self, num_splits):
"""Split in model parallel fashion."""
self._filters[3] = self._filters[3] // num_splits
def additional_summary(self):
return ("""Filters: {} Pad: {} ({}, {}) """
"""Stride: {}, {} Params: {:,}""".format(
self._filters, self._padding, self._pad_h, self._pad_w,
self.strides[1], self.strides[2], self.num_params))
def _calculate_output_shape(self):
"""Returns the output tensor shape."""
n, h, w, c = self._inputs
kernel_h, kernel_w, in_channel, out_channel = self._filters
_, stride_h, stride_w, _ = self._strides
if self._padding == 'VALID':
out_height = int(
math.ceil(float(h - kernel_h + 1) / float(stride_h)))
out_width = int(
math.ceil(float(w - kernel_w + 1) / float(stride_w)))
self._pad_h = 0
self._pad_w = 0
elif self._padding == 'SAME':
out_height = int(math.ceil(float(h) / float(stride_h)))
out_width = int(math.ceil(float(w) / float(stride_w)))
pad_along_height = (out_height - 1) * stride_h + kernel_h - h
pad_along_width = (out_width - 1) * stride_w + kernel_w - w
self._pad_h = pad_along_height // 2
self._pad_w = pad_along_width // 2
elif isinstance(self._padding, list):
self._pad_h, self._pad_w = self._padding
out_height = (h + 2 * self._pad_h - kernel_h) // stride_h + 1
out_width = (w + 2 * self._pad_w - kernel_w) // stride_w + 1
assert in_channel == c, (
"Input channel shall match. Layer %s: %d != %d" %
(self.name, in_channel, c))
# out_h = (h + 2 * self._pad_h - kernel_h) // stride_h + 1
# out_w = (w + 2 * self._pad_w - kernel_w) // stride_w + 1
return [n, out_height, out_width, out_channel]
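    # Worked example (illustrative): inputs [32, 224, 224, 3],
    # filters [7, 7, 3, 64], strides [1, 2, 2, 1], padding 'SAME'
    # -> out_height = out_width = ceil(224 / 2) = 112,
    #    so the returned shape is [32, 112, 112, 64].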
@property
def weights_in_bytes(self):
"""Returns weights."""
_BYTES_FLOAT = 4
kernel_h, kernel_w, in_channel, out_channel = self._filters
filters_in_bytes = (kernel_h * kernel_w * in_channel * out_channel *
_BYTES_FLOAT)
bias_in_bytes = out_channel * _BYTES_FLOAT
return filters_in_bytes + bias_in_bytes
@property
def num_params(self):
weights = six.moves.reduce(lambda x, y: x * y, self._filters, 1)
bias = self._filters[-1]
return weights + bias
def gradients(self, wrt='inputs'):
"""Returns a conv layer that is equivalent to calculating the gradient
on this layer.
Args:
wrt: inputs or filters
"""
layer = self
def _compute_padding(layer):
# Reference: TensorFlow ConvBackpropExtractAndVerifyDimension()
# Convolution of inputs with padded output grads and filters.
expanded_output_h = (layer.outputs[1] - 1) * layer.strides[1] + 1
expanded_output_w = (layer.outputs[2] - 1) * layer.strides[2] + 1
padded_out_h = layer.inputs[1] + layer.filters[0] - 1
padded_out_w = layer.inputs[2] + layer.filters[1] - 1
# Number of padding elements to be added before/after this
# dimension of input when computing Conv2DBackpropInput.
pad_before_h = layer.filters[0] - 1 - layer._pad_h
pad_before_w = layer.filters[1] - 1 - layer._pad_w
pad_after_h = padded_out_h - expanded_output_h - pad_before_h
pad_after_w = padded_out_w - expanded_output_w - pad_before_w
# Add one when padding is odd.
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/conv_grad_filter_ops.cc#L471
if abs(pad_before_h - pad_after_h) == 1:
expanded_output_h += 1
if abs(pad_before_w - pad_after_w) == 1:
expanded_output_w += 1
p_h = min(pad_before_h, pad_after_h)
p_w = min(pad_before_w, pad_after_w)
return (expanded_output_h, expanded_output_w, p_h, p_w)
expanded_output_h, expanded_output_w, pad_h, pad_w = _compute_padding(
layer)
holes = (expanded_output_h * expanded_output_w - self.outputs[1] *
self.outputs[2])
percent_holes = (holes / expanded_output_h / expanded_output_w)
# print('gradient wrt: {}'.format(wrt))
# print('expanded outputs: {} {}'.format(expanded_output_h,
# expanded_output_w))
        # print('padding: {} {}'.format(pad_h, pad_w))
# print('holes: {} ({})'.format(holes, percent_holes))
if wrt == 'inputs':
dummy_layer = Conv2d(
name="dummy_layer",
inputs=[
layer.outputs[0], expanded_output_h, expanded_output_w,
layer.outputs[3]
],
filters=[
layer.filters[0], layer.filters[1], layer.filters[3],
layer.filters[2]
],
strides=[1, 1, 1, 1],
padding=[pad_h, pad_w],
percent_holes=percent_holes,
hole_position='inputs')
# FIXME: distinguish holes in input and filter
elif wrt == 'filters':
if layer.padding == 'VALID':
_p = "VALID"
else:
_p = [pad_h, pad_w]
# Convolution of inputs with inputs and output grads.
dummy_layer = Conv2d(
name="dummy_layer",
inputs=[
layer.inputs[3], layer.inputs[1], layer.inputs[2],
layer.inputs[0]
],
filters=[
expanded_output_h, expanded_output_w, layer.outputs[0],
layer.outputs[3]
],
strides=[1, 1, 1, 1],
padding=_p,
percent_holes=percent_holes,
hole_position='filters')
return dummy_layer
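# Usage sketch (hypothetical shapes; mirrors the keyword arguments used for
# `dummy_layer` above and assumes the `outputs` property defined earlier in
# this file):
#
#     conv = Conv2d(name='conv1',
#                   inputs=[32, 224, 224, 3],    # NHWC input shape
#                   filters=[3, 3, 3, 64],       # kernel_h, kernel_w, in_c, out_c
#                   strides=[1, 1, 1, 1],
#                   padding='SAME')
#     conv.outputs               # -> [32, 224, 224, 64]
#     conv.num_params            # -> 3*3*3*64 + 64 = 1792
#     conv.gradients('inputs')   # Conv2d equivalent to the backward pass w.r.t. inputs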
| apache-2.0 | 4,500,665,056,704,952,000 | 33.923077 | 119 | 0.527194 | false |
nathaliaspatricio/febracev | friends/views.py | 1 | 1329 | # -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.template import RequestContext
from friends.utils import get_following_set, get_follower_set, get_mutual_set
from django.contrib.auth.decorators import login_required
from friends.models import FriendLink
FRIEND_FUNCTION_MAP = {
'followers': get_follower_set,
'following': get_following_set,
'mutual': get_mutual_set,
}
def friend_list(request, username, function_alias):
user = get_object_or_404(User, username=username)
context = {'friend_list': FRIEND_FUNCTION_MAP[function_alias](user)}
return render_to_response('friends/friend_list.html',
context,
context_instance = RequestContext(request))
@login_required
def add_friend(request, username):
user = request.user
friend = get_object_or_404(User, username=username)
FriendLink.objects.get_or_create(from_user=user, to_user=friend)
return redirect(friend.get_profile())
@login_required
def remove_friend(request, username):
user = request.user
friend = get_object_or_404(User, username=username)
FriendLink.objects.get(from_user=user, to_user=friend).delete()
return redirect(friend.get_profile())
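# Example URLconf wiring for the views above (illustrative only; the patterns,
# names and the exact import depend on the Django version this project targets):
#
#     from django.conf.urls import url
#     from friends import views
#
#     urlpatterns = [
#         url(r'^(?P<username>[-\w.@+]+)/(?P<function_alias>followers|following|mutual)/$',
#             views.friend_list, name='friend_list'),
#         url(r'^(?P<username>[-\w.@+]+)/add/$', views.add_friend, name='add_friend'),
#         url(r'^(?P<username>[-\w.@+]+)/remove/$', views.remove_friend, name='remove_friend'),
#     ]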
| gpl-2.0 | -6,370,096,574,285,482,000 | 38.088235 | 77 | 0.712566 | false |
cstipkovic/spidermonkey-research | testing/marionette/harness/marionette/tests/unit/test_switch_remote_frame.py | 1 | 5471 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from marionette import MarionetteTestCase
from marionette_driver.by import By
OOP_BY_DEFAULT = "dom.ipc.browser_frames.oop_by_default"
BROWSER_FRAMES_ENABLED = "dom.mozBrowserFramesEnabled"
class TestSwitchRemoteFrame(MarionetteTestCase):
def setUp(self):
super(TestSwitchRemoteFrame, self).setUp()
with self.marionette.using_context('chrome'):
self.oop_by_default = self.marionette.get_pref(OOP_BY_DEFAULT)
self.mozBrowserFramesEnabled = self.marionette.get_pref(BROWSER_FRAMES_ENABLED)
self.marionette.set_pref(OOP_BY_DEFAULT, True)
self.marionette.set_pref(BROWSER_FRAMES_ENABLED, True)
self.multi_process_browser = self.marionette.execute_script("""
try {
return Services.appinfo.browserTabsRemoteAutostart;
} catch (e) {
return false;
}""")
def tearDown(self):
with self.marionette.using_context("chrome"):
if self.oop_by_default is None:
self.marionette.clear_pref(OOP_BY_DEFAULT)
else:
self.marionette.set_pref(OOP_BY_DEFAULT, self.oop_by_default)
if self.mozBrowserFramesEnabled is None:
self.marionette.clear_pref(BROWSER_FRAMES_ENABLED)
else:
self.marionette.set_pref(BROWSER_FRAMES_ENABLED, self.mozBrowserFramesEnabled)
@property
def is_main_process(self):
return self.marionette.execute_script("""
return Components.classes["@mozilla.org/xre/app-info;1"].
getService(Components.interfaces.nsIXULRuntime).
processType == Components.interfaces.nsIXULRuntime.PROCESS_TYPE_DEFAULT;
""", sandbox="system")
def test_remote_frame(self):
self.marionette.navigate(self.marionette.absolute_url("test.html"))
self.marionette.push_permission('browser', True)
self.marionette.execute_script("""
let iframe = document.createElement("iframe");
iframe.setAttribute('mozbrowser', true);
iframe.setAttribute('remote', true);
iframe.id = "remote_iframe";
iframe.style.height = "100px";
iframe.style.width = "100%%";
iframe.src = "%s";
document.body.appendChild(iframe);
""" % self.marionette.absolute_url("test.html"))
remote_iframe = self.marionette.find_element(By.ID, "remote_iframe")
self.marionette.switch_to_frame(remote_iframe)
main_process = self.is_main_process
self.assertFalse(main_process)
def test_remote_frame_revisit(self):
# test if we can revisit a remote frame (this takes a different codepath)
self.marionette.navigate(self.marionette.absolute_url("test.html"))
self.marionette.push_permission('browser', True)
self.marionette.execute_script("""
let iframe = document.createElement("iframe");
iframe.setAttribute('mozbrowser', true);
iframe.setAttribute('remote', true);
iframe.id = "remote_iframe";
iframe.style.height = "100px";
iframe.style.width = "100%%";
iframe.src = "%s";
document.body.appendChild(iframe);
""" % self.marionette.absolute_url("test.html"))
self.marionette.switch_to_frame(self.marionette.find_element(By.ID,
"remote_iframe"))
main_process = self.is_main_process
self.assertFalse(main_process)
self.marionette.switch_to_frame()
main_process = self.is_main_process
should_be_main_process = not self.multi_process_browser
self.assertEqual(main_process, should_be_main_process)
self.marionette.switch_to_frame(self.marionette.find_element(By.ID,
"remote_iframe"))
main_process = self.is_main_process
self.assertFalse(main_process)
def test_we_can_switch_to_a_remote_frame_by_index(self):
# test if we can revisit a remote frame (this takes a different codepath)
self.marionette.navigate(self.marionette.absolute_url("test.html"))
self.marionette.push_permission('browser', True)
self.marionette.execute_script("""
let iframe = document.createElement("iframe");
iframe.setAttribute('mozbrowser', true);
iframe.setAttribute('remote', true);
iframe.id = "remote_iframe";
iframe.style.height = "100px";
iframe.style.width = "100%%";
iframe.src = "%s";
document.body.appendChild(iframe);
""" % self.marionette.absolute_url("test.html"))
self.marionette.switch_to_frame(0)
main_process = self.is_main_process
self.assertFalse(main_process)
self.marionette.switch_to_frame()
main_process = self.is_main_process
should_be_main_process = not self.multi_process_browser
self.assertEqual(main_process, should_be_main_process)
self.marionette.switch_to_frame(0)
main_process = self.is_main_process
self.assertFalse(main_process)
| mpl-2.0 | 5,705,097,036,018,414,000 | 45.760684 | 94 | 0.618351 | false |
stuaxo/mnd | mnd/handler.py | 1 | 6612 | """
To make callbacks work with instance methods, a few things need to happen.
The handler decorator attaches instances of MNDInfo to functions so that the
dispatcher can work with classes and instances via the Handler metaclass.
At instance creation time the metaclass finds any handlers carrying an
MNDFunction, replaces them with MNDMethods and informs the dispatcher.
"""
import pickle
import weakref
from collections import defaultdict
class ArgSpec(object):
"""
The arguments a function accepts.
Keeps a pickled copy of the arguments for hashing purposes.
"""
def __init__(self, key=None, *accept_args, **accept_kwargs):
"""
        :param key: optional - pickled form of (accept_args, accept_kwargs); built from them when omitted
:param accept_args: positional args
:param accept_kwargs: keyword args
"""
if key is None:
key = pickle.dumps(dict(args=accept_args, kwargs=accept_kwargs))
self.key = key
self.accept_args = accept_args
self.accept_kwargs = accept_kwargs
def __repr__(self):
return "ArgSpec([A(%s), KW(%s)])" % (self.accept_args, self.accept_kwargs)
@property
def accepts(self):
return self.accept_args, self.accept_kwargs
class MNDInfo(object):
# base class
def __init__(self, type):
self.type = type
@property
def is_class(self):
return self.type == "class"
@property
def is_function(self):
return self.type == "function"
class MNDFunction(MNDInfo):
"""
    Stores a weakref to a function and the (argspec, dispatcher)
    pairs it is bound to.
"""
def __init__(self, f, dispatcher, argspec):
"""
:param f: callback function to call
"""
self._wf = weakref.ref(f)
self.bound_to = defaultdict(set)
self.bind_to(argspec, dispatcher)
MNDInfo.__init__(self, "function")
def bind_to(self, argspec, dispatcher):
"""
Add our function to dispatcher
"""
self.bound_to[argspec.key].add((argspec, dispatcher))
dispatcher.bind(self.f, argspec)
@property
def f(self):
return self._wf()
def unbind(self):
"""
Unbind from dispatchers and target function.
:return: set of tuples containing [argspec, dispatcher]
"""
args_dispatchers = set()
f = self._wf()
if f is not None:
for ad_list in self.bound_to.values():
args_dispatchers.update(ad_list)
for argspec, dispatcher in ad_list:
dispatcher.unbind(self.f, argspec)
del f.__dict__['__mnd__']
self.bound_to = {}
return args_dispatchers
class MNDMethod(MNDInfo):
def __init__(self, m, dispatcher, argspec):
"""
:param m: callback method to call
:param dispatcher: initial dispatcher
"""
self.bound_to = defaultdict(set)
self.bind_to(m, argspec, dispatcher)
MNDInfo.__init__(self, "method")
def bind_to(self, instancemethod, argspec, dispatcher):
"""
Add dispatcher for argspec
"""
self.bound_to[argspec.key].add((argspec, dispatcher))
dispatcher.bind(instancemethod, argspec)
class MNDClass(MNDInfo):
def __init__(self, bind_to):
MNDInfo.__init__(self, "class")
self.bind_to = bind_to
def bind_handler_methods(self):
for name, ad_list in self.__mnd__.bind_to.items():
m = getattr(self, name)
for argspec, dispatcher in ad_list:
mnd = m.__dict__.get('__mnd__')
if mnd is None:
mnd = MNDMethod(m, dispatcher, argspec)
m.__dict__['__mnd__'] = mnd
def base_mnds(bases):
"""
:param bases: sequence of base classes
:yield: mnd of any base classes
"""
for base in bases:
mnd = getattr(base, "__mnd__", None)
if mnd is not None:
yield mnd
class Handler(type):
"""
Metaclass enables instance methods to be used as handlers.
"""
def __new__(meta, name, bases, dct):
bind_to = defaultdict(set) # { method_name: ((argspec, dispatcher)...)}
for mnd in base_mnds(bases):
bind_to.update(mnd.bind_to)
for mname, member in dct.items():
mnd = getattr(member, "__mnd__", None)
if mnd is not None and mnd.is_function:
args_dispatchers = mnd.unbind() # set
bind_to[mname].update(args_dispatchers) # ((argspec, dispatcher)...)
dct['__mnd__'] = MNDClass(bind_to)
# wrap __init__
wrapped_init = dct.get('__init__')
if wrapped_init is None:
def wrapped_init(self, *args, **kwargs):
pass
def __init__(self, *args, **kwargs):
bind_handler_methods(self)
wrapped_init(self, *args, **kwargs)
dct['__init__'] = __init__
return super(Handler, meta).__new__(meta, name, bases, dct)
def __init__(cls, name, bases, dct):
super(Handler, cls).__init__(name, bases, dct)
def bind_function(f, dispatcher, *accept_args, **accept_kwargs):
"""
Bind a function to a dispatcher.
    Takes accept_args and accept_kwargs, creates an ArgSpec instance and
    attaches it to the MNDFunction that annotates the function.
    :param f: function to wrap
    :param dispatcher: dispatcher to bind the function to
    :param accept_args: positional args to match on
    :param accept_kwargs: keyword args to match on
    :return: the annotated function
"""
argspec = ArgSpec(None, *accept_args, **accept_kwargs)
mnd = MNDFunction(f, dispatcher, argspec)
f.__mnd__ = mnd
return f
def bind_instancemethod(m, dispatcher, *accept_args, **accept_kwargs):
"""
    Bind an instance method to a dispatcher.
    Takes accept_args and accept_kwargs, creates an ArgSpec instance and
    attaches it to the MNDMethod that annotates the method.
    :param m: instance method to wrap
    :param dispatcher: dispatcher to bind the method to
    :param accept_args: positional args to match on
    :param accept_kwargs: keyword args to match on
    :return: the annotated method
"""
argspec = ArgSpec(None, *accept_args, **accept_kwargs)
mnd = MNDMethod(m, dispatcher, argspec)
m.__dict__['__mnd__'] = mnd
return m
def handle(dispatcher, *accept_args, **accept_kwargs):
"""
    :param dispatcher: dispatcher to receive events from
    :param accept_args: args to match on
    :param accept_kwargs: kwargs to match on
    Creates an MNDFunction instance containing the
    argspec and adds the function to the dispatcher.
"""
def bind_function_later(f):
bind_function(f, dispatcher, *accept_args, **accept_kwargs)
return f
return bind_function_later
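# Usage sketch (illustrative): `dispatcher` stands for any object exposing the
# bind(callback, argspec) / unbind(callback, argspec) interface relied on above;
# the event arguments are made up and the class uses Python 3 metaclass syntax.
#
#     dispatcher = ...  # a dispatcher instance from this package
#
#     @handle(dispatcher, "click", button=1)
#     def on_left_click(*args, **kwargs):
#         print("left click", args, kwargs)
#
#     class App(metaclass=Handler):
#         @handle(dispatcher, "quit")
#         def on_quit(self, *args, **kwargs):
#             print("quitting")
#
#     app = App()  # the wrapped __init__ binds App.on_quit for this instance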
| mit | -7,329,817,114,201,264,000 | 27.747826 | 85 | 0.598306 | false |
ofirpicazo/solitario | bin/generate_cards.py | 1 | 8706 | #!/usr/bin/python
# coding: utf-8
import argparse
import sys
SUIT_MAP = {
'club': '♣',
'diamond': '♦',
'heart': '♥',
'spade': '♠',
}
templateA = """
<div class="card %(suit)s" id="%(id)s" data-suit="%(suit)s" data-number="%(number)s">
<div class="flipper">
<div class="front">
<div class="corner top">
<span class="number">%(number)s</span>
<span>%(symbol)s</span>
</div>
<span class="suit middle center">%(symbol)s</span>
<div class="corner bottom">
<span class="number">%(number)s</span>
<span>%(symbol)s</span>
</div>
</div>
<div class="back"></div>
</div>
</div>"""
template2 = """
<div class="card %(suit)s" id="%(id)s" data-suit="%(suit)s" data-number="%(number)s">
<div class="flipper">
<div class="front">
<div class="corner top">
<span class="number">%(number)s</span>
<span>%(symbol)s</span>
</div>
<span class="suit top center">%(symbol)s</span>
<span class="suit bottom center">%(symbol)s</span>
<div class="corner bottom">
<span class="number">%(number)s</span>
<span>%(symbol)s</span>
</div>
</div>
<div class="back"></div>
</div>
</div>"""
template3 = """
<div class="card %(suit)s" id="%(id)s" data-suit="%(suit)s" data-number="%(number)s">
<div class="flipper">
<div class="front">
<div class="corner top">
<span class="number">%(number)s</span>
<span>%(symbol)s</span>
</div>
<span class="suit top center">%(symbol)s</span>
<span class="suit middle center">%(symbol)s</span>
<span class="suit bottom center">%(symbol)s</span>
<div class="corner bottom">
<span class="number">%(number)s</span>
<span>%(symbol)s</span>
</div>
</div>
<div class="back"></div>
</div>
</div>"""
template4 = """
<div class="card %(suit)s" id="%(id)s" data-suit="%(suit)s" data-number="%(number)s">
<div class="flipper">
<div class="front">
<div class="corner top">
<span class="number">%(number)s</span>
<span>%(symbol)s</span>
</div>
<span class="suit top left">%(symbol)s</span>
<span class="suit top right">%(symbol)s</span>
<span class="suit bottom left">%(symbol)s</span>
<span class="suit bottom right">%(symbol)s</span>
<div class="corner bottom">
<span class="number">%(number)s</span>
<span>%(symbol)s</span>
</div>
</div>
<div class="back"></div>
</div>
</div>"""
template5 = """
<div class="card %(suit)s" id="%(id)s" data-suit="%(suit)s" data-number="%(number)s">
<div class="flipper">
<div class="front">
<div class="corner top">
<span class="number">%(number)s</span>
<span>%(symbol)s</span>
</div>
<span class="suit top left">%(symbol)s</span>
<span class="suit top right">%(symbol)s</span>
<span class="suit middle center">%(symbol)s</span>
<span class="suit bottom left">%(symbol)s</span>
<span class="suit bottom right">%(symbol)s</span>
<div class="corner bottom">
<span class="number">%(number)s</span>
<span>%(symbol)s</span>
</div>
</div>
<div class="back"></div>
</div>
</div>"""
template6 = """
<div class="card %(suit)s" id="%(id)s" data-suit="%(suit)s" data-number="%(number)s">
<div class="flipper">
<div class="front">
<div class="corner top">
<span class="number">%(number)s</span>
<span>%(symbol)s</span>
</div>
<span class="suit top left">%(symbol)s</span>
<span class="suit top right">%(symbol)s</span>
<span class="suit middle left">%(symbol)s</span>
<span class="suit middle right">%(symbol)s</span>
<span class="suit bottom left">%(symbol)s</span>
<span class="suit bottom right">%(symbol)s</span>
<div class="corner bottom">
<span class="number">%(number)s</span>
<span>%(symbol)s</span>
</div>
</div>
<div class="back"></div>
</div>
</div>"""
template7 = """
<div class="card %(suit)s" id="%(id)s" data-suit="%(suit)s" data-number="%(number)s">
<div class="flipper">
<div class="front">
<div class="corner top">
<span class="number">%(number)s</span>
<span>%(symbol)s</span>
</div>
<span class="suit top left">%(symbol)s</span>
<span class="suit top right">%(symbol)s</span>
<span class="suit under-top center">%(symbol)s</span>
<span class="suit middle left">%(symbol)s</span>
<span class="suit middle right">%(symbol)s</span>
<span class="suit bottom left">%(symbol)s</span>
<span class="suit bottom right">%(symbol)s</span>
<div class="corner bottom">
<span class="number">%(number)s</span>
<span>%(symbol)s</span>
</div>
</div>
<div class="back"></div>
</div>
</div>"""
template8 = """
<div class="card %(suit)s" id="%(id)s" data-suit="%(suit)s" data-number="%(number)s">
<div class="flipper">
<div class="front">
<div class="corner top">
<span class="number">%(number)s</span>
<span>%(symbol)s</span>
</div>
<span class="suit top left">%(symbol)s</span>
<span class="suit top right">%(symbol)s</span>
<span class="suit under-top center">%(symbol)s</span>
<span class="suit middle left">%(symbol)s</span>
<span class="suit middle right">%(symbol)s</span>
<span class="suit over-bottom center">%(symbol)s</span>
<span class="suit bottom left">%(symbol)s</span>
<span class="suit bottom right">%(symbol)s</span>
<div class="corner bottom">
<span class="number">%(number)s</span>
<span>%(symbol)s</span>
</div>
</div>
<div class="back"></div>
</div>
</div>"""
template9 = """
<div class="card %(suit)s" id="%(id)s" data-suit="%(suit)s" data-number="%(number)s">
<div class="flipper">
<div class="front">
<div class="corner top">
<span class="number">%(number)s</span>
<span>%(symbol)s</span>
</div>
<span class="suit top left">%(symbol)s</span>
<span class="suit top right">%(symbol)s</span>
<span class="suit under-top center">%(symbol)s</span>
<span class="suit over-middle left">%(symbol)s</span>
<span class="suit over-middle right">%(symbol)s</span>
<span class="suit under-middle left">%(symbol)s</span>
<span class="suit under-middle right">%(symbol)s</span>
<span class="suit bottom left">%(symbol)s</span>
<span class="suit bottom right">%(symbol)s</span>
<div class="corner bottom">
<span class="number">%(number)s</span>
<span>%(symbol)s</span>
</div>
</div>
<div class="back"></div>
</div>
</div>"""
template10 = """
<div class="card %(suit)s" id="%(id)s" data-suit="%(suit)s" data-number="%(number)s">
<div class="flipper">
<div class="front">
<div class="corner top">
<span class="number">%(number)s</span>
<span>%(symbol)s</span>
</div>
<span class="suit top left">%(symbol)s</span>
<span class="suit top right">%(symbol)s</span>
<span class="suit under-top center">%(symbol)s</span>
<span class="suit over-middle left">%(symbol)s</span>
<span class="suit over-middle right">%(symbol)s</span>
<span class="suit under-middle left">%(symbol)s</span>
<span class="suit under-middle right">%(symbol)s</span>
<span class="suit over-bottom center">%(symbol)s</span>
<span class="suit bottom left">%(symbol)s</span>
<span class="suit bottom right">%(symbol)s</span>
<div class="corner bottom">
<span class="number">%(number)s</span>
<span>%(symbol)s</span>
</div>
</div>
<div class="back"></div>
</div>
</div>"""
CARD_TEMPLATES = (
('A', templateA),
('2', template2),
('3', template3),
('4', template4),
('5', template5),
('6', template6),
('7', template7),
('8', template8),
('9', template9),
('10', template10),
('J', templateA),
('Q', templateA),
('K', templateA),
)
def main(args):
parser = argparse.ArgumentParser(description='Create card templates')
parser.add_argument("suit", type=str, choices=SUIT_MAP.keys(),
help="Suit to create templates for")
args = parser.parse_args(args)
for number, template in CARD_TEMPLATES:
symbol = SUIT_MAP[args.suit]
id = args.suit[0] + number.lower() # e.g. d9 for diamond 9
print template % {'suit': args.suit,
'number': number,
'symbol': symbol,
'id': id}
if __name__ == '__main__':
main(sys.argv[1:])
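# Example invocation (one suit per run; the script targets Python 2 since it
# uses the print statement):
#
#     python generate_cards.py heart > hearts.html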
| mit | 7,593,103,439,347,813,000 | 30.977941 | 85 | 0.557714 | false |
Aahung/Handbook-for-Programming-Contest | source/conf.py | 1 | 9229 | # -*- coding: utf-8 -*-
#
# sphinx test documentation build configuration file, created by
# sphinx-quickstart on Sat May 9 20:23:49 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.mathjax',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Handbook for Programming Contest'
copyright = u'2015, Xinhong'
author = u'Xinhong'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1 alpha'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'handbookdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'handbook.tex', u'Handbook for Programming Contest',
u'Xinhong', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'handbook', u'Handbook for Programming Contest',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'handbook', u'Handbook for Programming Contest',
author, 'handbook', 'Know some quick killer.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| cc0-1.0 | -6,724,777,362,360,831,000 | 31.269231 | 79 | 0.707661 | false |
brython-dev/brython | www/src/Lib/threading.py | 1 | 51971 | """Thread module emulating a subset of Java's threading model."""
import os as _os
import sys as _sys
import _thread
import functools
from time import monotonic as _time
from _weakrefset import WeakSet
from itertools import islice as _islice, count as _count
try:
from _collections import deque as _deque
except ImportError:
from collections import deque as _deque
# Note regarding PEP 8 compliant names
# This threading model was originally inspired by Java, and inherited
# the convention of camelCase function and method names from that
# language. Those original names are not in any imminent danger of
# being deprecated (even for Py3k),so this module provides them as an
# alias for the PEP 8 compliant names
# Note that using the new PEP 8 compliant names facilitates substitution
# with the multiprocessing module, which doesn't provide the old
# Java inspired names.
__all__ = ['get_ident', 'active_count', 'Condition', 'current_thread',
'enumerate', 'main_thread', 'TIMEOUT_MAX',
'Event', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Thread',
'Barrier', 'BrokenBarrierError', 'Timer', 'ThreadError',
'setprofile', 'settrace', 'local', 'stack_size',
'excepthook', 'ExceptHookArgs']
# Rename some stuff so "from threading import *" is safe
_start_new_thread = _thread.start_new_thread
_allocate_lock = _thread.allocate_lock
_set_sentinel = _thread._set_sentinel
get_ident = _thread.get_ident
try:
get_native_id = _thread.get_native_id
_HAVE_THREAD_NATIVE_ID = True
__all__.append('get_native_id')
except AttributeError:
_HAVE_THREAD_NATIVE_ID = False
ThreadError = _thread.error
try:
_CRLock = _thread.RLock
except AttributeError:
_CRLock = None
TIMEOUT_MAX = _thread.TIMEOUT_MAX
del _thread
# Support for profile and trace hooks
_profile_hook = None
_trace_hook = None
def setprofile(func):
"""Set a profile function for all threads started from the threading module.
The func will be passed to sys.setprofile() for each thread, before its
run() method is called.
"""
global _profile_hook
_profile_hook = func
def settrace(func):
"""Set a trace function for all threads started from the threading module.
The func will be passed to sys.settrace() for each thread, before its run()
method is called.
"""
global _trace_hook
_trace_hook = func
# Synchronization classes
Lock = _allocate_lock
def RLock(*args, **kwargs):
"""Factory function that returns a new reentrant lock.
A reentrant lock must be released by the thread that acquired it. Once a
thread has acquired a reentrant lock, the same thread may acquire it again
without blocking; the thread must release it once for each time it has
acquired it.
"""
if _CRLock is None:
return _PyRLock(*args, **kwargs)
return _CRLock(*args, **kwargs)
class _RLock:
"""This class implements reentrant lock objects.
A reentrant lock must be released by the thread that acquired it. Once a
thread has acquired a reentrant lock, the same thread may acquire it
again without blocking; the thread must release it once for each time it
has acquired it.
"""
def __init__(self):
self._block = _allocate_lock()
self._owner = None
self._count = 0
def __repr__(self):
owner = self._owner
try:
owner = _active[owner].name
except KeyError:
pass
return "<%s %s.%s object owner=%r count=%d at %s>" % (
"locked" if self._block.locked() else "unlocked",
self.__class__.__module__,
self.__class__.__qualname__,
owner,
self._count,
hex(id(self))
)
def _at_fork_reinit(self):
self._block._at_fork_reinit()
self._owner = None
self._count = 0
def acquire(self, blocking=True, timeout=-1):
"""Acquire a lock, blocking or non-blocking.
When invoked without arguments: if this thread already owns the lock,
increment the recursion level by one, and return immediately. Otherwise,
if another thread owns the lock, block until the lock is unlocked. Once
the lock is unlocked (not owned by any thread), then grab ownership, set
the recursion level to one, and return. If more than one thread is
blocked waiting until the lock is unlocked, only one at a time will be
able to grab ownership of the lock. There is no return value in this
case.
When invoked with the blocking argument set to true, do the same thing
as when called without arguments, and return true.
When invoked with the blocking argument set to false, do not block. If a
call without an argument would block, return false immediately;
otherwise, do the same thing as when called without arguments, and
return true.
When invoked with the floating-point timeout argument set to a positive
value, block for at most the number of seconds specified by timeout
and as long as the lock cannot be acquired. Return true if the lock has
been acquired, false if the timeout has elapsed.
"""
me = get_ident()
if self._owner == me:
self._count += 1
return 1
rc = self._block.acquire(blocking, timeout)
if rc:
self._owner = me
self._count = 1
return rc
__enter__ = acquire
def release(self):
"""Release a lock, decrementing the recursion level.
If after the decrement it is zero, reset the lock to unlocked (not owned
by any thread), and if any other threads are blocked waiting for the
lock to become unlocked, allow exactly one of them to proceed. If after
the decrement the recursion level is still nonzero, the lock remains
locked and owned by the calling thread.
Only call this method when the calling thread owns the lock. A
RuntimeError is raised if this method is called when the lock is
unlocked.
There is no return value.
"""
if self._owner != get_ident():
raise RuntimeError("cannot release un-acquired lock")
self._count = count = self._count - 1
if not count:
self._owner = None
self._block.release()
def __exit__(self, t, v, tb):
self.release()
# Internal methods used by condition variables
def _acquire_restore(self, state):
self._block.acquire()
self._count, self._owner = state
def _release_save(self):
if self._count == 0:
raise RuntimeError("cannot release un-acquired lock")
count = self._count
self._count = 0
owner = self._owner
self._owner = None
self._block.release()
return (count, owner)
def _is_owned(self):
return self._owner == get_ident()
_PyRLock = _RLock
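# Usage sketch (illustrative): the same thread may take an RLock twice without
# deadlocking, releasing once per acquisition.
#
#     rlock = RLock()
#
#     def outer():
#         with rlock:        # recursion level 1
#             inner()
#
#     def inner():
#         with rlock:        # same owner: level 2, does not block
#             pass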
class Condition:
"""Class that implements a condition variable.
A condition variable allows one or more threads to wait until they are
notified by another thread.
If the lock argument is given and not None, it must be a Lock or RLock
object, and it is used as the underlying lock. Otherwise, a new RLock object
is created and used as the underlying lock.
"""
def __init__(self, lock=None):
if lock is None:
lock = RLock()
self._lock = lock
# Export the lock's acquire() and release() methods
self.acquire = lock.acquire
self.release = lock.release
# If the lock defines _release_save() and/or _acquire_restore(),
# these override the default implementations (which just call
# release() and acquire() on the lock). Ditto for _is_owned().
try:
self._release_save = lock._release_save
except AttributeError:
pass
try:
self._acquire_restore = lock._acquire_restore
except AttributeError:
pass
try:
self._is_owned = lock._is_owned
except AttributeError:
pass
self._waiters = _deque()
def _at_fork_reinit(self):
self._lock._at_fork_reinit()
self._waiters.clear()
def __enter__(self):
return self._lock.__enter__()
def __exit__(self, *args):
return self._lock.__exit__(*args)
def __repr__(self):
return "<Condition(%s, %d)>" % (self._lock, len(self._waiters))
def _release_save(self):
self._lock.release() # No state to save
def _acquire_restore(self, x):
self._lock.acquire() # Ignore saved state
def _is_owned(self):
# Return True if lock is owned by current_thread.
# This method is called only if _lock doesn't have _is_owned().
if self._lock.acquire(False):
self._lock.release()
return False
else:
return True
def wait(self, timeout=None):
"""Wait until notified or until a timeout occurs.
If the calling thread has not acquired the lock when this method is
called, a RuntimeError is raised.
This method releases the underlying lock, and then blocks until it is
awakened by a notify() or notify_all() call for the same condition
variable in another thread, or until the optional timeout occurs. Once
awakened or timed out, it re-acquires the lock and returns.
When the timeout argument is present and not None, it should be a
floating point number specifying a timeout for the operation in seconds
(or fractions thereof).
When the underlying lock is an RLock, it is not released using its
release() method, since this may not actually unlock the lock when it
was acquired multiple times recursively. Instead, an internal interface
of the RLock class is used, which really unlocks it even when it has
been recursively acquired several times. Another internal interface is
then used to restore the recursion level when the lock is reacquired.
"""
if not self._is_owned():
raise RuntimeError("cannot wait on un-acquired lock")
waiter = _allocate_lock()
waiter.acquire()
self._waiters.append(waiter)
saved_state = self._release_save()
gotit = False
try: # restore state no matter what (e.g., KeyboardInterrupt)
if timeout is None:
waiter.acquire()
gotit = True
else:
if timeout > 0:
gotit = waiter.acquire(True, timeout)
else:
gotit = waiter.acquire(False)
return gotit
finally:
self._acquire_restore(saved_state)
if not gotit:
try:
self._waiters.remove(waiter)
except ValueError:
pass
def wait_for(self, predicate, timeout=None):
"""Wait until a condition evaluates to True.
predicate should be a callable which result will be interpreted as a
boolean value. A timeout may be provided giving the maximum time to
wait.
"""
endtime = None
waittime = timeout
result = predicate()
while not result:
if waittime is not None:
if endtime is None:
endtime = _time() + waittime
else:
waittime = endtime - _time()
if waittime <= 0:
break
self.wait(waittime)
result = predicate()
return result
def notify(self, n=1):
"""Wake up one or more threads waiting on this condition, if any.
If the calling thread has not acquired the lock when this method is
called, a RuntimeError is raised.
This method wakes up at most n of the threads waiting for the condition
variable; it is a no-op if no threads are waiting.
"""
if not self._is_owned():
raise RuntimeError("cannot notify on un-acquired lock")
all_waiters = self._waiters
waiters_to_notify = _deque(_islice(all_waiters, n))
if not waiters_to_notify:
return
for waiter in waiters_to_notify:
waiter.release()
try:
all_waiters.remove(waiter)
except ValueError:
pass
def notify_all(self):
"""Wake up all threads waiting on this condition.
If the calling thread has not acquired the lock when this method
is called, a RuntimeError is raised.
"""
self.notify(len(self._waiters))
notifyAll = notify_all
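# Usage sketch (illustrative): a consumer waits on the condition until a
# producer appends an item and notifies it.
#
#     items = []
#     cv = Condition()
#
#     def consumer():
#         with cv:
#             cv.wait_for(lambda: items)   # lock is released while waiting
#             return items.pop()
#
#     def producer(item):
#         with cv:
#             items.append(item)
#             cv.notify()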
class Semaphore:
"""This class implements semaphore objects.
Semaphores manage a counter representing the number of release() calls minus
the number of acquire() calls, plus an initial value. The acquire() method
blocks if necessary until it can return without making the counter
negative. If not given, value defaults to 1.
"""
# After Tim Peters' semaphore class, but not quite the same (no maximum)
def __init__(self, value=1):
if value < 0:
raise ValueError("semaphore initial value must be >= 0")
self._cond = Condition(Lock())
self._value = value
def acquire(self, blocking=True, timeout=None):
"""Acquire a semaphore, decrementing the internal counter by one.
When invoked without arguments: if the internal counter is larger than
zero on entry, decrement it by one and return immediately. If it is zero
on entry, block, waiting until some other thread has called release() to
make it larger than zero. This is done with proper interlocking so that
if multiple acquire() calls are blocked, release() will wake exactly one
of them up. The implementation may pick one at random, so the order in
which blocked threads are awakened should not be relied on. There is no
return value in this case.
When invoked with blocking set to true, do the same thing as when called
without arguments, and return true.
When invoked with blocking set to false, do not block. If a call without
an argument would block, return false immediately; otherwise, do the
same thing as when called without arguments, and return true.
When invoked with a timeout other than None, it will block for at
most timeout seconds. If acquire does not complete successfully in
that interval, return false. Return true otherwise.
"""
if not blocking and timeout is not None:
raise ValueError("can't specify timeout for non-blocking acquire")
rc = False
endtime = None
with self._cond:
while self._value == 0:
if not blocking:
break
if timeout is not None:
if endtime is None:
endtime = _time() + timeout
else:
timeout = endtime - _time()
if timeout <= 0:
break
self._cond.wait(timeout)
else:
self._value -= 1
rc = True
return rc
__enter__ = acquire
def release(self, n=1):
"""Release a semaphore, incrementing the internal counter by one or more.
When the counter is zero on entry and another thread is waiting for it
to become larger than zero again, wake up that thread.
"""
if n < 1:
raise ValueError('n must be one or more')
with self._cond:
self._value += n
for i in range(n):
self._cond.notify()
def __exit__(self, t, v, tb):
self.release()
class BoundedSemaphore(Semaphore):
"""Implements a bounded semaphore.
A bounded semaphore checks to make sure its current value doesn't exceed its
initial value. If it does, ValueError is raised. In most situations
semaphores are used to guard resources with limited capacity.
If the semaphore is released too many times it's a sign of a bug. If not
given, value defaults to 1.
Like regular semaphores, bounded semaphores manage a counter representing
the number of release() calls minus the number of acquire() calls, plus an
initial value. The acquire() method blocks if necessary until it can return
without making the counter negative. If not given, value defaults to 1.
"""
def __init__(self, value=1):
Semaphore.__init__(self, value)
self._initial_value = value
def release(self, n=1):
"""Release a semaphore, incrementing the internal counter by one or more.
When the counter is zero on entry and another thread is waiting for it
to become larger than zero again, wake up that thread.
If the number of releases exceeds the number of acquires,
raise a ValueError.
"""
if n < 1:
raise ValueError('n must be one or more')
with self._cond:
if self._value + n > self._initial_value:
raise ValueError("Semaphore released too many times")
self._value += n
for i in range(n):
self._cond.notify()
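# Usage sketch (illustrative): limit concurrent access to a pool of three
# resources; use_connection() is a placeholder.
#
#     pool = BoundedSemaphore(3)
#
#     def worker():
#         with pool:            # blocks while three workers already hold it
#             use_connection()
#     # releasing more often than acquiring raises ValueError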
class Event:
"""Class implementing event objects.
Events manage a flag that can be set to true with the set() method and reset
to false with the clear() method. The wait() method blocks until the flag is
true. The flag is initially false.
"""
# After Tim Peters' event class (without is_posted())
def __init__(self):
self._cond = Condition(Lock())
self._flag = False
def _at_fork_reinit(self):
# Private method called by Thread._reset_internal_locks()
self._cond._at_fork_reinit()
def is_set(self):
"""Return true if and only if the internal flag is true."""
return self._flag
isSet = is_set
def set(self):
"""Set the internal flag to true.
All threads waiting for it to become true are awakened. Threads
that call wait() once the flag is true will not block at all.
"""
with self._cond:
self._flag = True
self._cond.notify_all()
def clear(self):
"""Reset the internal flag to false.
Subsequently, threads calling wait() will block until set() is called to
set the internal flag to true again.
"""
with self._cond:
self._flag = False
def wait(self, timeout=None):
"""Block until the internal flag is true.
If the internal flag is true on entry, return immediately. Otherwise,
block until another thread calls set() to set the flag to true, or until
the optional timeout occurs.
When the timeout argument is present and not None, it should be a
floating point number specifying a timeout for the operation in seconds
(or fractions thereof).
This method returns the internal flag on exit, so it will always return
True except if a timeout is given and the operation times out.
"""
with self._cond:
signaled = self._flag
if not signaled:
signaled = self._cond.wait(timeout)
return signaled
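# Usage sketch (illustrative): one thread blocks until another signals
# readiness; prepare() and do_work() are placeholders.
#
#     ready = Event()
#
#     def waiter():
#         ready.wait()          # blocks until set() is called
#         do_work()
#
#     def starter():
#         prepare()
#         ready.set()           # wakes every waiting thread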
# A barrier class. Inspired in part by the pthread_barrier_* api and
# the CyclicBarrier class from Java. See
# http://sourceware.org/pthreads-win32/manual/pthread_barrier_init.html and
# http://java.sun.com/j2se/1.5.0/docs/api/java/util/concurrent/
# CyclicBarrier.html
# for information.
# We maintain two main states, 'filling' and 'draining' enabling the barrier
# to be cyclic. Threads are not allowed into it until it has fully drained
# since the previous cycle. In addition, a 'resetting' state exists which is
# similar to 'draining' except that threads leave with a BrokenBarrierError,
# and a 'broken' state in which all threads get the exception.
class Barrier:
"""Implements a Barrier.
Useful for synchronizing a fixed number of threads at known synchronization
points. Threads block on 'wait()' and are simultaneously awoken once they
have all made that call.
"""
def __init__(self, parties, action=None, timeout=None):
"""Create a barrier, initialised to 'parties' threads.
'action' is a callable which, when supplied, will be called by one of
the threads after they have all entered the barrier and just prior to
releasing them all. If a 'timeout' is provided, it is used as the
default for all subsequent 'wait()' calls.
"""
self._cond = Condition(Lock())
self._action = action
self._timeout = timeout
self._parties = parties
self._state = 0 #0 filling, 1, draining, -1 resetting, -2 broken
self._count = 0
def wait(self, timeout=None):
"""Wait for the barrier.
When the specified number of threads have started waiting, they are all
simultaneously awoken. If an 'action' was provided for the barrier, one
of the threads will have executed that callback prior to returning.
Returns an individual index number from 0 to 'parties-1'.
"""
if timeout is None:
timeout = self._timeout
with self._cond:
self._enter() # Block while the barrier drains.
index = self._count
self._count += 1
try:
if index + 1 == self._parties:
# We release the barrier
self._release()
else:
# We wait until someone releases us
self._wait(timeout)
return index
finally:
self._count -= 1
# Wake up any threads waiting for barrier to drain.
self._exit()
# Block until the barrier is ready for us, or raise an exception
# if it is broken.
def _enter(self):
while self._state in (-1, 1):
# It is draining or resetting, wait until done
self._cond.wait()
#see if the barrier is in a broken state
if self._state < 0:
raise BrokenBarrierError
assert self._state == 0
# Optionally run the 'action' and release the threads waiting
# in the barrier.
def _release(self):
try:
if self._action:
self._action()
# enter draining state
self._state = 1
self._cond.notify_all()
except:
#an exception during the _action handler. Break and reraise
self._break()
raise
# Wait in the barrier until we are released. Raise an exception
# if the barrier is reset or broken.
def _wait(self, timeout):
if not self._cond.wait_for(lambda : self._state != 0, timeout):
#timed out. Break the barrier
self._break()
raise BrokenBarrierError
if self._state < 0:
raise BrokenBarrierError
assert self._state == 1
# If we are the last thread to exit the barrier, signal any threads
# waiting for the barrier to drain.
def _exit(self):
if self._count == 0:
if self._state in (-1, 1):
#resetting or draining
self._state = 0
self._cond.notify_all()
def reset(self):
"""Reset the barrier to the initial state.
Any threads currently waiting will get the BrokenBarrier exception
raised.
"""
with self._cond:
if self._count > 0:
if self._state == 0:
#reset the barrier, waking up threads
self._state = -1
elif self._state == -2:
#was broken, set it to reset state
#which clears when the last thread exits
self._state = -1
else:
self._state = 0
self._cond.notify_all()
def abort(self):
"""Place the barrier into a 'broken' state.
Useful in case of error. Any currently waiting threads and threads
attempting to 'wait()' will have BrokenBarrierError raised.
"""
with self._cond:
self._break()
def _break(self):
# An internal error was detected. The barrier is set to
# a broken state all parties awakened.
self._state = -2
self._cond.notify_all()
@property
def parties(self):
"""Return the number of threads required to trip the barrier."""
return self._parties
@property
def n_waiting(self):
"""Return the number of threads currently waiting at the barrier."""
# We don't need synchronization here since this is an ephemeral result
# anyway. It returns the correct value in the steady state.
if self._state == 0:
return self._count
return 0
@property
def broken(self):
"""Return True if the barrier is in a broken state."""
return self._state == -2
# exception raised by the Barrier class
class BrokenBarrierError(RuntimeError):
pass
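# Usage sketch (illustrative): three worker threads rendezvous before moving
# to the next phase; prepare_phase_one() is a placeholder.
#
#     barrier = Barrier(3)
#
#     def worker():
#         prepare_phase_one()
#         index = barrier.wait()    # all three resume together
#         if index == 0:
#             print("phase two starts")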
# Helper to generate new thread names
_counter = _count().__next__
_counter() # Consume 0 so first non-main thread has id 1.
def _newname(template="Thread-%d"):
return template % _counter()
# Active thread administration
_active_limbo_lock = _allocate_lock()
_active = {} # maps thread id to Thread object
_limbo = {}
_dangling = WeakSet()
# Set of Thread._tstate_lock locks of non-daemon threads used by _shutdown()
# to wait until all Python thread states get deleted:
# see Thread._set_tstate_lock().
_shutdown_locks_lock = _allocate_lock()
_shutdown_locks = set()
# Main class for threads
class Thread:
"""A class that represents a thread of control.
This class can be safely subclassed in a limited fashion. There are two ways
to specify the activity: by passing a callable object to the constructor, or
by overriding the run() method in a subclass.
"""
_initialized = False
def __init__(self, group=None, target=None, name=None,
args=(), kwargs=None, *, daemon=None):
"""This constructor should always be called with keyword arguments. Arguments are:
*group* should be None; reserved for future extension when a ThreadGroup
class is implemented.
*target* is the callable object to be invoked by the run()
method. Defaults to None, meaning nothing is called.
*name* is the thread name. By default, a unique name is constructed of
the form "Thread-N" where N is a small decimal number.
*args* is the argument tuple for the target invocation. Defaults to ().
*kwargs* is a dictionary of keyword arguments for the target
invocation. Defaults to {}.
If a subclass overrides the constructor, it must make sure to invoke
the base class constructor (Thread.__init__()) before doing anything
else to the thread.
"""
assert group is None, "group argument must be None for now"
if kwargs is None:
kwargs = {}
self._target = target
self._name = str(name or _newname())
self._args = args
self._kwargs = kwargs
if daemon is not None:
self._daemonic = daemon
else:
self._daemonic = current_thread().daemon
self._ident = None
if _HAVE_THREAD_NATIVE_ID:
self._native_id = None
self._tstate_lock = None
self._started = Event()
self._is_stopped = False
self._initialized = True
# Copy of sys.stderr used by self._invoke_excepthook()
self._stderr = _sys.stderr
self._invoke_excepthook = _make_invoke_excepthook()
# For debugging and _after_fork()
_dangling.add(self)
def _reset_internal_locks(self, is_alive):
# private! Called by _after_fork() to reset our internal locks as
# they may be in an invalid state leading to a deadlock or crash.
self._started._at_fork_reinit()
if is_alive:
self._tstate_lock._at_fork_reinit()
self._tstate_lock.acquire()
else:
# The thread isn't alive after fork: it doesn't have a tstate
# anymore.
self._is_stopped = True
self._tstate_lock = None
def __repr__(self):
assert self._initialized, "Thread.__init__() was not called"
status = "initial"
if self._started.is_set():
status = "started"
self.is_alive() # easy way to get ._is_stopped set when appropriate
if self._is_stopped:
status = "stopped"
if self._daemonic:
status += " daemon"
if self._ident is not None:
status += " %s" % self._ident
return "<%s(%s, %s)>" % (self.__class__.__name__, self._name, status)
def start(self):
"""Start the thread's activity.
It must be called at most once per thread object. It arranges for the
object's run() method to be invoked in a separate thread of control.
This method will raise a RuntimeError if called more than once on the
same thread object.
"""
if not self._initialized:
raise RuntimeError("thread.__init__() not called")
if self._started.is_set():
raise RuntimeError("threads can only be started once")
with _active_limbo_lock:
_limbo[self] = self
try:
_start_new_thread(self._bootstrap, ())
except Exception:
with _active_limbo_lock:
del _limbo[self]
raise
self._started.wait()
def run(self):
"""Method representing the thread's activity.
You may override this method in a subclass. The standard run() method
invokes the callable object passed to the object's constructor as the
target argument, if any, with sequential and keyword arguments taken
from the args and kwargs arguments, respectively.
"""
try:
if self._target:
self._target(*self._args, **self._kwargs)
finally:
# Avoid a refcycle if the thread is running a function with
# an argument that has a member that points to the thread.
del self._target, self._args, self._kwargs
def _bootstrap(self):
# Wrapper around the real bootstrap code that ignores
# exceptions during interpreter cleanup. Those typically
# happen when a daemon thread wakes up at an unfortunate
# moment, finds the world around it destroyed, and raises some
# random exception *** while trying to report the exception in
# _bootstrap_inner() below ***. Those random exceptions
# don't help anybody, and they confuse users, so we suppress
# them. We suppress them only when it appears that the world
# indeed has already been destroyed, so that exceptions in
# _bootstrap_inner() during normal business hours are properly
# reported. Also, we only suppress them for daemonic threads;
# if a non-daemonic encounters this, something else is wrong.
try:
self._bootstrap_inner()
except:
if self._daemonic and _sys is None:
return
raise
def _set_ident(self):
self._ident = get_ident()
if _HAVE_THREAD_NATIVE_ID:
def _set_native_id(self):
self._native_id = get_native_id()
def _set_tstate_lock(self):
"""
Set a lock object which will be released by the interpreter when
the underlying thread state (see pystate.h) gets deleted.
"""
self._tstate_lock = _set_sentinel()
self._tstate_lock.acquire()
if not self.daemon:
with _shutdown_locks_lock:
_shutdown_locks.add(self._tstate_lock)
def _bootstrap_inner(self):
try:
self._set_ident()
self._set_tstate_lock()
if _HAVE_THREAD_NATIVE_ID:
self._set_native_id()
self._started.set()
with _active_limbo_lock:
_active[self._ident] = self
del _limbo[self]
if _trace_hook:
_sys.settrace(_trace_hook)
if _profile_hook:
_sys.setprofile(_profile_hook)
try:
self.run()
except:
self._invoke_excepthook(self)
finally:
with _active_limbo_lock:
try:
# We don't call self._delete() because it also
# grabs _active_limbo_lock.
del _active[get_ident()]
except:
pass
def _stop(self):
# After calling ._stop(), .is_alive() returns False and .join() returns
# immediately. ._tstate_lock must be released before calling ._stop().
#
# Normal case: C code at the end of the thread's life
# (release_sentinel in _threadmodule.c) releases ._tstate_lock, and
# that's detected by our ._wait_for_tstate_lock(), called by .join()
# and .is_alive(). Any number of threads _may_ call ._stop()
# simultaneously (for example, if multiple threads are blocked in
# .join() calls), and they're not serialized. That's harmless -
# they'll just make redundant rebindings of ._is_stopped and
# ._tstate_lock. Obscure: we rebind ._tstate_lock last so that the
# "assert self._is_stopped" in ._wait_for_tstate_lock() always works
# (the assert is executed only if ._tstate_lock is None).
#
# Special case: _main_thread releases ._tstate_lock via this
# module's _shutdown() function.
lock = self._tstate_lock
if lock is not None:
assert not lock.locked()
self._is_stopped = True
self._tstate_lock = None
if not self.daemon:
with _shutdown_locks_lock:
_shutdown_locks.discard(lock)
def _delete(self):
"Remove current thread from the dict of currently running threads."
with _active_limbo_lock:
del _active[get_ident()]
# There must not be any python code between the previous line
# and after the lock is released. Otherwise a tracing function
# could try to acquire the lock again in the same thread, (in
# current_thread()), and would block.
def join(self, timeout=None):
"""Wait until the thread terminates.
This blocks the calling thread until the thread whose join() method is
called terminates -- either normally or through an unhandled exception
or until the optional timeout occurs.
When the timeout argument is present and not None, it should be a
floating point number specifying a timeout for the operation in seconds
(or fractions thereof). As join() always returns None, you must call
is_alive() after join() to decide whether a timeout happened -- if the
thread is still alive, the join() call timed out.
When the timeout argument is not present or None, the operation will
block until the thread terminates.
A thread can be join()ed many times.
join() raises a RuntimeError if an attempt is made to join the current
thread as that would cause a deadlock. It is also an error to join() a
thread before it has been started and attempts to do so raises the same
exception.
"""
if not self._initialized:
raise RuntimeError("Thread.__init__() not called")
if not self._started.is_set():
raise RuntimeError("cannot join thread before it is started")
if self is current_thread():
raise RuntimeError("cannot join current thread")
if timeout is None:
self._wait_for_tstate_lock()
else:
# the behavior of a negative timeout isn't documented, but
# historically .join(timeout=x) for x<0 has acted as if timeout=0
self._wait_for_tstate_lock(timeout=max(timeout, 0))
def _wait_for_tstate_lock(self, block=True, timeout=-1):
# Issue #18808: wait for the thread state to be gone.
# At the end of the thread's life, after all knowledge of the thread
# is removed from C data structures, C code releases our _tstate_lock.
# This method passes its arguments to _tstate_lock.acquire().
# If the lock is acquired, the C code is done, and self._stop() is
# called. That sets ._is_stopped to True, and ._tstate_lock to None.
lock = self._tstate_lock
if lock is None: # already determined that the C code is done
assert self._is_stopped
elif lock.acquire(block, timeout):
lock.release()
self._stop()
@property
def name(self):
"""A string used for identification purposes only.
It has no semantics. Multiple threads may be given the same name. The
initial name is set by the constructor.
"""
assert self._initialized, "Thread.__init__() not called"
return self._name
@name.setter
def name(self, name):
assert self._initialized, "Thread.__init__() not called"
self._name = str(name)
@property
def ident(self):
"""Thread identifier of this thread or None if it has not been started.
This is a nonzero integer. See the get_ident() function. Thread
identifiers may be recycled when a thread exits and another thread is
created. The identifier is available even after the thread has exited.
"""
assert self._initialized, "Thread.__init__() not called"
return self._ident
if _HAVE_THREAD_NATIVE_ID:
@property
def native_id(self):
"""Native integral thread ID of this thread, or None if it has not been started.
This is a non-negative integer. See the get_native_id() function.
This represents the Thread ID as reported by the kernel.
"""
assert self._initialized, "Thread.__init__() not called"
return self._native_id
def is_alive(self):
"""Return whether the thread is alive.
This method returns True just before the run() method starts until just
after the run() method terminates. The module function enumerate()
returns a list of all alive threads.
"""
assert self._initialized, "Thread.__init__() not called"
if self._is_stopped or not self._started.is_set():
return False
self._wait_for_tstate_lock(False)
return not self._is_stopped
@property
def daemon(self):
"""A boolean value indicating whether this thread is a daemon thread.
This must be set before start() is called, otherwise RuntimeError is
raised. Its initial value is inherited from the creating thread; the
main thread is not a daemon thread and therefore all threads created in
the main thread default to daemon = False.
The entire Python program exits when only daemon threads are left.
"""
assert self._initialized, "Thread.__init__() not called"
return self._daemonic
@daemon.setter
def daemon(self, daemonic):
if not self._initialized:
raise RuntimeError("Thread.__init__() not called")
if self._started.is_set():
raise RuntimeError("cannot set daemon status of active thread")
self._daemonic = daemonic
def isDaemon(self):
return self.daemon
def setDaemon(self, daemonic):
self.daemon = daemonic
def getName(self):
return self.name
def setName(self, name):
self.name = name
try:
from _thread import (_excepthook as excepthook,
_ExceptHookArgs as ExceptHookArgs)
except ImportError:
# Simple Python implementation if _thread._excepthook() is not available
from traceback import print_exception as _print_exception
from collections import namedtuple
_ExceptHookArgs = namedtuple(
'ExceptHookArgs',
'exc_type exc_value exc_traceback thread')
def ExceptHookArgs(args):
return _ExceptHookArgs(*args)
def excepthook(args, /):
"""
Handle uncaught Thread.run() exception.
"""
if args.exc_type == SystemExit:
# silently ignore SystemExit
return
if _sys is not None and _sys.stderr is not None:
stderr = _sys.stderr
elif args.thread is not None:
stderr = args.thread._stderr
if stderr is None:
# do nothing if sys.stderr is None and sys.stderr was None
# when the thread was created
return
else:
# do nothing if sys.stderr is None and args.thread is None
return
if args.thread is not None:
name = args.thread.name
else:
name = get_ident()
print(f"Exception in thread {name}:",
file=stderr, flush=True)
_print_exception(args.exc_type, args.exc_value, args.exc_traceback,
file=stderr)
stderr.flush()
def _make_invoke_excepthook():
# Create a local namespace to ensure that variables remain alive
# when _invoke_excepthook() is called, even if it is called late during
# Python shutdown. It is mostly needed for daemon threads.
old_excepthook = excepthook
old_sys_excepthook = _sys.excepthook
if old_excepthook is None:
raise RuntimeError("threading.excepthook is None")
if old_sys_excepthook is None:
raise RuntimeError("sys.excepthook is None")
sys_exc_info = _sys.exc_info
local_print = print
local_sys = _sys
def invoke_excepthook(thread):
global excepthook
try:
hook = excepthook
if hook is None:
hook = old_excepthook
args = ExceptHookArgs([*sys_exc_info(), thread])
hook(args)
except Exception as exc:
exc.__suppress_context__ = True
del exc
if local_sys is not None and local_sys.stderr is not None:
stderr = local_sys.stderr
else:
stderr = thread._stderr
local_print("Exception in threading.excepthook:",
file=stderr, flush=True)
if local_sys is not None and local_sys.excepthook is not None:
sys_excepthook = local_sys.excepthook
else:
sys_excepthook = old_sys_excepthook
sys_excepthook(*sys_exc_info())
finally:
# Break reference cycle (exception stored in a variable)
args = None
return invoke_excepthook
# The timer class was contributed by Itamar Shtull-Trauring
class Timer(Thread):
"""Call a function after a specified number of seconds:
t = Timer(30.0, f, args=None, kwargs=None)
t.start()
t.cancel() # stop the timer's action if it's still waiting
"""
def __init__(self, interval, function, args=None, kwargs=None):
Thread.__init__(self)
self.interval = interval
self.function = function
self.args = args if args is not None else []
self.kwargs = kwargs if kwargs is not None else {}
self.finished = Event()
def cancel(self):
"""Stop the timer if it hasn't finished yet."""
self.finished.set()
def run(self):
self.finished.wait(self.interval)
if not self.finished.is_set():
self.function(*self.args, **self.kwargs)
self.finished.set()
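# Illustrative usage sketch, not part of the original module: schedule a
# one-shot callback and cancel it before it fires. The _demo_* names are
# invented for this example and nothing below is executed by the module.
def _demo_timer_usage():
    fired = []
    t = Timer(5.0, fired.append, args=(True,))
    t.start()
    t.cancel()        # sets `finished` before the 5 second interval elapses
    t.join()          # run() returns promptly because finished.wait() wakes up
    return fired      # stays empty: the callback never ran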
# Special thread class to represent the main thread
class _MainThread(Thread):
def __init__(self):
Thread.__init__(self, name="MainThread", daemon=False)
self._set_tstate_lock()
self._started.set()
self._set_ident()
if _HAVE_THREAD_NATIVE_ID:
self._set_native_id()
with _active_limbo_lock:
_active[self._ident] = self
# Dummy thread class to represent threads not started here.
# These aren't garbage collected when they die, nor can they be waited for.
# If they invoke anything in threading.py that calls current_thread(), they
# leave an entry in the _active dict forever after.
# Their purpose is to return *something* from current_thread().
# They are marked as daemon threads so we won't wait for them
# when we exit (conform previous semantics).
class _DummyThread(Thread):
def __init__(self):
Thread.__init__(self, name=_newname("Dummy-%d"), daemon=True)
self._started.set()
self._set_ident()
if _HAVE_THREAD_NATIVE_ID:
self._set_native_id()
with _active_limbo_lock:
_active[self._ident] = self
def _stop(self):
pass
def is_alive(self):
assert not self._is_stopped and self._started.is_set()
return True
def join(self, timeout=None):
assert False, "cannot join a dummy thread"
# Global API functions
def current_thread():
"""Return the current Thread object, corresponding to the caller's thread of control.
If the caller's thread of control was not created through the threading
module, a dummy thread object with limited functionality is returned.
"""
try:
return _active[get_ident()]
except KeyError:
return _DummyThread()
currentThread = current_thread
def active_count():
"""Return the number of Thread objects currently alive.
The returned count is equal to the length of the list returned by
enumerate().
"""
with _active_limbo_lock:
return len(_active) + len(_limbo)
activeCount = active_count
def _enumerate():
# Same as enumerate(), but without the lock. Internal use only.
return list(_active.values()) + list(_limbo.values())
def enumerate():
"""Return a list of all Thread objects currently alive.
The list includes daemonic threads, dummy thread objects created by
current_thread(), and the main thread. It excludes terminated threads and
threads that have not yet been started.
"""
with _active_limbo_lock:
return list(_active.values()) + list(_limbo.values())
_threading_atexits = []
_SHUTTING_DOWN = False
def _register_atexit(func, *arg, **kwargs):
"""CPython internal: register *func* to be called before joining threads.
The registered *func* is called with its arguments just before all
non-daemon threads are joined in `_shutdown()`. It provides a similar
purpose to `atexit.register()`, but its functions are called prior to
threading shutdown instead of interpreter shutdown.
For similarity to atexit, the registered functions are called in reverse.
"""
if _SHUTTING_DOWN:
raise RuntimeError("can't register atexit after shutdown")
call = functools.partial(func, *arg, **kwargs)
_threading_atexits.append(call)
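# Hedged illustration, not part of the module: internal code could use
# _register_atexit() to flush pending work before non-daemon threads are
# joined at shutdown. The queue argument and helper names are invented.
def _example_register_drain(pending_queue):
    def _drain():
        while not pending_queue.empty():
            pending_queue.get_nowait()
    _register_atexit(_drain)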
from _thread import stack_size
# Create the main thread object,
# and make it available for the interpreter
# (Py_Main) as threading._shutdown.
_main_thread = _MainThread()
def _shutdown():
"""
Wait until the Python thread state of all non-daemon threads get deleted.
"""
# Obscure: other threads may be waiting to join _main_thread. That's
# dubious, but some code does it. We can't wait for C code to release
# the main thread's tstate_lock - that won't happen until the interpreter
# is nearly dead. So we release it here. Note that just calling _stop()
# isn't enough: other threads may already be waiting on _tstate_lock.
if _main_thread._is_stopped:
# _shutdown() was already called
return
global _SHUTTING_DOWN
_SHUTTING_DOWN = True
# Main thread
tlock = _main_thread._tstate_lock
# The main thread isn't finished yet, so its thread state lock can't have
# been released.
assert tlock is not None
assert tlock.locked()
tlock.release()
_main_thread._stop()
# Call registered threading atexit functions before threads are joined.
# Order is reversed, similar to atexit.
for atexit_call in reversed(_threading_atexits):
atexit_call()
    # Join all non-daemon threads
while True:
with _shutdown_locks_lock:
locks = list(_shutdown_locks)
_shutdown_locks.clear()
if not locks:
break
for lock in locks:
            # mimic Thread.join()
lock.acquire()
lock.release()
# new threads can be spawned while we were waiting for the other
# threads to complete
def main_thread():
"""Return the main thread object.
In normal conditions, the main thread is the thread from which the
Python interpreter was started.
"""
return _main_thread
# get thread-local implementation, either from the thread
# module, or from the python fallback
try:
from _thread import _local as local
except ImportError:
from _threading_local import local
def _after_fork():
"""
Cleanup threading module state that should not exist after a fork.
"""
# Reset _active_limbo_lock, in case we forked while the lock was held
# by another (non-forked) thread. http://bugs.python.org/issue874900
global _active_limbo_lock, _main_thread
global _shutdown_locks_lock, _shutdown_locks
_active_limbo_lock = _allocate_lock()
# fork() only copied the current thread; clear references to others.
new_active = {}
try:
current = _active[get_ident()]
except KeyError:
# fork() was called in a thread which was not spawned
# by threading.Thread. For example, a thread spawned
# by thread.start_new_thread().
current = _MainThread()
_main_thread = current
# reset _shutdown() locks: threads re-register their _tstate_lock below
_shutdown_locks_lock = _allocate_lock()
_shutdown_locks = set()
with _active_limbo_lock:
# Dangling thread instances must still have their locks reset,
# because someone may join() them.
threads = set(_enumerate())
threads.update(_dangling)
for thread in threads:
# Any lock/condition variable may be currently locked or in an
# invalid state, so we reinitialize them.
if thread is current:
# There is only one active thread. We reset the ident to
# its new value since it can have changed.
thread._reset_internal_locks(True)
ident = get_ident()
thread._ident = ident
new_active[ident] = thread
else:
# All the others are already stopped.
thread._reset_internal_locks(False)
thread._stop()
_limbo.clear()
_active.clear()
_active.update(new_active)
assert len(_active) == 1
if hasattr(_os, "register_at_fork"):
_os.register_at_fork(after_in_child=_after_fork)
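# Hedged usage sketch, not part of threading.py: shows how client code could
# install a custom threading.excepthook before starting a worker thread. The
# _demo_* names are invented and this helper is never called by the module.
def _demo_excepthook_usage():
    global excepthook
    def _demo_hook(args):
        print(f"thread {args.thread.name} died: {args.exc_value!r}")
    def _demo_worker():
        raise ValueError("boom")
    excepthook = _demo_hook   # read by _invoke_excepthook() when the worker raises
    worker = Thread(target=_demo_worker, name="demo-worker")
    worker.start()
    worker.join()             # the hook has already run once join() returns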
| bsd-3-clause | -4,337,143,701,189,812,000 | 33.601198 | 92 | 0.61086 | false |
darbula/django-form-designer | form_designer/migrations/0002_auto_20160216_1527.py | 1 | 1389 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('form_designer', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='formdefinition',
name='form_template_name',
field=models.CharField(blank=True, max_length=255, null=True, verbose_name='form template', choices=[(b'', 'Default'), (b'html/formdefinition/forms/as_p.html', 'as paragraphs'), (b'html/formdefinition/forms/cisco.html', 'Cisco')]),
preserve_default=True,
),
migrations.AlterField(
model_name='formdefinitionfield',
name='field_class',
field=models.CharField(max_length=100, verbose_name='field class', choices=[(b'riteh.core.form_designer.fields.TitleField', 'Title Field'), (b'django.forms.CharField', 'Text'), (b'django.forms.EmailField', 'E-mail address'), (b'django.forms.URLField', 'Web address'), (b'django.forms.IntegerField', 'Number'), (b'django.forms.DecimalField', 'Decimal number'), (b'django.forms.BooleanField', 'Yes/No'), (b'django.forms.DateField', 'Date'), (b'django.forms.ChoiceField', 'Choice'), (b'django.forms.MultipleChoiceField', 'Multiple Choice'), (b'django.forms.FileField', 'File')]),
preserve_default=True,
),
]
| bsd-3-clause | 1,623,216,130,215,411,700 | 52.423077 | 588 | 0.648668 | false |
schlegelp/pymaid | setup.py | 1 | 1845 | from setuptools import setup, find_packages
import re
VERSIONFILE = "pymaid/__init__.py"
verstrline = open(VERSIONFILE, "rt").read()
VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
mo = re.search(VSRE, verstrline, re.M)
if mo:
verstr = mo.group(1)
else:
raise RuntimeError("Unable to find version string in %s." % (VERSIONFILE,))
with open('requirements.txt') as f:
requirements = f.read().splitlines()
requirements = [l for l in requirements if not l.startswith('#')]
setup(
name='python-catmaid',
version=verstr,
packages=find_packages(),
license='GNU GPL V3',
description='Python interface to CATMAID servers',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
url='https://github.com/schlegelp/pymaid',
project_urls={
"Documentation": "http://pymaid.readthedocs.io",
"Source": "https://github.com/schlegelp/pymaid",
"Changelog": "https://pymaid.readthedocs.io/en/latest/source/whats_new.html",
},
author='Philipp Schlegel',
author_email='[email protected]',
keywords='CATMAID interface neuron navis',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
install_requires=requirements,
extras_require={'extras': ['fuzzywuzzy[speedup]~=0.17.0',
'ujson~=1.35']},
python_requires='>=3.6',
zip_safe=False
)
| gpl-3.0 | 5,979,263,114,792,834,000 | 33.166667 | 85 | 0.6271 | false |
Hawk94/dust | docs/conf.py | 1 | 7737 | # dust documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'dust'
copyright = """2017, Tom Miller"""
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'dustdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'dust.tex',
'dust Documentation',
"""Tom Miller""", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'dust', 'dust Documentation',
["""Tom Miller"""], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'dust', 'dust Documentation',
"""Tom Miller""", 'dust',
"""A short description of the project.""", 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| mit | 5,812,458,023,415,810,000 | 30.839506 | 80 | 0.693421 | false |
braddockcg/internet-in-a-box | iiab/whoosh_search.py | 1 | 3339 | import os
from whoosh.qparser import MultifieldParser
from whoosh import scoring
from .whoosh_multi_field_spelling_correction import MultiFieldQueryCorrector
import pagination_helper
def index_directory_path(base_path, zim_name):
"""Returns the directory where a ZIM file's index should be located, given
a base path where all the index files are located as well as a filename
or partial filename of the zim file.
"""
index_dir = os.path.join(base_path, os.path.splitext(os.path.basename(zim_name))[0])
return index_dir
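# For example (paths invented for illustration):
#   index_directory_path("/library/whoosh", "wikipedia_en_all.zim")
# returns "/library/whoosh/wikipedia_en_all".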
def get_query_corrections(searcher, query, qstring):
"""
Suggest alternate spelling for search terms by searching each column with
spelling correction support in turn.
:param searcher: whoosh searcher object
:param query: whoosh query object
:param qstring: search string that was passed to the query object
:returns: MultiFieldQueryCorrector with one corrector for each corrected column
"""
fieldnames = [name for name, field in searcher.schema.items() if field.spelling]
correctors = {}
for fieldname in fieldnames:
if fieldname not in correctors:
correctors[fieldname] = searcher.corrector(fieldname)
terms = []
for token in query.all_tokens():
if token.fieldname in correctors:
terms.append((token.fieldname, token.text))
return MultiFieldQueryCorrector(correctors, terms, prefix=2, maxdist=1).correct_query(query, qstring)
def deduplicate_corrections(corrections):
"""
Return list of correction that omits entries where the query is unmodified
:param corrections: list of Corrector objects
:returns: list of Corrector objects
"""
# Using values from a dictionary comprehension rather than a list comprehension in order to deduplicate
#return {c.string : c for c in corrections if c.original_query != c.query}.values()
# We can't use dictionary comprehension because we are stuck on python 2.6 for Debian stable
return dict((c.string, c) for c in corrections if c.original_query != c.query).values()
def paginated_search(ix, search_columns, query_text, page=1, pagelen=20, sort_column=None, weighting=scoring.BM25F):
"""
    Return a tuple consisting of an object that emulates an SQLAlchemy pagination object and a list of corrected query suggestions.
pagelen specifies number of hits per page
page specifies page of results (first page is 1)
"""
query_text = unicode(query_text) # Must be unicode
with ix.searcher(weighting=weighting) as searcher:
query = MultifieldParser(search_columns, ix.schema).parse(query_text)
try:
# search_page returns whoosh.searching.ResultsPage
results = searcher.search_page(query, page, pagelen=pagelen, sortedby=sort_column)
total = results.total
except ValueError: # Invalid page number
results = []
total = 0
paginate = pagination_helper.Pagination(page, pagelen, total, [dict(r.items()) for r in results])
corrections = deduplicate_corrections(get_query_corrections(searcher, query, query_text)) # list of Corrector objects
#hf = whoosh.highlight.HtmlFormatter(classname="change")
#html = corrections.format_string(hf)
return (paginate, [c.string for c in corrections])
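# Hedged usage sketch, not part of the original module: open the index built
# for a ZIM file and run a paginated search. The field names "title" and
# "content" are assumptions made only for this illustration.
def _example_paginated_search(base_path, zim_name, query_text):
    import whoosh.index
    ix = whoosh.index.open_dir(index_directory_path(base_path, zim_name))
    paginate, suggestions = paginated_search(ix, ["title", "content"],
                                             query_text, page=1, pagelen=10)
    return paginate, suggestions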
| bsd-2-clause | -3,125,193,210,656,872,000 | 43.52 | 126 | 0.712788 | false |
copotron/car-control | datacollection/prius/log.py | 1 | 1840 | # Copyright (C) 2017 Swift Navigation Inc.
# Contact: Swift Navigation <[email protected]>
#
# This source is subject to the license found in the file 'LICENSE' which must
# be be distributed together with this source. All other rights reserved.
#
# THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
"""
the :mod:`sbp.client.examples.simple` module contains a basic example of
reading SBP messages from a TCP connection, decoding POS_LLH messages and
printing them out.
"""
import argparse
from sbp.client.drivers.network_drivers import TCPDriver
from sbp.client import Handler, Framer
from sbp.navigation import SBP_MSG_BASELINE_NED, SBP_MSG_POS_LLH
def main():
parser = argparse.ArgumentParser(
description="Swift Navigation SBP Example.")
parser.add_argument(
"-a",
"--host",
default='localhost',
help="specify the host address.")
parser.add_argument(
"-p",
"--port",
default=55555,
help="specify the port to use.")
args = parser.parse_args()
# Open a connection to Piksi using TCP
with TCPDriver(args.host, args.port) as driver:
with Handler(Framer(driver.read, None, verbose=True)) as source:
try:
for msg, metadata in source.filter(SBP_MSG_POS_LLH):
# Print out the N, E, D coordinates of the baseline
print("%d,%.16f,%.16f,%.16f,%d,%d,%d,%d" % (msg.tow, msg.lat, msg.lon,
msg.height, msg.h_accuracy, msg.v_accuracy, msg.n_sats, msg.flags))
except KeyboardInterrupt:
pass
if __name__ == "__main__":
main()
| gpl-3.0 | -1,142,508,516,065,610,600 | 35.8 | 113 | 0.643478 | false |
PDXCodeCoop/account | models.py | 1 | 1688 | from django.db import models
from django import forms
from django.contrib.auth.models import User
from django.utils import timezone
from easy_thumbnails.fields import ThumbnailerImageField
#A User's Personal Info
class Profile(models.Model):
user = models.OneToOneField(User)
photo = ThumbnailerImageField(upload_to='profiles', blank=True)
    # Location
city = models.CharField(max_length=50, null=True, blank=True)
state = models.CharField(max_length=50, null=True, blank=True)
#Personal Info
about = models.TextField(max_length=1000, null=True, blank=True)
def __unicode__(self):
return '%s: %s %s' % (self.user.username,self.user.first_name, self.user.last_name)
class Setting(models.Model):
PALETTE_THEMES = (
('DARK', 'Dark Theme'),
('LITE', 'Light Theme'),
)
user = models.OneToOneField(User)
color_palette = models.CharField(max_length=4, choices=PALETTE_THEMES, default='DARK')
def __unicode__(self):
return self.user.username
#Forms
class UserForm(forms.ModelForm):
confirm_password = forms.CharField(widget=forms.PasswordInput())
class Meta:
model = User
widgets = {
'password': forms.PasswordInput(),
}
fields = ('username', 'email', 'password')
class ProfileForm(forms.ModelForm):
class Meta:
model = Profile
fields = ('about', 'city', 'state',)
widgets = {
'about': forms.Textarea(attrs={'class':'form-control', 'rows':'5'}),
'city': forms.TextInput(attrs={'class':'form-control'}),
'state': forms.TextInput(attrs={'class':'form-control'}),
}
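# Illustrative sketch only, not part of this app: one way the two forms might
# be combined when registering a user. The helper below is hypothetical and is
# not referenced anywhere else in the project.
def _example_register(post_data):
    user_form = UserForm(post_data)
    profile_form = ProfileForm(post_data)
    if user_form.is_valid() and profile_form.is_valid():
        user = user_form.save(commit=False)
        user.set_password(user_form.cleaned_data['password'])
        user.save()
        profile = profile_form.save(commit=False)
        profile.user = user
        profile.save()
        return user
    return None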
| apache-2.0 | 601,757,445,029,764,600 | 33.44898 | 91 | 0.637441 | false |
Arkshine/AdminFreeLook | support/generate_headers.py | 1 | 2611 | # vim: set ts=8 sts=2 sw=2 tw=99 et:
import re
import os, sys
import subprocess
argv = sys.argv[1:]
if len(argv) < 2:
sys.stderr.write('Usage: generate_headers.py <source_path> <output_folder>\n')
sys.exit(1)
SourceFolder = os.path.abspath(os.path.normpath(argv[0]))
OutputFolder = os.path.normpath(argv[1])
class FolderChanger:
def __init__(self, folder):
self.old = os.getcwd()
self.new = folder
def __enter__(self):
if self.new:
os.chdir(self.new)
def __exit__(self, type, value, traceback):
os.chdir(self.old)
def run_and_return(argv):
# Python 2.6 doesn't have check_output.
if hasattr(subprocess, 'check_output'):
text = subprocess.check_output(argv)
if str != bytes:
text = str(text, 'utf-8')
else:
p = subprocess.Popen(argv, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, ignored = p.communicate()
rval = p.poll()
if rval:
raise subprocess.CalledProcessError(rval, argv)
text = output.decode('utf8')
return text.strip()
def get_git_version():
revision_count = run_and_return(['git', 'rev-list', '--count', 'HEAD'])
revision_hash = run_and_return(['git', 'log', '--pretty=format:%h:%H', '-n', '1'])
shorthash, longhash = revision_hash.split(':')
return revision_count, shorthash, longhash
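# Illustrative return value (numbers invented): get_git_version() returns a
# tuple such as ('1234', 'a1b2c3d', 'a1b2c3d4e5f6...') -- the revision count,
# the short hash and the long hash of HEAD.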
def output_version_headers():
with FolderChanger(SourceFolder):
count, shorthash, longhash = get_git_version()
with open(os.path.join(SourceFolder, 'product.version')) as fp:
contents = fp.read()
m = re.match('(\d+)\.(\d+)\.(\d+)-?(.*)', contents)
if m == None:
        raise Exception('Could not determine product version')
major, minor, release, tag = m.groups()
product = "{0}.{1}.{2}-rev.{3}".format(major, minor, release, count)
fullstring = product
if tag != "":
fullstring += "-{0}".format(tag)
with open(os.path.join(OutputFolder, 'module_version_auto.h'), 'w') as fp:
fp.write("""
#ifndef _EXTENSION_AUTO_VERSION_INFORMATION_H_
#define _EXTENSION_AUTO_VERSION_INFORMATION_H_
#define EXTENSION_BUILD_TAG \"{0}\"
#define EXTENSION_BUILD_CSET \"{1}\"
#define EXTENSION_BUILD_MAJOR \"{2}\"
#define EXTENSION_BUILD_MINOR \"{3}\"
#define EXTENSION_BUILD_RELEASE \"{4}\"
#define EXTENSION_BUILD_LOCAL_REV \"{6}\"
#define EXTENSION_BUILD_UNIQUEID EXTENSION_BUILD_LOCAL_REV \":\" EXTENSION_BUILD_CSET
#define EXTENSION_VERSION_STRING \"{5}\"
#define EXTENSION_VERSION_FILE {2},{3},{4},{6}
#endif // _EXTENSION_AUTO_VERSION_INFORMATION_H_
""".format(tag, shorthash, major, minor, release, fullstring, count))
output_version_headers()
| gpl-2.0 | -469,546,346,124,041,700 | 30.457831 | 87 | 0.654538 | false |
genonfire/bbgo | recipes/migrations/0001_initial.py | 1 | 1108 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=16)),
('order', models.IntegerField(default=1)),
],
),
migrations.CreateModel(
name='Recipe',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=32)),
('order', models.IntegerField(default=1)),
('recipe', models.TextField()),
('image', models.ImageField(upload_to=b'recipes/', blank=True)),
('category', models.ForeignKey(to='recipes.Category', on_delete=models.CASCADE)),
],
),
]
| mit | 5,277,985,640,252,880,000 | 33.625 | 114 | 0.544224 | false |
anarcoder/google_explorer | plugins/apache_rce_struts2_cve_2017_5638.py | 1 | 3545 | import os
from lxml import html as lh
from queue import Queue
from urllib.parse import urlparse
from threading import Thread
import requests
import threading
from requests import get
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
lock = threading.Lock()
class ApacheStruts2_CVE_2017_5638():
def __init__(self, filename):
self.filename = filename
self.urls = self.ap_cve()
@staticmethod
def banner():
os.system('clear')
print("\n")
print(" █████╗ ███╗ ██╗ █████╗ ██████╗ ██████╗ ██████╗ ██████╗ ███████╗██████╗ ")
print("██╔══██╗████╗ ██║██╔══██╗██╔══██╗██╔════╝██╔═══██╗██╔══██╗██╔════╝██╔══██╗")
print("███████║██╔██╗ ██║███████║██████╔╝██║ ██║ ██║██║ ██║█████╗ ██████╔╝")
print("██╔══██║██║╚██╗██║██╔══██║██╔══██╗██║ ██║ ██║██║ ██║██╔══╝ ██╔══██╗")
print("██║ ██║██║ ╚████║██║ ██║██║ ██║╚██████╗╚██████╔╝██████╔╝███████╗██║ ██║")
print("╚═╝ ╚═╝╚═╝ ╚═══╝╚═╝ ╚═╝╚═╝ ╚═╝ ╚═════╝ ╚═════╝ ╚═════╝ ╚══════╝╚═╝ ╚═╝")
print(" Apache Struts2 CVE 2017 5638 Checker - anarcoder at protonmail.com\n")
def remove_duplicate_targets(self):
results = [line.rstrip('\n') for line in open(self.filename)]
url_lists = []
for url in results:
try:
vuln = ['.action', '.do']
for v in vuln:
if v in url:
urlp = url.split(v)[0]
url_lists.append('python2 exploits/struntsrce.py --target='+urlp+v+' --test')
except:
pass
url_lists = set(url_lists)
url_lists = list(url_lists)
return url_lists
def check_vuln(self, q):
while True:
#with lock:
url = q.get()
os.system(url)
q.task_done()
def ap_cve(self):
self.banner()
# Removing duplicate targets
url_lists = self.remove_duplicate_targets()
print(len(url_lists))
#for url in url_lists:
# print(url.rstrip())
# My Queue
q = Queue(maxsize=0)
# Number of threads
num_threads = 10
for url in url_lists:
q.put(url)
# My threads
print('[*] Starting evil threads =)...\n')
for i in range(num_threads):
worker = Thread(target=self.check_vuln, args=(q,))
worker.setDaemon(True)
worker.start()
q.join()
def main():
filename = 'results_google_search.txt'
#print(os.getcwd())
ApacheStruts2_CVE_2017_5638(filename)
if __name__ == '__main__':
main()
| mit | -3,332,205,755,041,157,000 | 29.450549 | 101 | 0.452183 | false |
ChristosChristofidis/h2o-3 | scripts/run.py | 1 | 66029 | #!/usr/bin/python
import sys
import os
import shutil
import signal
import time
import random
import getpass
import re
import subprocess
import ConfigParser
def is_python_test_file(file_name):
"""
Return True if file_name matches a regexp for a python test. False otherwise.
"""
if (file_name == "test_config.py"):
return False
if re.match("^pyunit.*\.py$", file_name):
return True
if (re.match("^test.*\.py$", file_name)):
return True
return False
def is_python_file(file_name):
"""
Return True if file_name matches a regexp for a python program in general. False otherwise.
This is a separate function because it's useful to have the scan-for-test operation in
build_test_list() be separated from running the test.
That allows us to run things explicitly named using the --test option. Such as:
run.py --wipeall --numclouds 1 --test generate_rest_api_docs.py
"""
if (file_name == "test_config.py"):
return False
if (re.match("^.*\.py$", file_name)):
return True
return False
def is_javascript_test_file(file_name):
"""
Return True if file_name matches a regexp for a javascript test. False otherwise.
"""
if (re.match("^.*test.*\.js$", file_name)):
return True
return False
def is_runit_test_file(file_name):
"""
Return True if file_name matches a regexp for a R test. False otherwise.
"""
if (file_name == "h2o-runit.R"):
return False
if (re.match("^runit.*\.[rR]$", file_name)):
return True
return False
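# Hedged illustration, not part of the runner: expected behaviour of the
# filename matchers above. The example file names are invented and this helper
# is never called by the harness.
def _matcher_examples():
    assert is_python_test_file("pyunit_deeplearning.py")
    assert not is_python_test_file("test_config.py")
    assert is_runit_test_file("runit_gbm_grid.R")
    assert not is_runit_test_file("h2o-runit.R")
    assert is_javascript_test_file("flow_basic_test.js")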
class H2OUseCloudNode:
"""
A class representing one node in an H2O cloud which was specified by the user.
Don't try to build or tear down this kind of node.
use_ip: The given ip of the cloud.
use_port: The given port of the cloud.
"""
def __init__(self, use_ip, use_port):
self.use_ip = use_ip
self.use_port = use_port
def start(self):
pass
def stop(self):
pass
def terminate(self):
pass
def get_ip(self):
return self.use_ip
def get_port(self):
return self.use_port
class H2OUseCloud:
"""
A class representing an H2O clouds which was specified by the user.
Don't try to build or tear down this kind of cloud.
"""
def __init__(self, cloud_num, use_ip, use_port):
self.cloud_num = cloud_num
self.use_ip = use_ip
self.use_port = use_port
self.nodes = []
node = H2OUseCloudNode(self.use_ip, self.use_port)
self.nodes.append(node)
def start(self):
pass
def wait_for_cloud_to_be_up(self):
pass
def stop(self):
pass
def terminate(self):
pass
def get_ip(self):
node = self.nodes[0]
return node.get_ip()
def get_port(self):
node = self.nodes[0]
return node.get_port()
class H2OCloudNode:
"""
A class representing one node in an H2O cloud.
Note that the base_port is only a request for H2O.
H2O may choose to ignore our request and pick any port it likes.
So we have to scrape the real port number from stdout as part of cloud startup.
port: The actual port chosen at run time.
pid: The process id of the node.
output_file_name: Where stdout and stderr go. They are merged.
child: subprocess.Popen object.
terminated: Only from a signal. Not normal shutdown.
"""
def __init__(self, is_client, cloud_num, nodes_per_cloud, node_num, cloud_name, h2o_jar, ip, base_port,
xmx, output_dir):
"""
Create a node in a cloud.
@param is_client: Whether this node is an H2O client node (vs a worker node) or not.
@param cloud_num: Dense 0-based cloud index number.
@param nodes_per_cloud: How many H2O java instances are in a cloud. Clouds are symmetric.
@param node_num: This node's dense 0-based node index number.
@param cloud_name: The H2O -name command-line argument.
@param h2o_jar: Path to H2O jar file.
@param base_port: The starting port number we are trying to get our nodes to listen on.
@param xmx: Java memory parameter.
@param output_dir: The directory where we can create an output file for this process.
@return: The node object.
"""
self.is_client = is_client
self.cloud_num = cloud_num
self.nodes_per_cloud = nodes_per_cloud
self.node_num = node_num
self.cloud_name = cloud_name
self.h2o_jar = h2o_jar
self.ip = ip
self.base_port = base_port
self.xmx = xmx
self.output_dir = output_dir
self.port = -1
self.pid = -1
self.output_file_name = ""
self.child = None
self.terminated = False
# Choose my base port number here. All math is done here. Every node has the same
# base_port and calculates it's own my_base_port.
ports_per_node = 2
self.my_base_port = \
self.base_port + \
(self.cloud_num * self.nodes_per_cloud * ports_per_node) + \
(self.node_num * ports_per_node)
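        # For example (numbers invented): with base_port=54321, 5 nodes per
        # cloud and 2 ports per node, cloud 1 / node 3 requests
        # 54321 + (1 * 5 * 2) + (3 * 2) = 54337 as its base port.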
def start(self):
"""
Start one node of H2O.
(Stash away the self.child and self.pid internally here.)
@return: none
"""
# there is no hdfs currently in ec2, except s3n/hdfs
# the core-site.xml provides s3n info
        # it's possible that we can just always hard-wire the hdfs version
# to match the cdh3 cluster we're hard-wiring tests to
# i.e. it won't make s3n/s3 break on ec2
if (self.is_client):
main_class = "water.H2OClientApp"
else:
main_class = "water.H2OApp"
if "JAVA_HOME" in os.environ:
java = os.environ["JAVA_HOME"] + "/bin/java"
else:
java = "java"
cmd = [java,
# "-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5005",
"-Xmx" + self.xmx,
"-ea",
"-cp", self.h2o_jar,
main_class,
"-name", self.cloud_name,
"-baseport", str(self.my_base_port),
"-ga_opt_out"]
# Add S3N credentials to cmd if they exist.
# ec2_hdfs_config_file_name = os.path.expanduser("~/.ec2/core-site.xml")
# if (os.path.exists(ec2_hdfs_config_file_name)):
# cmd.append("-hdfs_config")
# cmd.append(ec2_hdfs_config_file_name)
self.output_file_name = \
os.path.join(self.output_dir, "java_" + str(self.cloud_num) + "_" + str(self.node_num) + ".out.txt")
f = open(self.output_file_name, "w")
if g_convenient:
cwd = os.getcwd()
here = os.path.abspath(os.path.dirname(__file__))
there = os.path.abspath(os.path.join(here, ".."))
os.chdir(there)
self.child = subprocess.Popen(args=cmd,
stdout=f,
stderr=subprocess.STDOUT,
cwd=there)
os.chdir(cwd)
else:
self.child = subprocess.Popen(args=cmd,
stdout=f,
stderr=subprocess.STDOUT,
cwd=self.output_dir)
self.pid = self.child.pid
print("+ CMD: " + ' '.join(cmd))
def scrape_port_from_stdout(self):
"""
Look at the stdout log and figure out which port the JVM chose.
Write this to self.port.
This call is blocking.
Exit if this fails.
@return: none
"""
retries = 30
while (retries > 0):
if (self.terminated):
return
f = open(self.output_file_name, "r")
s = f.readline()
while (len(s) > 0):
if (self.terminated):
return
match_groups = re.search(r"Listening for HTTP and REST traffic on http.*://(\S+):(\d+)", s)
if (match_groups is not None):
port = match_groups.group(2)
if (port is not None):
self.port = port
f.close()
print("H2O Cloud {} Node {} started with output file {}".format(self.cloud_num,
self.node_num,
self.output_file_name))
return
s = f.readline()
f.close()
retries -= 1
if (self.terminated):
return
time.sleep(1)
print("")
print("ERROR: Too many retries starting cloud.")
print("")
sys.exit(1)
def scrape_cloudsize_from_stdout(self, nodes_per_cloud):
"""
Look at the stdout log and wait until the cloud of proper size is formed.
This call is blocking.
Exit if this fails.
@return: none
"""
retries = 60
while (retries > 0):
if (self.terminated):
return
f = open(self.output_file_name, "r")
s = f.readline()
while (len(s) > 0):
if (self.terminated):
return
match_groups = re.search(r"Cloud of size (\d+) formed", s)
if (match_groups is not None):
size = match_groups.group(1)
if (size is not None):
size = int(size)
if (size == nodes_per_cloud):
f.close()
return
s = f.readline()
f.close()
retries -= 1
if (self.terminated):
return
time.sleep(1)
print("")
print("ERROR: Too many retries starting cloud.")
print("")
sys.exit(1)
def stop(self):
"""
Normal node shutdown.
Ignore failures for now.
@return: none
"""
if (self.pid > 0):
print("Killing JVM with PID {}".format(self.pid))
try:
self.child.terminate()
self.child.wait()
except OSError:
pass
self.pid = -1
def terminate(self):
"""
Terminate a running node. (Due to a signal.)
@return: none
"""
self.terminated = True
self.stop()
def get_ip(self):
""" Return the ip address this node is really listening on. """
return self.ip
def get_port(self):
""" Return the port this node is really listening on. """
return self.port
def __str__(self):
s = ""
s += " node {}\n".format(self.node_num)
s += " xmx: {}\n".format(self.xmx)
s += " my_base_port: {}\n".format(self.my_base_port)
s += " port: {}\n".format(self.port)
s += " pid: {}\n".format(self.pid)
return s
class H2OCloud:
"""
A class representing one of the H2O clouds.
"""
def __init__(self, cloud_num, use_client, nodes_per_cloud, h2o_jar, base_port, xmx, output_dir):
"""
Create a cloud.
See node definition above for argument descriptions.
@return: The cloud object.
"""
self.use_client = use_client
self.cloud_num = cloud_num
self.nodes_per_cloud = nodes_per_cloud
self.h2o_jar = h2o_jar
self.base_port = base_port
self.xmx = xmx
self.output_dir = output_dir
# Randomly choose a seven digit cloud number.
n = random.randint(1000000, 9999999)
user = getpass.getuser()
user = ''.join(user.split())
self.cloud_name = "H2O_runit_{}_{}".format(user, n)
self.nodes = []
self.client_nodes = []
self.jobs_run = 0
if (use_client):
actual_nodes_per_cloud = self.nodes_per_cloud + 1
else:
actual_nodes_per_cloud = self.nodes_per_cloud
for node_num in range(actual_nodes_per_cloud):
is_client = False
if (use_client):
if (node_num == (actual_nodes_per_cloud - 1)):
is_client = True
node = H2OCloudNode(is_client,
self.cloud_num, actual_nodes_per_cloud, node_num,
self.cloud_name,
self.h2o_jar,
"127.0.0.1", self.base_port,
self.xmx, self.output_dir)
if (is_client):
self.client_nodes.append(node)
else:
self.nodes.append(node)
def start(self):
"""
Start H2O cloud.
The cloud is not up until wait_for_cloud_to_be_up() is called and returns.
@return: none
"""
for node in self.nodes:
node.start()
for node in self.client_nodes:
node.start()
def wait_for_cloud_to_be_up(self):
"""
Blocking call ensuring the cloud is available.
@return: none
"""
self._scrape_port_from_stdout()
self._scrape_cloudsize_from_stdout()
def stop(self):
"""
Normal cloud shutdown.
@return: none
"""
for node in self.nodes:
node.stop()
for node in self.client_nodes:
node.stop()
def terminate(self):
"""
Terminate a running cloud. (Due to a signal.)
@return: none
"""
for node in self.client_nodes:
node.terminate()
for node in self.nodes:
node.terminate()
def get_ip(self):
""" Return an ip to use to talk to this cloud. """
if (len(self.client_nodes) > 0):
node = self.client_nodes[0]
else:
node = self.nodes[0]
return node.get_ip()
def get_port(self):
""" Return a port to use to talk to this cloud. """
if (len(self.client_nodes) > 0):
node = self.client_nodes[0]
else:
node = self.nodes[0]
return node.get_port()
def _scrape_port_from_stdout(self):
for node in self.nodes:
node.scrape_port_from_stdout()
for node in self.client_nodes:
node.scrape_port_from_stdout()
def _scrape_cloudsize_from_stdout(self):
for node in self.nodes:
node.scrape_cloudsize_from_stdout(self.nodes_per_cloud)
for node in self.client_nodes:
node.scrape_cloudsize_from_stdout(self.nodes_per_cloud)
def __str__(self):
s = ""
s += "cloud {}\n".format(self.cloud_num)
s += " name: {}\n".format(self.cloud_name)
s += " jobs_run: {}\n".format(self.jobs_run)
for node in self.nodes:
s += str(node)
for node in self.client_nodes:
s += str(node)
return s
class Test:
"""
A class representing one Test.
cancelled: Don't start this test.
terminated: Test killed due to signal.
returncode: Exit code of child.
pid: Process id of the test.
ip: IP of cloud to run test.
port: Port of cloud to run test.
child: subprocess.Popen object.
"""
@staticmethod
def test_did_not_complete():
"""
returncode marker to know if the test ran or not.
"""
return -9999999
def __init__(self, test_dir, test_short_dir, test_name, output_dir):
"""
Create a Test.
@param test_dir: Full absolute path to the test directory.
@param test_short_dir: Path from h2o/R/tests to the test directory.
@param test_name: Test filename with the directory removed.
@param output_dir: The directory where we can create an output file for this process.
@return: The test object.
"""
self.test_dir = test_dir
self.test_short_dir = test_short_dir
self.test_name = test_name
self.output_dir = output_dir
self.output_file_name = ""
self.cancelled = False
self.terminated = False
self.returncode = Test.test_did_not_complete()
self.start_seconds = -1
self.pid = -1
self.ip = None
self.port = -1
self.child = None
def start(self, ip, port):
"""
Start the test in a non-blocking fashion.
@param ip: IP address of cloud to run on.
@param port: Port of cloud to run on.
@return: none
"""
if (self.cancelled or self.terminated):
return
self.start_seconds = time.time()
self.ip = ip
self.port = port
if (is_python_test_file(self.test_name)):
cmd = ["python",
self.test_name,
"--usecloud",
self.ip + ":" + str(self.port)]
elif (is_python_file(self.test_name)):
cmd = ["python",
self.test_name,
"--usecloud",
self.ip + ":" + str(self.port)]
elif (is_runit_test_file(self.test_name)):
cmd = ["R",
"-f",
self.test_name,
"--args",
self.ip + ":" + str(self.port)]
elif (is_javascript_test_file(self.test_name)):
cmd = ["phantomjs",
self.test_name,
"--host",
self.ip + ":" + str(self.port),
"--timeout",
str(g_phantomjs_to),
"--packs",
g_phantomjs_packs]
else:
print("")
print("ERROR: Test runner failure with test: " + self.test_name)
print("")
sys.exit(1)
test_short_dir_with_no_slashes = re.sub(r'[\\/]', "_", self.test_short_dir)
if (len(test_short_dir_with_no_slashes) > 0):
test_short_dir_with_no_slashes += "_"
self.output_file_name = \
os.path.join(self.output_dir, test_short_dir_with_no_slashes + self.test_name + ".out.txt")
f = open(self.output_file_name, "w")
self.child = subprocess.Popen(args=cmd,
stdout=f,
stderr=subprocess.STDOUT,
cwd=self.test_dir)
self.pid = self.child.pid
# print("+ CMD: " + ' '.join(cmd))
def is_completed(self):
"""
Check if test has completed.
This has side effects and MUST be called for the normal test queueing to work.
Specifically, child.poll().
@return: True if the test completed, False otherwise.
"""
child = self.child
if (child is None):
return False
child.poll()
if (child.returncode is None):
return False
self.pid = -1
self.returncode = child.returncode
return True
def cancel(self):
"""
Mark this test as cancelled so it never tries to start.
@return: none
"""
if (self.pid <= 0):
self.cancelled = True
def terminate_if_started(self):
"""
Terminate a running test. (Due to a signal.)
@return: none
"""
if (self.pid > 0):
self.terminate()
def terminate(self):
"""
Terminate a running test. (Due to a signal.)
@return: none
"""
self.terminated = True
if (self.pid > 0):
print("Killing Test {} with PID {}".format(os.path.join(self.test_short_dir, self.test_name), self.pid))
try:
self.child.terminate()
except OSError:
pass
self.pid = -1
def get_test_dir_file_name(self):
"""
@return: The full absolute path of this test.
"""
return os.path.join(self.test_dir, self.test_name)
def get_test_name(self):
"""
@return: The file name (no directory) of this test.
"""
return self.test_name
def get_seed_used(self):
"""
@return: The seed used by this test.
"""
return self._scrape_output_for_seed()
def get_ip(self):
"""
@return: IP of the cloud where this test ran.
"""
return self.ip
def get_port(self):
"""
@return: Integer port number of the cloud where this test ran.
"""
return int(self.port)
def get_passed(self):
"""
@return: True if the test passed, False otherwise.
"""
return (self.returncode == 0)
def get_nopass(self, nopass):
"""
        Some tests are known not to pass, and even if they fail we don't want
        them to fail the overall regression PASS/FAIL status.
@return: True if the test has been marked as NOPASS, False otherwise.
"""
a = re.compile("NOPASS")
return a.search(self.test_name) and not nopass
def get_nofeature(self, nopass):
"""
        Some tests are known not to pass, and even if they fail we don't want
        them to fail the overall regression PASS/FAIL status.
@return: True if the test has been marked as NOFEATURE, False otherwise.
"""
a = re.compile("NOFEATURE")
return a.search(self.test_name) and not nopass
def get_completed(self):
"""
@return: True if the test completed (pass or fail), False otherwise.
"""
return (self.returncode > Test.test_did_not_complete())
def get_terminated(self):
"""
For a test to be terminated it must have started and had a PID.
@return: True if the test was terminated, False otherwise.
"""
return self.terminated
def get_output_dir_file_name(self):
"""
@return: Full path to the output file which you can paste to a terminal window.
"""
return (os.path.join(self.output_dir, self.output_file_name))
def _scrape_output_for_seed(self):
"""
@return: The seed scraped from the output file.
"""
res = ""
with open(self.get_output_dir_file_name(), "r") as f:
for line in f:
if "SEED used" in line:
line = line.strip().split(' ')
res = line[-1]
break
return res
def __str__(self):
s = ""
s += "Test: {}/{}\n".format(self.test_dir, self.test_name)
return s
class TestRunner:
"""
A class for running tests.
The tests list contains an object for every test.
The tests_not_started list acts as a job queue.
The tests_running list is polled for jobs that have finished.
"""
def __init__(self,
test_root_dir,
use_cloud, use_cloud2, use_client, cloud_config, use_ip, use_port,
num_clouds, nodes_per_cloud, h2o_jar, base_port, xmx, output_dir,
failed_output_dir, path_to_tar, path_to_whl, produce_unit_reports, testreport_dir):
"""
Create a runner.
@param test_root_dir: h2o/R/tests directory.
@param use_cloud: Use this one user-specified cloud. Overrides num_clouds.
@param use_cloud2: Use the cloud_config to define the list of H2O clouds.
@param cloud_config: (if use_cloud2) the config file listing the H2O clouds.
@param use_ip: (if use_cloud) IP of one cloud to use.
@param use_port: (if use_cloud) Port of one cloud to use.
@param num_clouds: Number of H2O clouds to start.
@param nodes_per_cloud: Number of H2O nodes to start per cloud.
@param h2o_jar: Path to H2O jar file to run.
@param base_port: Base H2O port (e.g. 54321) to start choosing from.
@param xmx: Java -Xmx parameter.
@param output_dir: Directory for output files.
@param failed_output_dir: Directory to copy failed test output.
@param path_to_tar: NA
@param path_to_whl: NA
        @param produce_unit_reports: if True, the runner produces xUnit test reports for Jenkins
@param testreport_dir: directory to put xUnit test reports for Jenkins (should follow build system conventions)
@return: The runner object.
"""
self.test_root_dir = test_root_dir
self.use_cloud = use_cloud
self.use_cloud2 = use_cloud2
self.use_client = use_client
# Valid if use_cloud is True
self.use_ip = use_ip
self.use_port = use_port
# Valid if use_cloud is False
self.num_clouds = num_clouds
self.nodes_per_cloud = nodes_per_cloud
self.h2o_jar = h2o_jar
self.base_port = base_port
self.output_dir = output_dir
self.failed_output_dir = failed_output_dir
self.produce_unit_reports = produce_unit_reports
self.testreport_dir = testreport_dir
self.start_seconds = time.time()
self.terminated = False
self.clouds = []
self.tests = []
self.tests_not_started = []
self.tests_running = []
self.regression_passed = False
self._create_output_dir()
self._create_failed_output_dir()
if produce_unit_reports:
self._create_testreport_dir()
self.nopass_counter = 0
self.nofeature_counter = 0
self.path_to_tar = path_to_tar
self.path_to_whl = path_to_whl
if (use_cloud):
node_num = 0
cloud = H2OUseCloud(node_num, use_ip, use_port)
self.clouds.append(cloud)
elif (use_cloud2):
clouds = TestRunner.read_config(cloud_config)
node_num = 0
for c in clouds:
cloud = H2OUseCloud(node_num, c[0], c[1])
self.clouds.append(cloud)
node_num += 1
else:
for i in range(self.num_clouds):
cloud = H2OCloud(i, self.use_client, self.nodes_per_cloud, h2o_jar, self.base_port, xmx,
self.output_dir)
self.clouds.append(cloud)
@staticmethod
def find_test(test_to_run):
"""
Be nice and try to help find the test if possible.
If the test is actually found without looking, then just use it.
        Otherwise, search from the current working directory down.
"""
if (os.path.exists(test_to_run)):
abspath_test = os.path.abspath(test_to_run)
return abspath_test
for d, subdirs, files in os.walk(os.getcwd()):
for f in files:
if (f == test_to_run):
return os.path.join(d, f)
# Not found, return the file, which will result in an error downstream when it can't be found.
print("")
print("ERROR: Test does not exist: " + test_to_run)
print("")
sys.exit(1)
@staticmethod
def read_config(config_file):
        clouds = []  # a list of lists. Inner lists have [ip, port]
cfg = ConfigParser.RawConfigParser()
cfg.read(config_file)
for s in cfg.sections():
items = cfg.items(s)
cloud = [items[0][1], int(items[1][1])]
clouds.append(cloud)
return clouds
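    # Illustrative sketch (assumed file layout, not part of the original script):
    # read_config() expects an INI-style file where each section describes one
    # cloud and the first two options are its IP and port. The section and option
    # names below are made up; only the (ip, port) ordering matters to the parser.
    #
    #   [cloud0]
    #   ip = 192.168.1.10
    #   port = 54321
    #
    #   [cloud1]
    #   ip = 192.168.1.11
    #   port = 54321
    #
    # read_config("cloud.cfg") would then return
    # [["192.168.1.10", 54321], ["192.168.1.11", 54321]].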
def read_test_list_file(self, test_list_file):
"""
Read in a test list file line by line. Each line in the file is a test
to add to the test run.
@param test_list_file: Filesystem path to a file with a list of tests to run.
@return: none
"""
try:
f = open(test_list_file, "r")
s = f.readline()
while (len(s) != 0):
stripped = s.strip()
if (len(stripped) == 0):
s = f.readline()
continue
if (stripped.startswith("#")):
s = f.readline()
continue
found_stripped = TestRunner.find_test(stripped)
self.add_test(found_stripped)
s = f.readline()
f.close()
except IOError as e:
print("")
print("ERROR: Failure reading test list: " + test_list_file)
print(" (errno {0}): {1}".format(e.errno, e.strerror))
print("")
sys.exit(1)
def build_test_list(self, test_group, run_small, run_medium, run_large, run_xlarge, nopass):
"""
Recursively find the list of tests to run and store them in the object.
Fills in self.tests and self.tests_not_started.
@param test_group: Name of the test group of tests to run.
@return: none
"""
if (self.terminated):
return
for root, dirs, files in os.walk(self.test_root_dir):
if (root.endswith("Util")):
continue
for f in files:
# Figure out if the current file under consideration is a test.
is_test = False
if (is_python_test_file(f)):
is_test = True
if (is_runit_test_file(f)):
is_test = True
if (not is_test):
continue
is_small = False
is_medium = False
is_large = False
is_xlarge = False
is_nopass = False
is_nofeature = False
if "xlarge" in f:
is_xlarge = True
elif "medium" in f:
is_medium = True
elif "large" in f:
is_large = True
else:
is_small = True
if "NOPASS" in f:
is_nopass = True
if "NOFEATURE" in f:
is_nofeature = True
if is_small and not run_small:
continue
if is_medium and not run_medium:
continue
if is_large and not run_large:
continue
if is_xlarge and not run_xlarge:
continue
if is_nopass and not nopass:
# skip all NOPASS tests for regular runs but still count the number of NOPASS tests
self.nopass_counter += 1
continue
if is_nofeature and not nopass:
# skip all NOFEATURE tests for regular runs but still count the number of NOFEATURE tests
self.nofeature_counter += 1
continue
if nopass and not is_nopass and not is_nofeature:
# if g_nopass flag is set, then ONLY run the NOPASS and NOFEATURE tests (skip all other tests)
continue
if test_group is not None:
test_short_dir = self._calc_test_short_dir(os.path.join(root, f))
if (test_group.lower() not in test_short_dir) and test_group.lower() not in f:
continue
self.add_test(os.path.join(root, f))
def add_test(self, test_path):
"""
Add one test to the list of tests to run.
@param test_path: File system path to the test.
@return: none
"""
abs_test_path = os.path.abspath(test_path)
abs_test_dir = os.path.dirname(abs_test_path)
test_file = os.path.basename(abs_test_path)
if (not os.path.exists(abs_test_path)):
print("")
print("ERROR: Test does not exist: " + abs_test_path)
print("")
sys.exit(1)
test_short_dir = self._calc_test_short_dir(test_path)
test = Test(abs_test_dir, test_short_dir, test_file, self.output_dir)
self.tests.append(test)
self.tests_not_started.append(test)
def start_clouds(self):
"""
Start all H2O clouds.
@return: none
"""
if (self.terminated):
return
if (self.use_cloud):
return
print("")
print("Starting clouds...")
print("")
for cloud in self.clouds:
if (self.terminated):
return
cloud.start()
print("")
print("Waiting for H2O nodes to come up...")
print("")
for cloud in self.clouds:
if (self.terminated):
return
cloud.wait_for_cloud_to_be_up()
def run_tests(self, nopass):
"""
Run all tests.
@return: none
"""
if (self.terminated):
return
if (self._have_some_r_tests()):
self._log("")
self._log("Setting up R H2O package...")
out_file_name = os.path.join(self.output_dir, "runnerSetupPackage.out.txt")
out = open(out_file_name, "w")
runner_setup_package_r = None
if (True):
possible_utils_parent_dir = self.test_root_dir
while (True):
possible_utils_dir = os.path.join(possible_utils_parent_dir,
os.path.join("h2o-r",
os.path.join("tests", "Utils")))
possible_runner_setup_package_r = os.path.join(possible_utils_dir, "runnerSetupPackage.R")
if (os.path.exists(possible_runner_setup_package_r)):
runner_setup_package_r = possible_runner_setup_package_r
break
next_possible_utils_parent_dir = os.path.dirname(possible_utils_parent_dir)
if (next_possible_utils_parent_dir == possible_utils_parent_dir):
break
possible_utils_parent_dir = next_possible_utils_parent_dir
if (runner_setup_package_r is None):
print("")
print("ERROR: runnerSetupPackage.R not found.")
print("")
sys.exit(1)
cmd = ["R",
"--quiet",
"-f",
runner_setup_package_r]
if self.path_to_tar is not None:
print "Using R TAR located at: " + self.path_to_tar
cmd += ["--args", self.path_to_tar]
child = subprocess.Popen(args=cmd,
stdout=out,
stderr=subprocess.STDOUT)
rv = child.wait()
if (self.terminated):
return
if (rv != 0):
print("")
print("ERROR: " + runner_setup_package_r + " failed.")
print(" (See " + out_file_name + ")")
print("")
sys.exit(1)
out.close()
elif self._have_some_py_tests() and self.path_to_whl is not None:
# basically only do this if we have a whl to install
self._log("")
self._log("Setting up Python H2O package...")
out_file_name = os.path.join(self.output_dir, "pythonSetup.out.txt")
out = open(out_file_name, "w")
cmd = ["pip", "install", self.path_to_whl, "--force-reinstall"]
child = subprocess.Popen(args=cmd,
stdout=out,
stderr=subprocess.STDOUT)
rv = child.wait()
if (self.terminated):
return
if (rv != 0):
print("")
print("ERROR: Python setup failed.")
print(" (See " + out_file_name + ")")
print("")
sys.exit(1)
out.close()
num_tests = len(self.tests)
num_nodes = self.num_clouds * self.nodes_per_cloud
self._log("")
if (self.use_client):
client_message = " (+ client mode)"
else:
client_message = ""
if (self.use_cloud):
self._log("Starting {} tests...".format(num_tests))
elif (self.use_cloud2):
self._log("Starting {} tests on {} clouds...".format(num_tests, len(self.clouds)))
else:
self._log("Starting {} tests on {} clouds with {} total H2O worker nodes{}...".format(num_tests,
self.num_clouds,
num_nodes,
client_message))
self._log("")
        # Start the first n tests, where n is the lesser of the total number of tests,
        # the total number of clouds, and a hard cap of 30.
        start_count = min(len(self.tests_not_started), len(self.clouds), 30)
        if (g_use_cloud2):
            start_count = min(start_count, 75)  # allow up to 75 simultaneous tests when a cloud config is used
for i in range(start_count):
cloud = self.clouds[i]
ip = cloud.get_ip()
port = cloud.get_port()
self._start_next_test_on_ip_port(ip, port)
# As each test finishes, send a new one to the cloud that just freed up.
while (len(self.tests_not_started) > 0):
if (self.terminated):
return
completed_test = self._wait_for_one_test_to_complete()
if (self.terminated):
return
self._report_test_result(completed_test, nopass)
ip_of_completed_test = completed_test.get_ip()
port_of_completed_test = completed_test.get_port()
self._start_next_test_on_ip_port(ip_of_completed_test, port_of_completed_test)
# Wait for remaining running tests to complete.
while (len(self.tests_running) > 0):
if (self.terminated):
return
completed_test = self._wait_for_one_test_to_complete()
if (self.terminated):
return
self._report_test_result(completed_test, nopass)
def stop_clouds(self):
"""
Stop all H2O clouds.
@return: none
"""
if (self.terminated):
return
if (self.use_cloud or self.use_cloud2):
print("")
print("All tests completed...")
print("")
return
print("")
print("All tests completed; tearing down clouds...")
print("")
for cloud in self.clouds:
cloud.stop()
def report_summary(self, nopass):
"""
Report some summary information when the tests have finished running.
@return: none
"""
passed = 0
nopass_but_tolerate = 0
nofeature_but_tolerate = 0
failed = 0
notrun = 0
total = 0
true_fail_list = []
terminated_list = []
for test in self.tests:
if (test.get_passed()):
passed += 1
else:
if (test.get_nopass(nopass)):
nopass_but_tolerate += 1
if (test.get_nofeature(nopass)):
nofeature_but_tolerate += 1
if (test.get_completed()):
failed += 1
if (not test.get_nopass(nopass) and not test.get_nofeature(nopass)):
true_fail_list.append(test.get_test_name())
else:
notrun += 1
if (test.get_terminated()):
terminated_list.append(test.get_test_name())
total += 1
if ((passed + nopass_but_tolerate + nofeature_but_tolerate) == total):
self.regression_passed = True
else:
self.regression_passed = False
end_seconds = time.time()
delta_seconds = end_seconds - self.start_seconds
run = total - notrun
self._log("")
self._log("----------------------------------------------------------------------")
self._log("")
self._log("SUMMARY OF RESULTS")
self._log("")
self._log("----------------------------------------------------------------------")
self._log("")
self._log("Total tests: " + str(total))
self._log("Passed: " + str(passed))
self._log("Did not pass: " + str(failed))
self._log("Did not complete: " + str(notrun))
self._log("Tolerated NOPASS: " + str(nopass_but_tolerate))
self._log("Tolerated NOFEATURE: " + str(nofeature_but_tolerate))
self._log("NOPASS tests skipped: " + str(self.nopass_counter))
self._log("NOFEATURE tests skipped: " + str(self.nofeature_counter))
self._log("")
self._log("Total time: %.2f sec" % delta_seconds)
if (run > 0):
self._log("Time/completed test: %.2f sec" % (delta_seconds / run))
else:
self._log("Time/completed test: N/A")
self._log("")
if (len(true_fail_list) > 0):
self._log("True fail list: " + ", ".join(true_fail_list))
if (len(terminated_list) > 0):
self._log("Terminated list: " + ", ".join(terminated_list))
self._log("")
def terminate(self):
"""
Terminate all running clouds. (Due to a signal.)
@return: none
"""
self.terminated = True
for test in self.tests:
test.cancel()
for test in self.tests:
test.terminate_if_started()
for cloud in self.clouds:
cloud.terminate()
def get_regression_passed(self):
"""
Return whether the overall regression passed or not.
@return: true if the exit value should be 0, false otherwise.
"""
return self.regression_passed
# --------------------------------------------------------------------
# Private methods below this line.
# --------------------------------------------------------------------
def _calc_test_short_dir(self, test_path):
"""
Calculate directory of test relative to test_root_dir.
@param test_path: Path to test file.
@return: test_short_dir, relative directory containing test (relative to test_root_dir).
"""
abs_test_root_dir = os.path.abspath(self.test_root_dir)
abs_test_path = os.path.abspath(test_path)
abs_test_dir = os.path.dirname(abs_test_path)
test_short_dir = abs_test_dir
# Look to elide longest prefix first.
prefix = os.path.join(abs_test_root_dir, "")
if (test_short_dir.startswith(prefix)):
test_short_dir = test_short_dir.replace(prefix, "", 1)
prefix = abs_test_root_dir
if (test_short_dir.startswith(prefix)):
test_short_dir = test_short_dir.replace(prefix, "", 1)
return test_short_dir
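    # Worked example (hypothetical paths, not from the original script): with
    # test_root_dir = /home/user/h2o-r/tests and
    # test_path     = /home/user/h2o-r/tests/testdir_algos/glm/runit_GLM_basic.R,
    # _calc_test_short_dir() strips the root prefix and returns
    # "testdir_algos/glm", which is the string later matched against --testgroup.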
def _have_some_r_tests(self):
"""
Do we have any R tests to run at all?
(There might be tests of a different language to run, even if there are no R tests.)
"""
for test in self.tests:
test_name = test.get_test_name()
if (is_runit_test_file(test_name)):
return True
return False
def _have_some_py_tests(self):
"""
dumb check for pyunits
"""
for test in self.tests:
test_name = test.get_test_name()
if is_python_test_file(test_name):
return True
return False
def _create_failed_output_dir(self):
try:
os.makedirs(self.failed_output_dir)
except OSError as e:
print("")
print("mkdir failed (errno {0}): {1}".format(e.errno, e.strerror))
print(" " + self.failed_output_dir)
print("")
print("(try adding --wipe)")
print("")
sys.exit(1)
def _create_output_dir(self):
try:
os.makedirs(self.output_dir)
except OSError as e:
print("")
print("mkdir failed (errno {0}): {1}".format(e.errno, e.strerror))
print(" " + self.output_dir)
print("")
print("(try adding --wipe)")
print("")
sys.exit(1)
def _create_testreport_dir(self):
try:
if not os.path.exists(self.testreport_dir):
os.makedirs(self.testreport_dir)
except OSError as e:
print("")
print("mkdir failed (errno {0}): {1}".format(e.errno, e.strerror))
print(" " + self.testreport_dir)
print("")
sys.exit(1)
def _start_next_test_on_ip_port(self, ip, port):
test = self.tests_not_started.pop(0)
self.tests_running.append(test)
test.start(ip, port)
def _wait_for_one_test_to_complete(self):
while (True):
for test in self.tests_running:
if (self.terminated):
return None
if (test.is_completed()):
self.tests_running.remove(test)
return test
if (self.terminated):
return
time.sleep(1)
def _report_test_result(self, test, nopass):
port = test.get_port()
now = time.time()
duration = now - test.start_seconds
test_name = test.get_test_name()
if (test.get_passed()):
s = "PASS %d %4ds %-60s" % (port, duration, test_name)
self._log(s)
if self.produce_unit_reports:
self._report_xunit_result("r_suite", test_name, duration, False)
else:
s = " FAIL %d %4ds %-60s %s %s" % \
(port, duration, test.get_test_name(), test.get_output_dir_file_name(), test.get_seed_used())
self._log(s)
f = self._get_failed_filehandle_for_appending()
f.write(test.get_test_dir_file_name() + "\n")
f.close()
# Report junit
if self.produce_unit_reports:
if not test.get_nopass(nopass):
self._report_xunit_result("r_suite", test_name, duration, False, "TestFailure", "Test failed",
"See {}".format(test.get_output_dir_file_name()))
else:
self._report_xunit_result("r_suite", test_name, duration, True)
# Copy failed test output into directory failed
if not test.get_nopass(nopass) and not test.get_nofeature(nopass):
shutil.copy(test.get_output_dir_file_name(), self.failed_output_dir)
# XSD schema for xunit reports is here; http://windyroad.com.au/dl/Open%20Source/JUnit.xsd
def _report_xunit_result(self, testsuite_name, testcase_name, testcase_runtime,
skipped=False, failure_type=None, failure_message=None, failure_description=None):
errors = 0
failures = 1 if failure_type else 0
skip = 1 if skipped else 0
failure = "" if not failure_type else """"<failure type="{}" message="{}">{}</failure>""" \
.format(failure_type, failure_message, failure_description)
xml_report = """<?xml version="1.0" encoding="UTF-8"?>
<testsuite name="{testsuiteName}" tests="1" errors="{errors}" failures="{failures}" skip="{skip}">
<testcase classname="{testcaseClassName}" name="{testcaseName}" time="{testcaseRuntime}">
{failure}
</testcase>
</testsuite>
""".format(testsuiteName=testsuite_name, testcaseClassName=testcase_name, testcaseName=testcase_name,
testcaseRuntime=testcase_runtime, failure=failure,
errors=errors, failures=failures, skip=skip)
self._save_xunit_report(testsuite_name, testcase_name, xml_report)
def _save_xunit_report(self, testsuite, testcase, report):
f = self._get_testreport_filehandle(testsuite, testcase)
f.write(report)
f.close()
def _log(self, s):
f = self._get_summary_filehandle_for_appending()
print(s)
sys.stdout.flush()
f.write(s + "\n")
f.close()
def _get_summary_filehandle_for_appending(self):
summary_file_name = os.path.join(self.output_dir, "summary.txt")
f = open(summary_file_name, "a+")
return f
def _get_failed_filehandle_for_appending(self):
summary_file_name = os.path.join(self.output_dir, "failed.txt")
f = open(summary_file_name, "a+")
return f
def _get_testreport_filehandle(self, testsuite, testcase):
testreport_file_name = os.path.join(self.testreport_dir, "TEST_{0}_{1}.xml".format(testsuite, testcase))
f = open(testreport_file_name, "w+")
return f
def __str__(self):
s = "\n"
s += "test_root_dir: {}\n".format(self.test_root_dir)
s += "output_dir: {}\n".format(self.output_dir)
s += "h2o_jar: {}\n".format(self.h2o_jar)
s += "num_clouds: {}\n".format(self.num_clouds)
s += "nodes_per_cloud: {}\n".format(self.nodes_per_cloud)
s += "base_port: {}\n".format(self.base_port)
s += "\n"
for c in self.clouds:
s += str(c)
s += "\n"
# for t in self.tests:
# s += str(t)
return s
# --------------------------------------------------------------------
# Main program
# --------------------------------------------------------------------
# Global variables that can be set by the user.
g_script_name = ""
g_base_port = 40000
g_num_clouds = 5
g_nodes_per_cloud = 1
g_wipe_test_state = False
g_wipe_output_dir = False
g_test_to_run = None
g_test_list_file = None
g_test_group = None
g_run_small = True
g_run_medium = True
g_run_large = True
g_run_xlarge = True
g_use_cloud = False
g_use_cloud2 = False
g_use_client = False
g_config = None
g_use_ip = None
g_use_port = None
g_no_run = False
g_jvm_xmx = "1g"
g_nopass = False
g_convenient = False
g_path_to_h2o_jar = None
g_path_to_tar = None
g_path_to_whl = None
g_produce_unit_reports = True
g_phantomjs_to = 3600
g_phantomjs_packs = "examples"
# Global variables that are set internally.
g_output_dir = None
g_runner = None
g_handling_signal = False
def use(x):
""" Hack to remove compiler warning. """
if False:
print(x)
def signal_handler(signum, stackframe):
global g_runner
global g_handling_signal
use(stackframe)
if (g_handling_signal):
# Don't do this recursively.
return
g_handling_signal = True
print("")
print("----------------------------------------------------------------------")
print("")
print("SIGNAL CAUGHT (" + str(signum) + "). TEARING DOWN CLOUDS.")
print("")
print("----------------------------------------------------------------------")
g_runner.terminate()
def usage():
print("")
print("Usage: " + g_script_name + " [...options...]")
print("")
print(" (Output dir is: " + g_output_dir + ")")
print(" (Default number of clouds is: " + str(g_num_clouds) + ")")
print("")
print(" --wipeall Remove all prior test state before starting, particularly")
print(" random seeds.")
print(" (Removes master_seed file and all Rsandbox directories.")
print(" Also wipes the output dir before starting.)")
print("")
print(" --wipe Wipes the output dir before starting. Keeps old random seeds.")
print("")
print(" --baseport The first port at which H2O starts searching for free ports.")
print("")
print(" --numclouds The number of clouds to start.")
print(" Each test is randomly assigned to a cloud.")
print("")
print(" --numnodes The number of nodes in the cloud.")
print(" When this is specified, numclouds must be 1.")
print("")
print(" --test If you only want to run one test, specify it like this.")
print("")
print(" --testlist A file containing a list of tests to run (for example the")
print(" 'failed.txt' file from the output directory).")
print("")
print(" --testgroup Test a group of tests by function:")
print(" pca, glm, kmeans, gbm, rf, deeplearning, algos, golden, munging")
print("")
print(" --testsize Sizes (and by extension length) of tests to run:")
print(" s=small (seconds), m=medium (a minute or two), l=large (longer), x=xlarge (very big)")
print(" (Default is to run all tests.)")
print("")
print(" --usecloud ip:port of cloud to send tests to instead of starting clouds.")
print(" (When this is specified, numclouds is ignored.)")
print("")
print(" --usecloud2 cloud.cfg: Use a set clouds defined in cloud.config to run tests on.")
print(" (When this is specified, numclouds, numnodes, and usecloud are ignored.)")
print("")
print(" --client Send REST API commands through client mode.")
print("")
print(" --norun Perform side effects like wipe, but don't actually run tests.")
print("")
print(" --jvm.xmx Configure size of launched JVM running H2O. E.g. '--jvm.xmx 3g'")
print("")
print(" --nopass Run the NOPASS and NOFEATURE tests only and do not ignore any failures.")
print("")
print(" --c Start the JVMs in a convenient location.")
print("")
print(" --h2ojar Supply a path to the H2O jar file.")
print("")
print(" --tar Supply a path to the R TAR.")
print("")
print(" --pto The phantomjs timeout in seconds. Default is 3600 (1hr).")
print("")
print(" --noxunit Do not produce xUnit reports.")
print("")
print(" If neither --test nor --testlist is specified, then the list of tests is")
print(" discovered automatically as files matching '*runit*.R'.")
print("")
print("")
print("Examples:")
print("")
print(" Just accept the defaults and go (note: output dir must not exist):")
print(" "+g_script_name)
print("")
print(" Remove all random seeds (i.e. make new ones) but don't run any tests:")
print(" "+g_script_name+" --wipeall --norun")
print("")
print(" For a powerful laptop with 8 cores (keep default numclouds):")
print(" "+g_script_name+" --wipeall")
print("")
print(" For a big server with 32 cores:")
print(" "+g_script_name+" --wipeall --numclouds 16")
print("")
print(" Just run the tests that finish quickly")
print(" "+g_script_name+" --wipeall --testsize s")
print("")
print(" Run one specific test, keeping old random seeds:")
print(" "+g_script_name+" --wipe --test path/to/test.R")
print("")
print(" Rerunning failures from a previous run, keeping old random seeds:")
print(" # Copy failures.txt, otherwise --wipe removes the directory with the list!")
print(" cp " + os.path.join(g_output_dir, "failures.txt") + " .")
print(" "+g_script_name+" --wipe --numclouds 16 --testlist failed.txt")
print("")
print(" Run tests on a pre-existing cloud (e.g. in a debugger), keeping old random seeds:")
print(" "+g_script_name+" --wipe --usecloud ip:port")
sys.exit(1)
def unknown_arg(s):
print("")
print("ERROR: Unknown argument: " + s)
print("")
usage()
def bad_arg(s):
print("")
print("ERROR: Illegal use of (otherwise valid) argument: " + s)
print("")
usage()
def error(s):
print("")
print("ERROR: " + s)
print("")
usage()
def parse_args(argv):
global g_base_port
global g_num_clouds
global g_nodes_per_cloud
global g_wipe_test_state
global g_wipe_output_dir
global g_test_to_run
global g_test_list_file
global g_test_group
global g_run_small
global g_run_medium
global g_run_large
global g_run_xlarge
global g_use_cloud
global g_use_cloud2
global g_use_client
global g_config
global g_use_ip
global g_use_port
global g_no_run
global g_jvm_xmx
global g_nopass
global g_convenient
global g_path_to_h2o_jar
global g_path_to_tar
global g_path_to_whl
global g_produce_unit_reports
global g_phantomjs_to
global g_phantomjs_packs
i = 1
while (i < len(argv)):
s = argv[i]
if (s == "--baseport"):
i += 1
if (i > len(argv)):
usage()
g_base_port = int(argv[i])
elif (s == "--numclouds"):
i += 1
if (i > len(argv)):
usage()
g_num_clouds = int(argv[i])
elif (s == "--numnodes"):
i += 1
if (i > len(argv)):
usage()
g_nodes_per_cloud = int(argv[i])
elif (s == "--wipeall"):
g_wipe_test_state = True
g_wipe_output_dir = True
elif (s == "--wipe"):
g_wipe_output_dir = True
elif (s == "--test"):
i += 1
if (i > len(argv)):
usage()
g_test_to_run = TestRunner.find_test(argv[i])
elif (s == "--testlist"):
i += 1
if (i > len(argv)):
usage()
g_test_list_file = argv[i]
elif (s == "--testgroup"):
i += 1
if (i > len(argv)):
usage()
g_test_group = argv[i]
elif (s == "--testsize"):
i += 1
if (i > len(argv)):
usage()
v = argv[i]
if (re.match(r'(s)?(m)?(l)?', v)):
if 's' not in v:
g_run_small = False
if 'm' not in v:
g_run_medium = False
if 'l' not in v:
g_run_large = False
if 'x' not in v:
g_run_xlarge = False
else:
bad_arg(s)
elif (s == "--usecloud"):
i += 1
if (i > len(argv)):
usage()
s = argv[i]
m = re.match(r'(\S+):([1-9][0-9]*)', s)
if (m is None):
unknown_arg(s)
g_use_cloud = True
g_use_ip = m.group(1)
port_string = m.group(2)
g_use_port = int(port_string)
elif (s == "--usecloud2"):
i += 1
if (i > len(argv)):
usage()
s = argv[i]
if (s is None):
unknown_arg(s)
g_use_cloud2 = True
g_config = s
elif (s == "--client"):
g_use_client = True
elif (s == "--nopass"):
g_nopass = True
elif s == "--c":
g_convenient = True
elif s == "--h2ojar":
i += 1
g_path_to_h2o_jar = os.path.abspath(argv[i])
elif s == "--pto":
i += 1
g_phantomjs_to = int(argv[i])
elif s == "--ptt":
i += 1
g_phantomjs_packs = argv[i]
elif s == "--tar":
i += 1
g_path_to_tar = os.path.abspath(argv[i])
elif s == "--whl":
i += 1
g_path_to_whl = os.path.abspath(argv[i])
elif (s == "--jvm.xmx"):
i += 1
if (i > len(argv)):
usage()
g_jvm_xmx = argv[i]
elif (s == "--norun"):
g_no_run = True
elif (s == "--noxunit"):
g_produce_unit_reports = False
elif (s == "-h" or s == "--h" or s == "-help" or s == "--help"):
usage()
else:
unknown_arg(s)
i += 1
if ((int(g_use_client) + int(g_use_cloud) + int(g_use_cloud2)) > 1):
print("")
print("ERROR: --client, --usecloud and --usecloud2 are mutually exclusive.")
print("")
sys.exit(1)
def wipe_output_dir():
print("")
print("Wiping output directory...")
try:
if (os.path.exists(g_output_dir)):
shutil.rmtree(g_output_dir)
except OSError as e:
print("")
print("ERROR: Removing output directory failed: " + g_output_dir)
print(" (errno {0}): {1}".format(e.errno, e.strerror))
print("")
sys.exit(1)
def wipe_test_state(test_root_dir):
print("")
print("Wiping test state (including random seeds)...")
if (True):
possible_seed_file = os.path.join(test_root_dir, str("master_seed"))
if (os.path.exists(possible_seed_file)):
try:
os.remove(possible_seed_file)
except OSError as e:
print("")
print("ERROR: Removing seed file failed: " + possible_seed_file)
print(" (errno {0}): {1}".format(e.errno, e.strerror))
print("")
sys.exit(1)
for d, subdirs, files in os.walk(test_root_dir):
for s in subdirs:
if ("Rsandbox" in s):
rsandbox_dir = os.path.join(d, s)
try:
shutil.rmtree(rsandbox_dir)
except OSError as e:
print("")
print("ERROR: Removing RSandbox directory failed: " + rsandbox_dir)
print(" (errno {0}): {1}".format(e.errno, e.strerror))
print("")
sys.exit(1)
def main(argv):
"""
Main program.
@return: none
"""
global g_script_name
global g_num_clouds
global g_nodes_per_cloud
global g_output_dir
global g_failed_output_dir
global g_test_to_run
global g_test_list_file
global g_test_group
global g_runner
global g_nopass
global g_path_to_tar
global g_path_to_whl
g_script_name = os.path.basename(argv[0])
# Calculate test_root_dir.
test_root_dir = os.path.realpath(os.getcwd())
# Calculate global variables.
g_output_dir = os.path.join(test_root_dir, str("results"))
g_failed_output_dir = os.path.join(g_output_dir, str("failed"))
testreport_dir = os.path.join(test_root_dir, str("../build/test-results"))
# Override any defaults with the user's choices.
parse_args(argv)
# Look for h2o jar file.
h2o_jar = g_path_to_h2o_jar
if (h2o_jar is None):
possible_h2o_jar_parent_dir = test_root_dir
while (True):
possible_h2o_jar_dir = os.path.join(possible_h2o_jar_parent_dir, "build")
possible_h2o_jar = os.path.join(possible_h2o_jar_dir, "h2o.jar")
if (os.path.exists(possible_h2o_jar)):
h2o_jar = possible_h2o_jar
break
next_possible_h2o_jar_parent_dir = os.path.dirname(possible_h2o_jar_parent_dir)
if (next_possible_h2o_jar_parent_dir == possible_h2o_jar_parent_dir):
break
possible_h2o_jar_parent_dir = next_possible_h2o_jar_parent_dir
# Wipe output directory if requested.
if (g_wipe_output_dir):
wipe_output_dir()
# Wipe persistent test state if requested.
if (g_wipe_test_state):
wipe_test_state(test_root_dir)
# Create runner object.
# Just create one cloud if we're only running one test, even if the user specified more.
if (g_test_to_run is not None):
g_num_clouds = 1
g_runner = TestRunner(test_root_dir,
g_use_cloud, g_use_cloud2, g_use_client, g_config, g_use_ip, g_use_port,
g_num_clouds, g_nodes_per_cloud, h2o_jar, g_base_port, g_jvm_xmx,
g_output_dir, g_failed_output_dir, g_path_to_tar, g_path_to_whl, g_produce_unit_reports,
testreport_dir)
# Build test list.
if (g_test_to_run is not None):
g_runner.add_test(g_test_to_run)
elif (g_test_list_file is not None):
g_runner.read_test_list_file(g_test_list_file)
else:
# Test group can be None or not.
g_runner.build_test_list(g_test_group, g_run_small, g_run_medium, g_run_large, g_run_xlarge, g_nopass)
# If no run is specified, then do an early exit here.
if (g_no_run):
sys.exit(0)
# Handle killing the runner.
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
# Sanity check existence of H2O jar file before starting the cloud.
if ((h2o_jar is None) or (not os.path.exists(h2o_jar))):
print("")
print("ERROR: H2O jar not found")
print("")
sys.exit(1)
# Run.
try:
g_runner.start_clouds()
g_runner.run_tests(g_nopass)
finally:
g_runner.stop_clouds()
g_runner.report_summary(g_nopass)
# If the overall regression did not pass then exit with a failure status code.
if (not g_runner.get_regression_passed()):
sys.exit(1)
if __name__ == "__main__":
main(sys.argv)
| apache-2.0 | -4,628,322,847,902,796,000 | 32.517259 | 119 | 0.513729 | false |
shollingsworth/HackerRank | python/re-sub-regex-substitution/main.py | 1 | 1291 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
import json
def banner():
ban = '====' * 30
print("{}\nSAMPLE INP:\n{}\n{}".format(ban,ban,open(ip, 'r').read()))
print("{}\nSAMPLE OUT:\n{}\n{}".format(ban,ban,open(op, 'r').read()))
print("{}\nSTART:\n{}".format(ban,ban))
sys.stdin = open(ip, 'r')
cnt = -1
def comp(inp,ln):
outl = output_arr[ln]
if str(inp) != outl:
raise Exception("Error input output: line {}, file: {}\ngot: {} expected: {}".format(ln,op,inp,outl))
ip = "./input06.txt"
op = "./output06.txt"
ip = "./challenge_sample_input"
op = "./challenge_sample_output"
output_arr = map(str,open(op,'r').read().split('\n'))
banner()
# https://www.hackerrank.com/challenges/re-sub-regex-substitution/problem
import re
s = "\n".join([raw_input() for _ in range(int(raw_input()))])
regexes = map(re.escape,[' && ',' || '])
replace = [' and ',' or ']
while True in [ bool(re.search(regex,s)) for regex in regexes ]:
a1,b1 = regexes
a2,b2 = replace
s = re.sub(a1,a2,s)
s = re.sub(b1,b2,s)
print(s)
"""
solution #1
mmap = {
}
for repl, arr in mmap.items():
regex, orig = arr
for m in re.finditer(regex,s):
if re.match(' ',m.group(1)):
s = re.sub(orig,repl,s)
print(s)
"""
| apache-2.0 | 8,672,584,622,276,070,000 | 22.907407 | 109 | 0.573974 | false |
alexholehouse/SBMLIntegrator | libsbml-5.0.0/src/bindings/python/test/sbml/TestReadFromFile5.py | 1 | 6830 | #
# @file TestReadFromFile5.py
# @brief Reads test-data/l2v1-assignment.xml into memory and tests it.
#
# @author Akiya Jouraku (Python conversion)
# @author Ben Bornstein
#
# $Id$
# $HeadURL$
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/sbml/test/TestReadFromFile5.cpp
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
class TestReadFromFile5(unittest.TestCase):
def test_read_l2v1_assignment(self):
reader = libsbml.SBMLReader()
filename = "../../sbml/test/test-data/"
filename += "l2v1-assignment.xml"
d = reader.readSBML(filename)
if (d == None):
pass
self.assert_( d.getLevel() == 2 )
self.assert_( d.getVersion() == 1 )
m = d.getModel()
self.assert_( m != None )
self.assert_( m.getNumCompartments() == 1 )
c = m.getCompartment(0)
self.assert_( c != None )
self.assert_( c.getId() == "cell" )
ud = c.getDerivedUnitDefinition()
self.assert_( ud.getNumUnits() == 1 )
self.assert_( ud.getUnit(0).getKind() == libsbml.UNIT_KIND_LITRE )
loc = m.getListOfCompartments()
c1 = loc.get(0)
self.assert_( c1 == c )
c1 = loc.get("cell")
self.assert_( c1 == c )
self.assert_( m.getNumSpecies() == 5 )
s = m.getSpecies(0)
self.assert_( s != None )
self.assert_( s.getId() == "X0" )
self.assert_( s.getCompartment() == "cell" )
self.assert_( s.getInitialConcentration() == 1.0 )
los = m.getListOfSpecies()
s1 = los.get(0)
self.assert_( s1 == s )
s1 = los.get("X0")
self.assert_( s1 == s )
s = m.getSpecies(1)
self.assert_( s != None )
self.assert_( s.getId() == "X1" )
self.assert_( s.getCompartment() == "cell" )
self.assert_( s.getInitialConcentration() == 0.0 )
s = m.getSpecies(2)
self.assert_( s != None )
self.assert_( s.getId() == "T" )
self.assert_( s.getCompartment() == "cell" )
self.assert_( s.getInitialConcentration() == 0.0 )
s = m.getSpecies(3)
self.assert_( s != None )
self.assert_( s.getId() == "S1" )
self.assert_( s.getCompartment() == "cell" )
self.assert_( s.getInitialConcentration() == 0.0 )
s = m.getSpecies(4)
self.assert_( s != None )
self.assert_( s.getId() == "S2" )
self.assert_( s.getCompartment() == "cell" )
self.assert_( s.getInitialConcentration() == 0.0 )
self.assert_( m.getNumParameters() == 1 )
p = m.getParameter(0)
self.assert_( p != None )
self.assert_( p.getId() == "Keq" )
self.assert_( p.getValue() == 2.5 )
lop = m.getListOfParameters()
p1 = lop.get(0)
self.assert_( p1 == p )
p1 = lop.get("Keq")
self.assert_( p1 == p )
ud = p.getDerivedUnitDefinition()
self.assert_( ud.getNumUnits() == 0 )
self.assert_( m.getNumRules() == 2 )
ar = m.getRule(0)
self.assert_( ar != None )
self.assert_( ar.getVariable() == "S1" )
self.assert_( ar.getFormula() == "T / (1 + Keq)" )
ud = ar.getDerivedUnitDefinition()
self.assert_( ud.getNumUnits() == 2 )
self.assert_( ud.getUnit(0).getKind() == libsbml.UNIT_KIND_MOLE )
self.assert_( ud.getUnit(0).getExponent() == 1 )
self.assert_( ud.getUnit(1).getKind() == libsbml.UNIT_KIND_LITRE )
self.assert_( ud.getUnit(1).getExponent() == -1 )
self.assert_( ar.containsUndeclaredUnits() == True )
lor = m.getListOfRules()
ar1 = lor.get(0)
self.assert_( ar1 == ar )
ar1 = lor.get("S1")
self.assert_( ar1 == ar )
ar = m.getRule(1)
self.assert_( ar != None )
self.assert_( ar.getVariable() == "S2" )
self.assert_( ar.getFormula() == "Keq * S1" )
self.assert_( m.getNumReactions() == 2 )
r = m.getReaction(0)
self.assert_( r != None )
self.assert_( r.getId() == "in" )
self.assert_( r.getNumReactants() == 1 )
self.assert_( r.getNumProducts() == 1 )
sr = r.getReactant(0)
self.assert_( sr != None )
self.assert_( sr.getSpecies() == "X0" )
sr = r.getProduct(0)
self.assert_( sr != None )
self.assert_( sr.getSpecies() == "T" )
kl = r.getKineticLaw()
self.assert_( kl != None )
self.assert_( kl.getFormula() == "k1 * X0" )
self.assert_( kl.getNumParameters() == 1 )
r1 = kl.getParentSBMLObject()
self.assert_( r1 != None )
self.assert_( r1.getId() == "in" )
self.assert_( r1.getNumReactants() == 1 )
self.assert_( r1.getNumProducts() == 1 )
p = kl.getParameter(0)
self.assert_( p != None )
self.assert_( p.getId() == "k1" )
self.assert_( p.getValue() == 0.1 )
kl = p.getParentSBMLObject().getParentSBMLObject()
self.assert_( kl != None )
self.assert_( kl.getFormula() == "k1 * X0" )
self.assert_( kl.getNumParameters() == 1 )
r = m.getReaction(1)
self.assert_( r != None )
self.assert_( r.getId() == "out" )
self.assert_( r.getNumReactants() == 1 )
self.assert_( r.getNumProducts() == 1 )
sr = r.getReactant(0)
self.assert_( sr != None )
self.assert_( sr.getSpecies() == "T" )
sr = r.getProduct(0)
self.assert_( sr != None )
self.assert_( sr.getSpecies() == "X1" )
kl = r.getKineticLaw()
self.assert_( kl != None )
self.assert_( kl.getFormula() == "k2 * T" )
self.assert_( kl.getNumParameters() == 1 )
p = kl.getParameter(0)
self.assert_( p != None )
self.assert_( p.getId() == "k2" )
self.assert_( p.getValue() == 0.15 )
d = None
pass
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestReadFromFile5))
return suite
if __name__ == "__main__":
if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
sys.exit(0)
else:
sys.exit(1)
| gpl-3.0 | 6,617,555,545,422,540,000 | 34.759162 | 79 | 0.585066 | false |
wfwei/ReadWeibo | classifier/DataProcess.py | 1 | 3547 | # !/usr/bin/python
# -*- coding: utf-8 -*-
'''
Created on 2013-9-2
@author: plex
'''
from ReadWeibo.mainapp.models import Weibo
from ReadWeibo.account.models import Account
from djangodb.WeiboDao import WeiboDao
from datetime import datetime
from sets import Set
import numpy as np
import re
import jieba
jieba.load_userdict("/etc/jieba/jieba.dic")
DIC_FILE = "../data/dic/weibo.dic"
TRAIN_FILE = "../data/train/weibo_vec.tr"
#TODO remove stop words, normalize words, and add a dictionary from http://www.sogou.com/labs/dl/w.html
def generate_user_dict(dic_file=DIC_FILE):
wbs = Weibo.objects.all()[:5000]
wordset = Set()
print 'Generating dict with %d weibo' % len(wbs)
for wb in wbs:
for word in jieba.cut(wb.text.encode('utf-8','ignore')):
if len(word)>6: #TODO filter by Cixing
wordset.add(word.lower().strip())
with open(dic_file, "w") as f:
for word in wordset:
f.write("%s\n" % word)
def load_dict(dic_file=DIC_FILE):
print 'loading dict from ', DIC_FILE
dict = {}
with open(dic_file, "r") as f:
id = 0
for word in f:
dict[word.strip().encode("utf-8", "ignore")] = id
id += 1
return dict
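# Illustrative note (assumed contents, not part of the original project): the .dic
# file written by generate_user_dict() holds one token per line, e.g.
#
#   数据挖掘
#   机器学习
#   微博
#
# load_dict() maps each token to its line index, so the file above yields
# {'数据挖掘': 0, '机器学习': 1, '微博': 2}, and generate_feature() uses those ids as
# positions in the bag-of-words part of the feature vector.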
def generate_feature(wb, dict):
fea = [0]*len(dict)
	# weibo text
for wd in jieba.cut(wb.text.encode('utf-8','ignore')):
word_count = 0
wd = wd.lower().strip()
if len(wd)>3 and wd in dict:
fea[dict[wd]] += 1
word_count += 1
# print 'found %d word in a weibo' % word_count
# add user features
owner = wb.owner
fea.append(int(owner.w_province))
fea.append(int(owner.w_city))
if owner.w_url:
fea.append(1)
else:
fea.append(0)
fea.append(len(owner.w_description))
if 'm' in owner.w_gender:
fea.append(1)
else:
fea.append(0)
fea.append(int(owner.w_followers_count))
fea.append(int(owner.w_friends_count))
fea.append(int(owner.w_statuses_count))
fea.append(int(owner.w_favourites_count))
fea.append(int(owner.w_bi_followers_count))
fea.append((datetime.now()-owner.w_created_at).days/100)
if owner.w_verified:
fea.append(1)
else:
fea.append(0)
# add weibo features
fea.append(int(wb.reposts_count))
fea.append(int(wb.comments_count))
fea.append(int(wb.attitudes_count))
if re.search("#.*?#", wb.text):
fea.append(1)
else:
fea.append(0)
fea.append(len(wb.text))
own_text = re.search("(.*?)//@", wb.text)
if own_text:
fea.append(len(own_text.group(1)))
else:
fea.append(len(wb.text))
	#TODO categorize the source field
fea.append(len(wb.source))
if wb.retweeted_status:
fea.append(0)
else:
fea.append(1)
if wb.thumbnail_pic:
fea.append(1)
else:
fea.append(0)
fea.append(wb.created_at.hour)
fea.append(wb.created_at.weekday())
	# TODO compute a time-decay formula for weibo repost/comment counts
return fea
def generate_train_file():
print 'Generating train file...'
wbs = Weibo.objects.filter(real_category__gt=0)
word_dic = load_dict()
print 'Train set size: %d, dic size:%d' % (len(wbs), len(word_dic))
with open(TRAIN_FILE, "w") as train_file:
for wb in wbs:
for fea in generate_feature(wb, word_dic):
train_file.write("%s\t" % fea)
train_file.write("%s\n" % wb.real_category)
def get_weibo_to_predict(count=1000):
wbs = Weibo.objects.filter(real_category__exact = 0)[:count]
word_dic = load_dict()
wb_feas_list = list()
for wb in wbs:
try:
wb_feas_list.append((wb, [1.0] + generate_feature(wb, word_dic)));
except:
print 'generate feature fail for weibo:', wb.w_id
return wb_feas_list
if __name__ == '__main__':
generate_user_dict()
generate_train_file()
# print generate_feature(Weibo.objects.get(w_id=3617663458268921),{})
pass
| apache-2.0 | 5,765,088,084,828,768,000 | 22.206667 | 70 | 0.670497 | false |
ewiger/decade | lib/antlr3c/genclib.py | 1 | 1592 | #!/usr/bin/python
'''
This code will automatically generate most of the ctypes from ANTLR3 C runtime
headers.
'''
import os
import sys
import ext_path
from pyclibrary import *
from glob import glob
ANTLR3C_INCLUDE = os.path.join(
os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
'libantlr3c-3.4', 'include',
)
def get_antlr3c_headers():
return glob(ANTLR3C_INCLUDE + '/*')
def parse_headers(hfiles):
p = CParser(hfiles)
p.processAll(verbose=True)
return p
def save(p, output_dir):
from pprint import pprint
for k in p.dataList:
with file(os.path.join(output_dir, '%s.py' % k), 'w+') as output:
print 'Saving %s' % k
comment = """'''
%s
This is an auto-generated ctypes file from ANTLR3 C runtime headers. Note that
editing this file is not smart! For more details check genclib.py
It should be possible to redefine things in __init__.py if necessary (right
after imports section).
wbr, yy
'''\n""" % k.upper()
print >>output, comment
print >>output, '%s = \\' % k.upper()
pprint(p.defs[k], output)
if __name__ == '__main__':
hfiles = get_antlr3c_headers()
print('Found (%d) ANTLR3C headers, preparing to generate ctypes..' \
% len(hfiles))
p = parse_headers(hfiles)
output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'generated')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
with open(os.path.join(output_dir, '__init__.py'), 'w+') as touched:
pass
save(p, output_dir)
| bsd-3-clause | -3,333,805,171,277,549,600 | 25.533333 | 87 | 0.627513 | false |
eunchong/build | scripts/slave/recipe_modules/archive/example.py | 1 | 3528 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
DEPS = [
'archive',
'recipe_engine/json',
'recipe_engine/path',
'recipe_engine/platform',
'recipe_engine/properties',
]
TEST_HASH_MAIN='5e3250aadda2b170692f8e762d43b7e8deadbeef'
TEST_COMMIT_POSITON_MAIN='refs/heads/B1@{#123456}'
TEST_HASH_COMPONENT='deadbeefdda2b170692f8e762d43b7e8e7a96686'
TEST_COMMIT_POSITON_COMPONENT='refs/heads/master@{#234}'
def RunSteps(api):
api.archive.clusterfuzz_archive(
build_dir=api.path['slave_build'].join('src', 'out', 'Release'),
update_properties=api.properties.get('update_properties'),
gs_bucket='chromium',
gs_acl=api.properties.get('gs_acl', ''),
archive_prefix='chrome-asan',
archive_subdir_suffix=api.properties.get('archive_subdir_suffix', ''),
revision_dir=api.properties.get('revision_dir'),
primary_project=api.properties.get('primary_project'),
)
def GenTests(api):
update_properties = {
'got_revision': TEST_HASH_MAIN,
'got_revision_cp': TEST_COMMIT_POSITON_MAIN,
}
for platform, build_files in (
('win', ['chrome', 'icu.dat', 'lib', 'file.obj']),
('mac', ['chrome', 'icu.dat', 'pdfsqueeze']),
('linux', ['chrome', 'icu.dat', 'lib.host']),
):
yield (
api.test('cf_archiving_%s' % platform) +
api.platform(platform, 64) +
api.properties(
update_properties=update_properties,
gs_acl='public-read',
archive_subdir_suffix='subdir',
) +
api.override_step_data('listdir build_dir', api.json.output(build_files))
)
# An svn project with a separate git property.
update_properties = {
'got_revision': '123456',
'got_revision_git': TEST_HASH_MAIN,
'got_revision_cp': TEST_COMMIT_POSITON_MAIN,
}
yield (
api.test('cf_archiving_svn_with_git') +
api.platform('linux', 64) +
api.properties(update_properties=update_properties) +
api.override_step_data(
'listdir build_dir', api.json.output(['chrome']))
)
# An svn project without git hash.
update_properties = {
'got_revision': '123456',
'got_revision_cp': TEST_COMMIT_POSITON_MAIN,
}
yield (
api.test('cf_archiving_svn_no_git') +
api.platform('linux', 64) +
api.properties(update_properties=update_properties) +
api.override_step_data(
'listdir build_dir', api.json.output(['chrome']))
)
# A component build with git.
update_properties = {
'got_x10_revision': TEST_HASH_COMPONENT,
'got_x10_revision_cp': TEST_COMMIT_POSITON_COMPONENT,
}
yield (
api.test('cf_archiving_component') +
api.platform('linux', 64) +
api.properties(
update_properties=update_properties,
revision_dir='x10',
primary_project='x10',
) +
api.override_step_data(
'listdir build_dir', api.json.output(['chrome', 'resources']))
)
# A component on svn with a separate git property.
update_properties = {
'got_x10_revision': '234',
'got_x10_revision_git': TEST_HASH_COMPONENT,
'got_x10_revision_cp': TEST_COMMIT_POSITON_COMPONENT,
}
yield (
api.test('cf_archiving_component_svn_with_git') +
api.platform('linux', 64) +
api.properties(
update_properties=update_properties,
revision_dir='x10',
primary_project='x10',
) +
api.override_step_data(
'listdir build_dir', api.json.output(['chrome']))
)
| bsd-3-clause | -6,284,614,575,291,679,000 | 29.947368 | 79 | 0.642574 | false |
SteveDiamond/cvxpy | cvxpy/reductions/dcp2cone/atom_canonicalizers/__init__.py | 2 | 3720 | """
Copyright 2013 Steven Diamond
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cvxpy.atoms import *
from cvxpy.atoms.suppfunc import SuppFuncAtom
from cvxpy.atoms.affine.binary_operators import MulExpression, multiply
from cvxpy.atoms.affine.index import special_index
from cvxpy.transforms.indicator import indicator
from cvxpy.reductions.dcp2cone.atom_canonicalizers.exp_canon import *
from cvxpy.reductions.dcp2cone.atom_canonicalizers.entr_canon import *
from cvxpy.reductions.dcp2cone.atom_canonicalizers.geo_mean_canon import *
from cvxpy.reductions.dcp2cone.atom_canonicalizers.huber_canon import *
from cvxpy.reductions.dcp2cone.atom_canonicalizers.indicator_canon import *
from cvxpy.reductions.dcp2cone.atom_canonicalizers.kl_div_canon import *
from cvxpy.reductions.dcp2cone.atom_canonicalizers.lambda_max_canon import *
from cvxpy.reductions.dcp2cone.atom_canonicalizers.lambda_sum_largest_canon import *
from cvxpy.reductions.dcp2cone.atom_canonicalizers.log_canon import *
from cvxpy.reductions.dcp2cone.atom_canonicalizers.log_det_canon import *
from cvxpy.reductions.dcp2cone.atom_canonicalizers.log_sum_exp_canon import *
from cvxpy.reductions.dcp2cone.atom_canonicalizers.log1p_canon import *
from cvxpy.reductions.dcp2cone.atom_canonicalizers.logistic_canon import *
from cvxpy.reductions.dcp2cone.atom_canonicalizers.matrix_frac_canon import *
from cvxpy.reductions.dcp2cone.atom_canonicalizers.normNuc_canon import *
from cvxpy.reductions.dcp2cone.atom_canonicalizers.power_canon import *
from cvxpy.reductions.dcp2cone.atom_canonicalizers.pnorm_canon import *
from cvxpy.reductions.dcp2cone.atom_canonicalizers.sigma_max_canon import *
from cvxpy.reductions.dcp2cone.atom_canonicalizers.quad_form_canon import *
from cvxpy.reductions.dcp2cone.atom_canonicalizers.quad_over_lin_canon import *
from cvxpy.reductions.dcp2cone.atom_canonicalizers.suppfunc_canon import suppfunc_canon
from cvxpy.reductions.utilities import special_index_canon
from cvxpy.reductions.eliminate_pwl.atom_canonicalizers import (
abs_canon, cummax_canon, cumsum_canon, maximum_canon,
max_canon, minimum_canon, min_canon, norm1_canon,
norm_inf_canon, sum_largest_canon
)
# TODO: remove pwl canonicalize methods, use EliminatePwl reduction instead
CANON_METHODS = {
cummax : cummax_canon,
cumsum : cumsum_canon,
geo_mean : geo_mean_canon,
lambda_max : lambda_max_canon,
lambda_sum_largest : lambda_sum_largest_canon,
log_det : log_det_canon,
log_sum_exp : log_sum_exp_canon,
MatrixFrac : matrix_frac_canon,
max : max_canon,
min : min_canon,
norm1 : norm1_canon,
normNuc : normNuc_canon,
norm_inf : norm_inf_canon,
Pnorm : pnorm_canon,
QuadForm : quad_form_canon,
quad_over_lin : quad_over_lin_canon,
sigma_max : sigma_max_canon,
sum_largest : sum_largest_canon,
abs : abs_canon,
entr : entr_canon,
exp : exp_canon,
huber : huber_canon,
kl_div : kl_div_canon,
log : log_canon,
log1p : log1p_canon,
logistic : logistic_canon,
maximum : maximum_canon,
minimum : minimum_canon,
power : power_canon,
indicator : indicator_canon,
special_index : special_index_canon,
SuppFuncAtom : suppfunc_canon
}
| gpl-3.0 | -7,737,156,368,280,461,000 | 42.255814 | 87 | 0.774731 | false |
tseaver/google-cloud-python | asset/synth.py | 1 | 3228 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
from synthtool import gcp
gapic = gcp.GAPICGenerator()
common = gcp.CommonTemplates()
versions = ["v1beta1", "v1p2beta1", "v1"]
excludes = ["setup.py", "nox*.py", "README.rst", "docs/conf.py", "docs/index.rst"]
# ----------------------------------------------------------------------------
# Generate asset GAPIC layer
# ----------------------------------------------------------------------------
for version in versions:
library = gapic.py_library(
"asset",
version,
config_path=f"/google/cloud/asset/artman_cloudasset_{version}.yaml",
artman_output_name=f"asset-{version}",
include_protos=True,
)
s.move(library, excludes=excludes)
s.replace(
"google/cloud/asset_v*/proto/assets_pb2.py",
"from google.iam.v1 import policy_pb2 as",
"from google.iam.v1 import iam_policy_pb2_grpc as",
)
s.replace(
"google/cloud/asset_v*/proto/assets_pb2.py",
"from google.iam.v1 import iam_policy_pb2_grpc "
"as google_dot_iam_dot_v1_dot_policy__pb2",
"from google.iam.v1 import iam_policy_pb2 "
"as google_dot_iam_dot_v1_dot_policy__pb2",
)
s.replace(
"google/cloud/asset_v*/proto/assets_pb2.py",
"_ASSET.fields_by_name\['iam_policy'\].message_type "
"= google_dot_iam_dot_v1_dot_policy__pb2._POLICY",
"_ASSET.fields_by_name['iam_policy'].message_type = google_dot_iam_dot"
"_v1_dot_policy__pb2.google_dot_iam_dot_v1_dot_policy__pb2._POLICY",
)
_BORKED_ASSET_DOCSTRING = """\
The full name of the asset. For example: ``//compute.googleapi
s.com/projects/my_project_123/zones/zone1/instances/instance1`
`. See `Resource Names <https://cloud.google.com/apis/design/r
esource_names#full_resource_name>`__ for more information.
"""
_FIXED_ASSET_DOCSTRING = """
The full name of the asset. For example:
``//compute.googleapis.com/projects/my_project_123/zones/zone1/instances/instance1``.
See https://cloud.google.com/apis/design/resource_names#full_resource_name
for more information.
"""
s.replace(
"google/cloud/asset_v*/proto/assets_pb2.py",
_BORKED_ASSET_DOCSTRING,
_FIXED_ASSET_DOCSTRING,
)
# ----------------------------------------------------------------------------
# Add templated files
# ----------------------------------------------------------------------------
templated_files = gcp.CommonTemplates().py_library(unit_cov_level=79, cov_level=80)
s.move(templated_files, excludes=["noxfile.py"])
s.shell.run(["nox", "-s", "blacken"], hide_output=False)
| apache-2.0 | 1,773,273,584,265,377,000 | 35.269663 | 95 | 0.619579 | false |
karel-brinda/rnftools | rnftools/rnfformat/Validator.py | 1 | 4484 | import rnftools.rnfformat
import re
reg_lrn = re.compile(r"^([!-?A-^`-~]*)__([0-9a-f]+)__([!-?A-^`-~]+)__([!-?A-^`-~]*)$")
reg_prefix_part = re.compile(r"^[!-?A-^`-~]*$")
reg_id_part = re.compile(r"^[0-9a-f]+$")
reg_segmental_part = re.compile(r"^(?:(\([0-9FRN,]*\))(?:,(?!$)|$))+$")
reg_suffix_part = re.compile(r"^(?:((?:[a-zA-Z0-9]+:){0,1})\[([!-?A-Z\\^`-~]*)\](?:,(?!$)|$))+$")
reg_segment = re.compile(r"^\(([0-9]+),([0-9]+),([FRN]),([0-9]+),([0-9]+)\)$")
reg_comment = re.compile(r"^\[([!-?A-Z\\^`-~]*)\]$")
reg_extension = re.compile(r"^\[([!-?A-Z\\^`-~]*)\]$")
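# Illustrative example (assumed name, not part of the original module): an RNF read
# tuple name has four "__"-separated parts -- prefix, hexadecimal id, segmental part
# and suffix -- so a (made-up) name such as
#
#   sim__00000af3__(1,2,F,1001,1100),(1,2,R,1150,1249)__[rnf]
#
# is matched by reg_lrn as a whole, each parenthesised five-field segment by
# reg_segment, and the trailing bracketed block by reg_suffix_part.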
class Validator:
"""Class for validation of RNF.
Args:
initial_read_tuple_name (str): Initial read tuple name to detect profile (widths).
report_only_first (bool): Report only first occurrence of every error.
warnings_as_errors (bool): Treat warnings as errors (error code).
"""
def __init__(
self,
initial_read_tuple_name,
report_only_first=True,
warnings_as_errors=False,
):
self.report_only_first = report_only_first
self.reported_errors = set()
self.error_has_been_reported = False
self.warning_has_been_reported = False
self.warnings_as_errors = warnings_as_errors
self.rnf_profile = rnftools.rnfformat.RnfProfile(read_tuple_name=initial_read_tuple_name)
def validate(self, read_tuple_name):
"""Check RNF validity of a read tuple.
Args:
            read_tuple_name (str): Read tuple name to be checked.
"""
if reg_lrn.match(read_tuple_name) is None:
self.report_error(
read_tuple_name=read_tuple_name,
error_name="wrong_read_tuple_name_structure",
message="'{}' is not matched".format(reg_lrn),
)
else:
parts = read_tuple_name.split("__")
if reg_prefix_part.match(parts[0]) is None:
self.report_error(
read_tuple_name=read_tuple_name,
error_name="wrong_prefix_part",
message="'{}' is not matched".format(reg_prefix_part),
)
if reg_id_part.match(parts[1]) is None:
self.report_error(
read_tuple_name=read_tuple_name,
error_name="wrong_id_part",
message="'{}' is not matched".format(reg_id_part),
)
if reg_segmental_part.match(parts[2]) is None:
self.report_error(
read_tuple_name=read_tuple_name,
error_name="wrong_segmental_part",
message="'{}' is not matched".format(reg_segmental_part),
)
if reg_suffix_part.match(parts[3]) is None:
self.report_error(
read_tuple_name=read_tuple_name,
error_name="wrong_suffix_part",
message="'{}' is not matched".format(reg_suffix_part),
)
if not self.rnf_profile.check(read_tuple_name):
self.report_error(
read_tuple_name=read_tuple_name,
error_name="wrong_profile",
message="Read has a wrong profile (wrong widths). It should be: {} but it is: {}.".format(
self.rnf_profile,
rnftools.rnfformat.RnfProfile(read_tuple_name=read_tuple_name),
),
warning=True,
)
def get_return_code(self):
"""Get final return code (0 = ok, 1=error appeared).
"""
if self.error_has_been_reported:
return 1
if self.warning_has_been_reported and self.warnings_as_errors:
            return 1
        return 0
def report_error(self, read_tuple_name, error_name, wrong="", message="", warning=False):
"""Report an error.
Args:
            read_tuple_name (str): Name of the read tuple.
            error_name (str): Name of the error.
            wrong (str): What is wrong.
            message (str): Additional message to be printed.
warning (bool): Warning (not an error).
"""
if (not self.report_only_first) or (error_name not in self.reported_errors):
print("\t".join(["error" if warning == False else "warning", read_tuple_name, error_name, wrong, message]))
self.reported_errors.add(error_name)
if warning:
self.warning_has_been_reported = True
else:
self.error_has_been_reported = True
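# A minimal usage sketch of the class above. The read tuple name below is a
# made-up example, used only to illustrate the intended call pattern:
# validate every name, then exit with the accumulated return code.
if __name__ == "__main__":
    import sys
    example_names = ["sim__1__(1,1,F,11,60),(1,2,R,101,150)__[illustrative]"]
    validator = Validator(initial_read_tuple_name=example_names[0])
    for name in example_names:
        validator.validate(name)
    sys.exit(validator.get_return_code())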
| mit | -313,604,499,112,898,240 | 37.991304 | 119 | 0.533452 | false |
mlperf/inference_results_v0.5 | closed/NVIDIA/code/common/harness.py | 1 | 13993 | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import os, sys
sys.path.insert(0, os.getcwd())
from code.common import logging, dict_get, run_command, args_to_string
import code.common.arguments as common_args
plugin_map = {
"ssd-large": ["build/plugins/NMSOptPlugin/libnmsoptplugin.so"],
"ssd-small": ["build/plugins/NMSOptPlugin/libnmsoptplugin.so"],
}
scenario_result_regex = {
"SingleStream": r"([0-9]+th percentile latency \(ns\) +: [0-9\.]+)",
"MultiStream": r"(Samples per query : [0-9\.]+)",
"Offline": r"(Samples per second: [0-9\.]+)",
"Server": r"(Scheduled samples per second +: [0-9\.]+)",
}
benchmark_qsl_size_map = {
# See: https://github.com/mlperf/inference_policies/blob/master/inference_rules.adoc#benchmarks-1
"resnet": 1024,
"mobilenet": 1024,
"ssd-large": 64,
"ssd-small": 256,
}
class BenchmarkHarness():
def __init__(self, args, name=""):
print (args)
self.args = args
self.name = name
self.verbose = dict_get(args, "verbose", default=None)
if self.verbose:
logging.info("===== Harness arguments for {:} =====".format(name))
for key in args:
logging.info("{:}={:}".format(key, args[key]))
self.system_id = args["system_id"]
self.scenario = args["scenario"]
self.engine_dir = "./build/engines/{:}/{:}/{:}".format(self.system_id, self.name, self.scenario)
self.precision = args["precision"]
self.has_gpu = dict_get(args, "gpu_batch_size", default=None) is not None
self.has_dla = dict_get(args, "dla_batch_size", default=None) is not None
self.enumerate_engines()
def enumerate_engines(self):
if self.has_gpu:
self.gpu_engine = self._get_engine_name("gpu", self.args["gpu_batch_size"])
self.check_file_exists(self.gpu_engine)
if self.has_dla:
self.dla_engine = self._get_engine_name("dla", self.args["dla_batch_size"])
self.check_file_exists(self.dla_engine)
def _get_engine_name(self, device_type, batch_size):
return "{:}/{:}-{:}-{:}-b{:}-{:}.plan".format(self.engine_dir, self.name, self.scenario,
device_type, batch_size, self.precision)
def build_default_flags(self, custom_args):
flag_dict = {}
flag_dict["verbose"] = self.verbose
# Handle plugins
if self.name in plugin_map:
plugins = plugin_map[self.name]
for plugin in plugins:
self.check_file_exists(plugin)
flag_dict["plugins"] = ",".join(plugins)
# Generate flags for logfile names.
log_dir = os.path.join(self.args["log_dir"], self.system_id, self.name, self.scenario)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
flag_dict["logfile_outdir"] = log_dir
flag_dict["logfile_prefix"] = "mlperf_log_"
# Handle custom arguments
for arg in custom_args:
val = dict_get(self.args, arg, None)
if val is not None:
flag_dict[arg] = val
return flag_dict
def build_configs(self, flag_dict):
        # Ideally, these values would be in mlperf.conf. Since they aren't, write them into user.conf using these values.
# QSL Seed: 0x2b7e 1516 28ae d2a6
# Schedule Seed: 0x3243 f6a8 885a 308d
# Sample Seed: 0x093c 467e 37db 0c7a
seeds_map = {
"qsl_rng_seed": "3133965575612453542",
"sample_index_rng_seed": "665484352860916858",
"schedule_rng_seed": "3622009729038561421",
}
# required settings for each scenario
required_settings_map = {
"SingleStream": ["qsl_rng_seed", "sample_index_rng_seed", "schedule_rng_seed"], # "single_stream_expected_latency_ns", See: https://github.com/mlperf/inference/issues/471
"Offline": ["offline_expected_qps", "qsl_rng_seed", "sample_index_rng_seed", "schedule_rng_seed"],
"MultiStream": ["multi_stream_samples_per_query", "qsl_rng_seed", "sample_index_rng_seed", "schedule_rng_seed"],
"Server": ["server_target_qps", "qsl_rng_seed", "sample_index_rng_seed", "schedule_rng_seed"],
}
# optional settings that we support overriding
optional_settings_map = {
"SingleStream": [ "single_stream_target_latency_percentile", "min_query_count" ],
"Offline": [ "min_query_count" ],
"MultiStream": [ "multi_stream_target_qps", "multi_stream_target_latency_ns", "multi_stream_max_async_queries", "multi_stream_target_latency_percentile", "min_query_count" ],
"Server": [ "server_target_latency_percentile", "server_target_latency_ns", "min_query_count" ],
}
# option name to config file map
options_map = {
"single_stream_expected_latency_ns": "target_latency",
"offline_expected_qps": "target_qps",
"multi_stream_samples_per_query": "samples_per_query",
"server_target_qps": "target_qps",
}
parameter_scaling_map = {
"target_latency": 1 / 1000000.0,
"target_latency_percentile": 100.0,
}
system = self.system_id
benchmark = self.name
scenario = self.scenario
mlperf_conf_path = "measurements/{:}/{:}/{:}/mlperf.conf".format(system, benchmark, scenario)
user_conf_path = "measurements/{:}/{:}/{:}/user.conf".format(system, benchmark, scenario)
# setup paths
if "mlperf_conf_path" not in flag_dict:
flag_dict["mlperf_conf_path"] = mlperf_conf_path
if "user_conf_path" not in flag_dict:
flag_dict["user_conf_path"] = user_conf_path
# assign seed values
for name, value in seeds_map.items():
if name not in flag_dict:
flag_dict[name] = value
# auto-generate user.conf
with open(user_conf_path, "w") as f:
for param in required_settings_map[scenario]:
param_name = param
if param in options_map:
param_name = options_map[param]
value = flag_dict[param]
if param_name in parameter_scaling_map:
value = value * parameter_scaling_map[param_name]
f.write("*.{:}.{:} = {:}\n".format(scenario, param_name, value))
flag_dict[param] = None
for param in optional_settings_map[scenario]:
if param not in flag_dict: continue
param_name = param
if param in options_map:
param_name = options_map[param]
value = flag_dict[param]
if param_name in parameter_scaling_map:
value = value * parameter_scaling_map[param_name]
f.write("*.{:}.{:} = {:}\n".format(scenario, param_name, value))
flag_dict[param] = None
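    # The generated user.conf ends up with one line per required/optional
    # setting, e.g. for an Offline run (the target_qps value below is
    # illustrative only; the seeds are the fixed values from seeds_map):
    #   *.Offline.target_qps = 3100
    #   *.Offline.qsl_rng_seed = 3133965575612453542
    #   *.Offline.sample_index_rng_seed = 665484352860916858
    #   *.Offline.schedule_rng_seed = 3622009729038561421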
def run_harness(self):
executable = "./build/bin/harness_default"
self.check_file_exists(executable)
# These arguments are in self.args, passed in via code.main, which handles override arguments.
harness_args = common_args.LOADGEN_ARGS + common_args.LWIS_ARGS + common_args.SHARED_ARGS
flag_dict = self.build_default_flags(harness_args)
# Handle engines
if self.has_gpu:
flag_dict["gpu_engines"] = self.gpu_engine
if self.has_dla:
flag_dict["dla_engines"] = self.dla_engine
# Handle performance sample count
flag_dict["performance_sample_count"] = benchmark_qsl_size_map[self.name]
# Handle the expected qps values
if self.has_gpu and self.has_dla:
prefix = "concurrent_"
elif self.has_gpu:
prefix = "gpu_"
flag_dict["max_dlas"] = 0
elif self.has_dla:
prefix = "dla_"
flag_dict["max_dlas"] = 1
else:
raise ValueError("Cannot specify --no_gpu and --gpu_only at the same time")
if self.scenario == "SingleStream":
harness_flags = common_args.SINGLE_STREAM_PARAMS
elif self.scenario == "Offline":
harness_flags = common_args.OFFLINE_PARAMS
elif self.scenario == "MultiStream":
harness_flags = common_args.MULTI_STREAM_PARAMS
elif self.scenario == "Server":
harness_flags = common_args.SERVER_PARAMS
# use jemalloc2 for server scenario.
executable = "LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.2 " + executable
else:
raise ValueError("Invalid scenario: {:}".format(self.scenario))
for arg in harness_flags:
val = dict_get(self.args, prefix+arg, None)
if val is None:
raise ValueError("Missing required key {:}".format(prefix+arg))
flag_dict[arg] = val
# Handle configurations
self.build_configs(flag_dict)
argstr = args_to_string(flag_dict) + " --scenario " + self.scenario + " --model " + self.name
if self.name in ["ssd-small", "ssd-large"]:
argstr += " --response_postprocess coco"
cmd = "{:} {:}".format(executable, argstr)
output = run_command(cmd, get_output=True)
# Return harness result.
return self.harness_get_result(output, scenario_result_regex[self.scenario])
def harness_get_result(self, output, regex):
# All harness outputs should have a result string
result_regex = re.compile(regex)
result_string = ""
# All but the server harness should have an output with a validity message
valid_regex = re.compile(r"(Result is : (VALID|INVALID))")
valid_string = ""
# Iterate through the harness output
for line in output:
# Check for the result string
result_match = result_regex.match(line)
if not result_match is None:
result_string = result_match.group(1)
break
for line in output:
# Check for the validity string
valid_match = valid_regex.match(line)
if not valid_match is None:
valid_string = valid_match.group(1)
break
if result_string == "":
return "Cannot find performance result. Maybe you are running in AccuracyOnly mode."
elif valid_string == "":
return result_string + " but cannot find validity result."
else:
return result_string + " and " + valid_string
def check_file_exists(self, f):
if not os.path.isfile(f):
raise RuntimeError("File {:} does not exist.".format(f))
class GNMTHarness(BenchmarkHarness):
def __init__(self, args, name=""):
super().__init__(args, name=name)
def check_dir_exists(self, d):
if not os.path.isdir(d):
raise RuntimeError("Directory {:} does not exist.".format(d))
def enumerate_engines(self):
self.engines = []
if self.scenario == "Server":
batch_sizes = self.args["batch_sizes"]
else:
batch_sizes = [ self.args["gpu_batch_size"] ]
for batch_size in batch_sizes:
engine_name = self._get_engine_name("gpu", batch_size)
self.check_dir_exists(engine_name)
self.engines.append(engine_name)
def run_harness(self):
if self.scenario == "Server":
executable = "./build/bin/harness_gnmt_server"
harness_args = common_args.GNMT_SERVER_ARGS
else:
executable = "./build/bin/harness_gnmt_default"
harness_args = common_args.GNMT_HARNESS_ARGS
self.check_file_exists(executable)
flag_dict = self.build_default_flags(harness_args)
# Scenario based arguments
if self.scenario == "Offline":
scenario_args = common_args.OFFLINE_PARAMS
elif self.scenario == "SingleStream":
scenario_args = common_args.SINGLE_STREAM_PARAMS
else:
scenario_args = []
for key in scenario_args:
flag_dict[key] = dict_get(self.args, "gpu_"+key, None)
engine_flag = "engine" if len(self.engines) == 1 else "engines"
flag_dict[engine_flag] = ",".join(self.engines)
# Remove the batch size flags
flag_dict["batch_sizes"] = None
flag_dict["gpu_batch_size"] = None
# Choose input file based on test mode
if ((flag_dict.get("test_mode", None) == "PerformanceOnly" or flag_dict.get("test_mode", None) is None)
and flag_dict.get("input_file_performance", None) is not None):
flag_dict["input_file"] = flag_dict["input_file_performance"]
elif flag_dict.get("input_file_accuracy", None) is not None:
flag_dict["input_file"] = flag_dict["input_file_accuracy"]
flag_dict["input_file_performance"] = None
flag_dict["input_file_accuracy"] = None
# Handle configurations
self.build_configs(flag_dict)
argstr = args_to_string(flag_dict)
cmd = "{:} {:}".format(executable, argstr)
output = run_command(cmd, get_output=True)
# Return harness result.
return self.harness_get_result(output, scenario_result_regex[self.scenario])
| apache-2.0 | -5,496,312,983,046,946,000 | 39.20977 | 186 | 0.590652 | false |
alnaav/shredNN | nn/train/gradient_descent_trainer.py | 1 | 3264 | import numpy as np
from nn.train.trainer import Trainer
class LayerData:
def __init__(self, layer):
self.z = np.zeros(layer.size)
self.a = np.zeros(layer.size)
self.neuron_error = np.zeros(layer.size)
self.grad_w = np.zeros(layer.w.shape)
self.grad_b = np.zeros(layer.b.shape)
self.d_w = np.zeros(layer.w.shape)
self.d_b = np.zeros(layer.b.shape)
class GradientDescentTrainer(Trainer):
def __init__(self, regularization_param=0.01, learning_rate=0.2):
self.iteration_number = 10000
self.l = regularization_param
self.a = learning_rate
self.__set_coeff__(1)
def __set_coeff__(self, samples_len):
self.rev_m = 1.0 / samples_len
self.coeff = self.l * self.rev_m * 0.5
def calc_gradient(self, curr, a_prev, w):
curr.grad_w += curr.neuron_error.transpose().dot(a_prev)
curr.grad_b += curr.neuron_error[1, :]
curr.d_w = self.rev_m * curr.grad_w + self.l * w
curr.d_b = self.rev_m * curr.grad_b
def step(self, layers, x, y):
layers_data = [LayerData(layer) for layer in layers[1:]]
a = x
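        # forward pass: compute pre-activations z and activations a layer by layer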
for i, layer in enumerate(layers[1:]):
layers_data[i].z = a.dot(layer.w.transpose())
layers_data[i].z += layer.b
layers_data[i].a = layer.activation.apply(layers_data[i].z)
a = layers_data[i].a
cost = self.cost(layers, layers_data[-1].a, y)
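        # backward pass: propagate the error from the output layer back and accumulate gradients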
curr = layers_data[-1]
curr.neuron_error = curr.a - y
for i in range(len(layers) - 1, 1, -1):
prev = layers_data[i - 2]
curr = layers_data[i - 1]
prev.neuron_error = curr.neuron_error.dot(layers[i].w) * layers[i - 1].activation.apply_derivative(prev.z)
self.calc_gradient(curr, prev.a, layers[i].w)
self.calc_gradient(layers_data[0], x, layers[1].w)
for layer, data in zip(layers[1:], layers_data):
layer.w -= self.a * data.d_w
layer.b -= self.a * data.d_b
return cost
def cost(self, layers, predicted, expected):
hv = predicted.ravel()
yv = expected.ravel()
reg = 0
for layer in layers[1:]:
reg += np.sum(layer.w * layer.w)
reg *= self.coeff
err = -(np.log2(hv).transpose().dot(yv) + np.log2(1 - hv).transpose().dot((1 - yv))) * self.rev_m
return err + reg
def train(self, nn, features, target, k):
samples_number = features.shape[0]
self.__set_coeff__(samples_number)
y = np.zeros((samples_number, k))
for i in range(0, samples_number):
y[i, target[i]] = 1
batch_size = 1000
batches_number = samples_number / batch_size + 1
print "{} batches".format(batches_number)
for i in range(0, self.iteration_number):
sb = 0
for b in range(0, batches_number):
size = min(batch_size, samples_number - sb)
self.__set_coeff__(size)
curr_x = features[sb:sb + size, :]
curr_y = y[sb:sb + size, :]
                self.step(nn.layers, curr_x, curr_y)
                sb += size  # advance the batch offset so each batch sees new samples
if i % 1000 == 0:
print "{} iterations done".format(i)
| apache-2.0 | -6,355,674,511,678,844,000 | 31.64 | 118 | 0.543811 | false |
andyvand/cygsystem-config-llvm | src/Properties_Renderer.py | 1 | 7792 | """This renderer class renders volume properties into a separate
drawing area next to the main volume rendering drawing area.
"""
import sys
import math
import operator
import types
import select
import signal
import gobject
import pango
import string
import os
from lvmui_constants import *
import stat
import gettext
_ = gettext.gettext
### gettext first, then import gtk (exception prints gettext "_") ###
try:
import gtk
import gtk.glade
except RuntimeError, e:
print _("""
Unable to initialize graphical environment. Most likely cause of failure
is that the tool was not run using a graphical environment. Please either
start your graphical user interface or set your DISPLAY variable.
Caught exception: %s
""") % e
sys.exit(-1)
import gnome
import gnome.ui
LABEL_X = 325
LABEL_Y = 600
X_OFF = 20
Y_OFF = 10
BIG_HEADER_SIZE = 12000
PROPERTY_SIZE = 8000
PROPERTIES_STR=_("Properties for")
PHYSICAL_VOLUME_STR=_("Physical Volume")
LOGICAL_VOLUME_STR=_("Logical Volume")
UNALLOCATED_VOLUME_STR=_("Unallocated Volume")
UNINITIALIZED_VOLUME_STR=_("Disk Entity")
PHYSICAL_VOLUMEGROUP_STR=_("Volume Group")
LOGICAL_VOLUMEGROUP_STR=_("Volume Group")
VOLUMEGROUP_STR=_("Volume Group")
##############################################################
class Properties_Renderer:
def __init__(self, area, widget):
self.main_window = widget
self.area = area #actual widget, used for getting style, hence bgcolor
self.area.set_size_request(700, 500)
self.current_selection_layout = None
self.layout_list = list()
self.layout_pixmap = gtk.gdk.Pixmap(self.main_window, LABEL_X, LABEL_Y)
self.gc = self.main_window.new_gc()
self.pango_context = self.area.get_pango_context()
color = gtk.gdk.colormap_get_system().alloc_color("white", 1,1)
self.area.modify_bg(gtk.STATE_NORMAL, color)
self.area.connect('expose-event', self.on_expose_event)
self.clear_layout_pixmap()
def render_to_layout_area(self, prop_list, name, type):
self.clear_layout_pixmap()
self.layout_list = list()
self.prepare_header_layout(name, type)
self.prepare_prop_layout(prop_list, type)
self.prepare_selection_props()
self.do_render()
def prepare_header_layout(self, name, type):
pc = self.pango_context
desc = pc.get_font_description()
desc.set_size(BIG_HEADER_SIZE)
pc.set_font_description(desc)
layout_string1 = "<span size=\"12000\">" +PROPERTIES_STR + "</span>\n"
if type == PHYS_TYPE:
layout_string2 = "<span size=\"12000\">" + PHYSICAL_VOLUME_STR + "</span>\n"
layout_string3 = "<span foreground=\"#ED1C2A\" size=\"12000\"><b>" + name + "</b></span>"
elif type == LOG_TYPE:
layout_string2 = "<span size=\"12000\">" + LOGICAL_VOLUME_STR + "</span>\n"
layout_string3 = "<span foreground=\"#43ACE2\" size=\"12000\"><b>" + name + "</b></span>"
elif type == UNALLOCATED_TYPE:
layout_string2 = "<span size=\"12000\">" + UNALLOCATED_VOLUME_STR + "</span>\n"
layout_string3 = "<span foreground=\"#ED1C2A\" size=\"12000\"><b>" + name + "</b></span>"
elif type == UNINITIALIZED_TYPE:
layout_string2 = "<span size=\"12000\">" + UNINITIALIZED_VOLUME_STR + "</span>\n"
layout_string3 = "<span foreground=\"#404040\" size=\"12000\"><b>" + name + "</b></span>"
elif type == VG_PHYS_TYPE:
layout_string2 = "<span size=\"12000\">" + PHYSICAL_VOLUMEGROUP_STR + "</span>\n"
layout_string3 = "<span foreground=\"#ED1C2A\" size=\"12000\"><b>" + name + "</b></span>"
elif type == VG_LOG_TYPE:
layout_string2 = "<span size=\"12000\">" + LOGICAL_VOLUMEGROUP_STR + "</span>\n"
layout_string3 = "<span foreground=\"#43ACE2\" size=\"12000\"><b>" + name + "</b></span>"
else:
layout_string2 = "<span size=\"12000\">" + VOLUMEGROUP_STR + "</span>\n"
layout_string3 = "<span foreground=\"#43A2FF\" size=\"12000\"><b>" + name + "</b></span>"
layout_string = layout_string1 + layout_string2 + layout_string3
header_layout = self.area.create_pango_layout('')
header_layout.set_markup(layout_string)
self.layout_list.append(header_layout)
def prepare_prop_layout(self, prop_list,type):
pc = self.pango_context
desc = pc.get_font_description()
desc.set_size(PROPERTY_SIZE)
pc.set_font_description(desc)
text_str = self.prepare_props_list(prop_list, type)
props_layout = self.area.create_pango_layout('')
props_layout.set_markup(text_str)
self.layout_list.append(props_layout)
def clear_layout_pixmap(self):
self.set_color("white")
self.layout_pixmap.draw_rectangle(self.gc, True, 0, 0, -1, -1)
def clear_layout_area(self):
self.clear_layout_pixmap()
self.layout_list = list()
self.main_window.draw_drawable(self.gc, self.layout_pixmap, 0, 0, X_OFF, Y_OFF, -1, -1)
def set_color(self, color):
self.gc.set_foreground(gtk.gdk.colormap_get_system().alloc_color(color, 1,1))
def prepare_selection_props(self):
pass
def prepare_props_list(self, props_list, type):
stringbuf = list()
for i in range(0, len(props_list), 2):
if i == 0:
stringbuf.append("<b>" + props_list[i] + "</b>")
if (type == PHYS_TYPE) or (type == VG_PHYS_TYPE) or (type == UNALLOCATED_TYPE):
stringbuf.append("<span foreground=\"#ED1C2A\">")
elif (type == LOG_TYPE) or (type == VG_LOG_TYPE):
stringbuf.append("<span foreground=\"#43ACE2\">")
elif type == VG_TYPE:
stringbuf.append("<span foreground=\"#43A2FF\">")
else:
stringbuf.append("<span foreground=\"#404040\">")
stringbuf.append(props_list[i+1])
stringbuf.append("</span>")
else:
stringbuf.append("\n")
stringbuf.append("<b>" + props_list[i] + "</b>")
if (type == PHYS_TYPE) or (type == VG_PHYS_TYPE) or (type == UNALLOCATED_TYPE):
stringbuf.append("<span foreground=\"#ED1C2A\">")
elif (type == LOG_TYPE) or (type == VG_LOG_TYPE):
stringbuf.append("<span foreground=\"#43ACE2\">")
elif type == VG_TYPE:
stringbuf.append("<span foreground=\"#43A2FF\">")
else:
stringbuf.append("<span foreground=\"#404040\">")
stringbuf.append(props_list[i+1])
stringbuf.append("</span>")
text_str = "".join(stringbuf)
return text_str
def do_render(self):
self.clear_layout_pixmap()
self.set_color("black")
y_offset = 0
for layout in self.layout_list:
x,y = layout.get_pixel_size()
if y_offset == 0:
self.layout_pixmap.draw_layout(self.gc, 0, 0, layout)
y_offset = y_offset + y
else:
self.layout_pixmap.draw_layout(self.gc, 0, y_offset + 5, layout)
y_offset = y_offset + y
if self.current_selection_layout != None:
self.layout_pixmap.draw_layout(self.gc, 0, y_offset + 5, self.current_selection_layout)
self.main_window.draw_drawable(self.gc, self.layout_pixmap, 0, 0, X_OFF, Y_OFF, -1, -1)
def render_selection(self, layout):
###FIXME - This has the potential of eliminating all entries on the list.
if layout == None:
self.current_selection_layout = None
self.do_render()
elif layout is self.current_selection_layout:
return
else:
self.current_selection_layout = layout
self.do_render()
def on_expose_event(self, widget, event):
self.do_render()
| gpl-2.0 | -4,781,185,715,055,585,000 | 36.104762 | 95 | 0.602156 | false |
droobey/meeseeksbox | src/action.py | 1 | 16274 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Carry out voice commands by recognising keywords."""
import datetime
import logging
import subprocess
import actionbase
import os
import json
import random
import requests
# =============================================================================
#
# Hey, Makers!
#
# This file contains some examples of voice commands that are handled locally,
# right on your Raspberry Pi.
#
# Do you want to add a new voice command? Check out the instructions at:
# https://aiyprojects.withgoogle.com/voice/#makers-guide-3-3--create-a-new-voice-command-or-action
# (MagPi readers - watch out! You should switch to the instructions in the link
# above, since there's a mistake in the MagPi instructions.)
#
# In order to make a new voice command, you need to do two things. First, make a
# new action where it says:
# "Implement your own actions here"
# Secondly, add your new voice command to the actor near the bottom of the file,
# where it says:
# "Add your own voice commands here"
#
# =============================================================================
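# For instance, a hypothetical two-step addition could look like the sketch
# below (the keyword and class name are made up, shown only as a pattern):
#
#   class BlessYouAction(object):
#       """Replies politely whenever its keyword is heard."""
#       def __init__(self, say):
#           self.say = say
#       def run(self, voice_command):
#           self.say('Bless you!')
#
#   # ...and inside make_actor(), near the bottom of this file:
#   actor.add_keyword(_('sneeze'), BlessYouAction(say))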
# Actions might not use the user's command. pylint: disable=unused-argument
# Example: Say a simple response
# ================================
#
# This example will respond to the user by saying something. You choose what it
# says when you add the command below - look for SpeakAction at the bottom of
# the file.
#
# There are two functions:
# __init__ is called when the voice commands are configured, and stores
# information about how the action should work:
# - self.say is a function that says some text aloud.
# - self.words are the words to use as the response.
# run is called when the voice command is used. It gets the user's exact voice
# command as a parameter.
class SpeakAction(object):
"""Says the given text via TTS."""
def __init__(self, say, words):
self.say = say
self.words = words
def run(self, voice_command):
self.say(self.words)
# Example: Tell the current time
# ==============================
#
# This example will tell the time aloud. The to_str function will turn the time
# into helpful text (for example, "It is twenty past four."). The run function
# uses to_str say it aloud.
class SpeakTime(object):
"""Says the current local time with TTS."""
def __init__(self, say):
self.say = say
def run(self, voice_command):
time_str = self.to_str(datetime.datetime.now())
self.say(time_str)
def to_str(self, dt):
"""Convert a datetime to a human-readable string."""
HRS_TEXT = ['midnight', 'one', 'two', 'three', 'four', 'five', 'six',
'seven', 'eight', 'nine', 'ten', 'eleven', 'twelve']
MINS_TEXT = ["five", "ten", "quarter", "twenty", "twenty-five", "half"]
hour = dt.hour
minute = dt.minute
# convert to units of five minutes to the nearest hour
minute_rounded = (minute + 2) // 5
minute_is_inverted = minute_rounded > 6
if minute_is_inverted:
minute_rounded = 12 - minute_rounded
hour = (hour + 1) % 24
# convert time from 24-hour to 12-hour
if hour > 12:
hour -= 12
if minute_rounded == 0:
if hour == 0:
return 'It is midnight.'
return "It is %s o'clock." % HRS_TEXT[hour]
if minute_is_inverted:
return 'It is %s to %s.' % (MINS_TEXT[minute_rounded - 1], HRS_TEXT[hour])
return 'It is %s past %s.' % (MINS_TEXT[minute_rounded - 1], HRS_TEXT[hour])
# Example: Run a shell command and say its output
# ===============================================
#
# This example will use a shell command to work out what to say. You choose the
# shell command when you add the voice command below - look for the example
# below where it says the IP address of the Raspberry Pi.
class SpeakShellCommandOutput(object):
"""Speaks out the output of a shell command."""
def __init__(self, say, shell_command, failure_text):
self.say = say
self.shell_command = shell_command
self.failure_text = failure_text
def run(self, voice_command):
output = subprocess.check_output(self.shell_command, shell=True).strip()
if output:
self.say(output.decode('utf-8'))
elif self.failure_text:
self.say(self.failure_text)
# Example: Change the volume
# ==========================
#
# This example can change the speaker volume of the Raspberry Pi. It uses
# the shell command SET_VOLUME to change the volume, and then GET_VOLUME gets
# the new volume. The example says the new volume aloud after changing the
# volume.
class VolumeControl(object):
"""Changes the volume and says the new level."""
GET_VOLUME = r'amixer get Master | grep "Front Left:" | sed "s/.*\[\([0-9]\+\)%\].*/\1/"'
SET_VOLUME = 'amixer -q set Master %d%%'
def __init__(self, say, change):
self.say = say
self.change = change
def run(self, voice_command):
res = subprocess.check_output(VolumeControl.GET_VOLUME, shell=True).strip()
try:
logging.info("volume: %s", res)
vol = int(res) + self.change
vol = max(0, min(100, vol))
subprocess.call(VolumeControl.SET_VOLUME % vol, shell=True)
self.say(_('Volume at %d %%.') % vol)
except (ValueError, subprocess.CalledProcessError):
logging.exception("Error using amixer to adjust volume.")
# Example: Repeat after me
# ========================
#
# This example will repeat what the user said. It shows how you can access what
# the user said, and change what you do or how you respond.
class RepeatAfterMe(object):
"""Repeats the user's command."""
def __init__(self, say, keyword):
self.say = say
self.keyword = keyword
def run(self, voice_command):
# The command still has the 'repeat after me' keyword, so we need to
# remove it before saying whatever is left.
to_repeat = voice_command.replace(self.keyword, '', 1)
self.say(to_repeat)
# =========================================
# Makers! Implement your own actions here.
# =========================================
#This works by rolling a number between 1 and whatever you want.
#There is a 1-in-your-number chance of playing either one of two wavs
#If the random number rolled isn't one it will play <keyword>.wav from the wavs/chance folder
#It will then -1 from your number (total chance) and save it to a file
#If it IS 1 it will play <keyword>-full.wav wavs/chance folder
#It will then reset the chance back to default
#
#e.g. actor.add_keyword(_('command'), WavChance(say,"moonmen", 10))
# has a 1/10 chance of playing moonmen-full.wav or moonmen.wav
class WavChance(object):
def __init__(self,say,keyword,chanc=10):
self.say = say
self.path = "../wavs/chance/"+str(keyword)
self.chance = str(chanc)
def run(self, command):
import os,random
if os.path.isfile("../wavs/chance/chance-"+os.path.basename(self.path)):
with open("../wavs/chance/chance-"+os.path.basename(self.path),"r") as f:
chance=int(f.read().strip())
else:
f=open("../wavs/chance/chance-"+os.path.basename(self.path),"w")
f.write(str(self.chance))
f.close()
            chance=int(self.chance) #self.chance is stored as a string, convert before rolling
logging.debug("Current chance: "+str(chance))
r=random.randint(1,chance)
logging.debug("1/"+str(chance)+", rolled a "+str(r))
if r == 1:
#ding,ding,ding,jackpot!
os.system("aplay "+self.path+"-full.wav")
with open("../wavs/chance/chance-"+os.path.basename(self.path),"w") as f:
f.write(str(self.chance))
else:
os.system("aplay "+self.path+".wav")
chance=chance-1
with open("../wavs/chance/chance-"+os.path.basename(self.path),"w") as f:
f.write(str(chance))
class PlayWav(object):
def __init__(self,say,keyword):
self.say = say
self.path = keyword
def run(self, command):
os.system("aplay "+self.path)
#choose a random TV Show!
class EpisodeRandom(object):
def __init__(self,say,keyword):
self.say=say
self.keyword=keyword
#Put your TVDB API KEY here!
#Get API key here http://thetvdb.com/?tab=apiregister
self.api_key="TVDBAPIKEY"
def grab_tvdb(self,id):
import tvdb
tv=tvdb.tvdb(self.api_key,id)
page=1
maxpage=1
try:
s=tv.GET("series/"+str(id)+"/episodes?page="+str(page))
except:
self.say("I couldn't connect to the TV database. Have you set your API key correctly?")
return False
maxpage=s["links"]["last"]+1
o=[]
o.extend(s["data"])
for i in range (2, maxpage):
s=tv.GET("series/"+str(id)+"/episodes?page="+str(i))
o.extend(s["data"])
return {"episodes":o}
def grab_tvdb_show(self,show):
import tvdb
#Get API key here http://thetvdb.com/?tab=apiregister
tv=tvdb.tvdb(self.api_key) #Put your TVDB API KEY here!
try:
s=tv.GET("search/series?name="+str(show))
return s["data"][0]
except:
self.say("I couldn't connect to the TV database. Have you set your API key correctly?")
return False
def run(self, command):
random.seed()
#Put your favourite TV Shows here
#You'll need its tvdb_id, just search it on thetvdb.com
#It will be in the URL e.g. http://thetvdb.com/?tab=series&id=IDHERE
#list = [[showname,the_tvdb_id],....]
shows=[["Futurama",73871],
["Rick and Morty",275274]]
from time import time,sleep
if not self.keyword == "episodeof":
c=random.choice(shows)
else:
show=command.replace("suggest random episode of","").strip()
show=show.replace("random episode of","").strip()
get_show=self.grab_tvdb_show(show)
if not get_show:
self.say("Sorry, I can't seem to find "+str(show)+" on the TV database")
return
else:
c=[get_show["seriesName"],get_show["id"]]
cachepath="tvdb_cache/tvdb_"+str(c[1])+".json"
if os.path.isfile(cachepath):
with open(cachepath) as data_file:
data = json.load(data_file)
if int(data["updated"])+604800 < time():
#out of date
o = self.grab_tvdb(c[1])
if not o:
return False
o["updated"]=time()
with open(cachepath, 'w') as outfile:
json.dump(o,outfile)
showeps=o
else:
#up to date
with open(cachepath) as data_file:
showeps=json.load(data_file)
else:
#new file
o = self.grab_tvdb(c[1])
o["updated"]=time()
with open(cachepath, 'w') as outfile:
json.dump(o,outfile)
showeps=o
while True:
ep=random.choice(showeps["episodes"])
if not ep['airedSeason'] == 0:
break
intro=["How about","Try","You should watch","Have a look at","You may like"]
self.say(random.choice(intro)+" "+c[0]+" Season "+str(ep['airedSeason'])+" Episode "+str(ep['airedEpisodeNumber'])+" "+ep['episodeName'])
def make_actor(say):
"""Create an actor to carry out the user's commands."""
actor = actionbase.Actor()
actor.add_keyword(
_('ip address'), SpeakShellCommandOutput(
say, "ip -4 route get 1 | head -1 | cut -d' ' -f8",
_('I do not have an ip address assigned to me.')))
actor.add_keyword(_('volume up'), VolumeControl(say, 10))
actor.add_keyword(_('volume down'), VolumeControl(say, -10))
actor.add_keyword(_('max volume'), VolumeControl(say, 100))
actor.add_keyword(_('repeat after me'),
RepeatAfterMe(say, _('repeat after me')))
# =========================================
# Makers! Add your own voice commands here.
# =========================================
#read config
#Edit the file called cmd-config
#Layout
#parrot=Voom,Norwegian Blue,4000000
# ^ ^ ^ ^
#Command | | |
# Class Keyword Extra Parameter
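    #For example (hypothetical lines, shown only to illustrate the mapping):
    # butter=SpeakAction,I pass butter
    # becomes: actor.add_keyword(_('butter'), SpeakAction(say, 'I pass butter'))
    # moon=WavChance,moonmen,10
    # becomes: actor.add_keyword(_('moon'), WavChance(say, 'moonmen', '10'))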
with open("cmd-config","r") as f:
for line in f:
line=line.strip()
            if not line or line[0]=="#": #blank line or comment, ignore this line
continue
cmd=line.split("=",1)
module=cmd[1].split(",",3)
kword=module[1]
try:
cl=globals()[module[0]]
if len(module)>2:
actor.add_keyword(_( cmd[0].strip() ), cl(say,kword,module[2]))
else:
actor.add_keyword(_( cmd[0].strip() ), cl(say,kword))
logging.debug("Added command from config - "+str(cmd[0])+" ("+module[0]+")")
except:
import traceback,sys
exc_type, exc_value, exc_traceback = sys.exc_info()
logging.debug("Failed to add command from config - "+str(cmd[0])+" ("+module[0]+")")
traceback.print_exc()
actor.add_keyword(_('what is your purpose'),
SpeakAction(say, _("I pass butter")))
#start random episode
#Look in the EpisodeRandom function for setup!
actor.add_keyword(_("suggest random episode of"),
EpisodeRandom(say,_("episodeof")))
actor.add_keyword(_("random episode"),
EpisodeRandom(say,_("episode")))
actor.add_keyword(_("random episode of"),
EpisodeRandom(say,_("episodeof")))
#end random episode
return actor
def add_commands_just_for_cloud_speech_api(actor, say):
"""Add simple commands that are only used with the Cloud Speech API."""
def simple_command(keyword, response):
actor.add_keyword(keyword, SpeakAction(say, response))
simple_command('alexa', _("We've been friends since we were both starter projects"))
simple_command(
'beatbox',
'pv zk pv pv zk pv zk kz zk pv pv pv zk pv zk zk pzk pzk pvzkpkzvpvzk kkkkkk bsch')
simple_command(_('clap'), _('clap clap'))
simple_command('google home', _('She taught me everything I know.'))
simple_command(_('hello'), _('hello to you too'))
simple_command(_('tell me a joke'),
_('What do you call an alligator in a vest? An investigator.'))
simple_command(_('three laws of robotics'),
_("""The laws of robotics are
0: A robot may not injure a human being or, through inaction, allow a human
being to come to harm.
1: A robot must obey orders given it by human beings except where such orders
would conflict with the First Law.
2: A robot must protect its own existence as long as such protection does not
conflict with the First or Second Law."""))
simple_command(_('where are you from'), _("A galaxy far, far, just kidding. I'm from Seattle."))
simple_command(_('your name'), _('A machine has no name'))
actor.add_keyword(_('time'), SpeakTime(say))
| mit | 8,872,446,751,623,612,000 | 34.924945 | 150 | 0.572508 | false |
theeluwin/textrankr | textrankr/utils.py | 1 | 2716 | from typing import (
List,
Tuple,
Callable,
)
from re import split
from itertools import combinations
from collections import Counter
from networkx import Graph
from .sentence import Sentence
__all__: Tuple[str, ...] = (
'parse_text_into_sentences',
'multiset_jaccard_index',
'build_sentence_graph',
)
def parse_text_into_sentences(text: str, tokenizer: Callable[[str], List[str]]) -> List[Sentence]:
"""
This function splits the given text into sentence candidates using a pre-defined splitter,
then creates a list of `sentence.Sentence` instances which have bag-of-words inside, tokenized by the given tokenizer.
"""
# init
index: int = 0
duplication_checker: set = set()
sentences: List[Sentence] = []
# parse text
candidates: List[str] = split(r'(?:(?<=[^0-9])\.|\n|!|\?)', text)
for candidate in candidates:
# cleanse the candidate
candidate_stripped: str = candidate.strip('. ')
if not len(candidate_stripped):
continue
if candidate_stripped in duplication_checker:
continue
# tokenize the candidate
tokens: List[str] = tokenizer(candidate_stripped)
if len(tokens) < 2:
continue
duplication_checker.add(candidate_stripped)
# create a sentence
bow: Counter = Counter(tokens)
sentence = Sentence(index, candidate_stripped, bow)
sentences.append(sentence)
index += 1
# return
return sentences
def multiset_jaccard_index(counter1: Counter, counter2: Counter) -> float:
"""
Calculates the jaccard index between two given multisets.
Note that a `Counter` instance can be used for representing multisets.
"""
intersection_count: int = sum((counter1 & counter2).values())
union_count: int = sum((counter1 | counter2).values())
try:
return intersection_count / union_count
except ZeroDivisionError:
return 0.0
def build_sentence_graph(sentences: List[Sentence], tolerance: float = 0.05) -> Graph:
"""
Builds a `networkx.Graph` instance, using sentences as nodes.
An edge weight is determined by the jaccard index between two sentences,
    but the edge will be ignored if the weight is lower than the given tolerance.
"""
# init
graph: Graph = Graph()
# add nodes
graph.add_nodes_from(sentences)
# add edges
for sentence1, sentence2 in combinations(sentences, 2):
weight: float = multiset_jaccard_index(sentence1.bow, sentence2.bow)
if weight > tolerance:
graph.add_edge(sentence1, sentence2, weight=weight)
# return
return graph
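# A minimal usage sketch assuming a naive whitespace tokenizer (any
# `Callable[[str], List[str]]` works); run it via `python -m textrankr.utils`
# so that the relative imports above resolve.
if __name__ == '__main__':

    def whitespace_tokenizer(sentence: str) -> List[str]:
        return sentence.split()

    sample: str = 'Dogs bark loudly. Cats nap quietly. Dogs and cats nap together.'
    parsed: List[Sentence] = parse_text_into_sentences(sample, whitespace_tokenizer)
    graph: Graph = build_sentence_graph(parsed, tolerance=0.01)
    print('{} sentences, {} edges'.format(len(parsed), graph.number_of_edges()))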
| mit | -7,227,328,344,896,412,000 | 27.893617 | 126 | 0.647275 | false |
dc3-plaso/plaso | plaso/parsers/winreg_plugins/usbstor.py | 1 | 4441 | # -*- coding: utf-8 -*-
"""File containing a Windows Registry plugin to parse the USBStor key."""
import logging
from plaso.containers import windows_events
from plaso.parsers import winreg
from plaso.parsers.winreg_plugins import interface
__author__ = 'David Nides ([email protected])'
class USBStorPlugin(interface.WindowsRegistryPlugin):
"""USBStor key plugin."""
NAME = u'windows_usbstor_devices'
DESCRIPTION = u'Parser for USB Plug And Play Manager USBStor Registry Key.'
FILTERS = frozenset([
interface.WindowsRegistryKeyPathFilter(
u'HKEY_LOCAL_MACHINE\\System\\CurrentControlSet\\Enum\\USBSTOR')])
URLS = [u'http://www.forensicswiki.org/wiki/USB_History_Viewing']
_SOURCE_APPEND = u': USBStor Entries'
def GetEntries(self, parser_mediator, registry_key, **kwargs):
"""Collect Values under USBStor and return an event object for each one.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
registry_key: A Windows Registry key (instance of
dfwinreg.WinRegistryKey).
"""
for subkey in registry_key.GetSubkeys():
values_dict = {}
values_dict[u'subkey_name'] = subkey.name
# Time last USB device of this class was first inserted.
event_object = windows_events.WindowsRegistryEvent(
subkey.last_written_time, registry_key.path, values_dict,
offset=registry_key.offset, source_append=self._SOURCE_APPEND)
parser_mediator.ProduceEvent(event_object)
name_values = subkey.name.split(u'&')
number_of_name_values = len(name_values)
# Normally we expect 4 fields here however that is not always the case.
if number_of_name_values != 4:
logging.warning(
u'Expected 4 &-separated values in: {0:s}'.format(subkey.name))
if number_of_name_values >= 1:
values_dict[u'device_type'] = name_values[0]
if number_of_name_values >= 2:
values_dict[u'vendor'] = name_values[1]
if number_of_name_values >= 3:
values_dict[u'product'] = name_values[2]
if number_of_name_values >= 4:
values_dict[u'revision'] = name_values[3]
for device_key in subkey.GetSubkeys():
values_dict[u'serial'] = device_key.name
friendly_name_value = device_key.GetValueByName(u'FriendlyName')
if friendly_name_value:
values_dict[u'friendly_name'] = friendly_name_value.GetDataAsObject()
else:
values_dict.pop(u'friendly_name', None)
# ParentIdPrefix applies to Windows XP Only.
parent_id_prefix_value = device_key.GetValueByName(u'ParentIdPrefix')
if parent_id_prefix_value:
values_dict[u'parent_id_prefix'] = (
parent_id_prefix_value.GetDataAsObject())
else:
values_dict.pop(u'parent_id_prefix', None)
# Win7 - Last Connection.
# Vista/XP - Time of an insert.
event_object = windows_events.WindowsRegistryEvent(
device_key.last_written_time, registry_key.path, values_dict,
offset=registry_key.offset, source_append=self._SOURCE_APPEND)
parser_mediator.ProduceEvent(event_object)
device_parameter_key = device_key.GetSubkeyByName(u'Device Parameters')
if device_parameter_key:
event_object = windows_events.WindowsRegistryEvent(
device_parameter_key.last_written_time, registry_key.path,
values_dict, offset=registry_key.offset,
source_append=self._SOURCE_APPEND)
parser_mediator.ProduceEvent(event_object)
log_configuration_key = device_key.GetSubkeyByName(u'LogConf')
if log_configuration_key:
event_object = windows_events.WindowsRegistryEvent(
log_configuration_key.last_written_time, registry_key.path,
values_dict, offset=registry_key.offset,
source_append=self._SOURCE_APPEND)
parser_mediator.ProduceEvent(event_object)
properties_key = device_key.GetSubkeyByName(u'Properties')
if properties_key:
event_object = windows_events.WindowsRegistryEvent(
properties_key.last_written_time, registry_key.path,
values_dict, offset=registry_key.offset,
source_append=self._SOURCE_APPEND)
parser_mediator.ProduceEvent(event_object)
winreg.WinRegistryParser.RegisterPlugin(USBStorPlugin)
| apache-2.0 | 7,311,776,805,502,006,000 | 38.651786 | 79 | 0.665166 | false |
unreal666/outwiker | src/outwiker/gui/basepagepanel.py | 2 | 4433 | # -*- coding: utf-8 -*-
import logging
import os.path
import wx
from wx.lib.scrolledpanel import ScrolledPanel
from outwiker.core.event import Event
logger = logging.getLogger('outwiker.gui.pasepagepanel')
class BasePagePanel(ScrolledPanel):
"""
    Base class for page view panels
"""
def __init__(self, parent, application):
style = wx.TAB_TRAVERSAL | wx.HSCROLL | wx.VSCROLL
super().__init__(parent, style=style)
self._currentpage = None
self._application = application
self.mainWindow = self._application.mainWindow
        # Event that is triggered when a new page is set
        # Parameter: the new page
self._onSetPage = Event()
        # Dictionary storing information about the created tools
        # Key - a string describing the tool
        # Value - a ToolsInfo class instance
self._tools = {}
@property
def allTools(self):
"""
        Returns a list of ToolsInfo instances.
"""
return list(self._tools.values())
def _removeAllTools(self):
self.mainWindow.Freeze()
for toolKey in self._tools:
self.removeTool(toolKey, fullUpdate=False)
self.mainWindow.UpdateAuiManager()
self.mainWindow.Thaw()
def removeTool(self, idstring, fullUpdate=True):
if idstring not in self._tools:
logger.error('BasePagePanel.removeTool. Invalid idstring: {}'.format(idstring))
return
tool = self._tools[idstring]
if (tool.panelname in self.mainWindow.toolbars and
self.mainWindow.toolbars[tool.panelname].FindById(tool.id) is not None):
self.mainWindow.toolbars[tool.panelname].DeleteTool(tool.id, fullUpdate=fullUpdate)
tool.menu.Remove(tool.id)
self.mainWindow.Unbind(wx.EVT_MENU, id=tool.id)
del self._tools[idstring]
def enableTool(self, tool, enabled):
"""
        Enable or disable a single tool (menu item and button)
        tool - a ToolsInfo class instance
"""
tool.menu.Enable(tool.id, enabled)
if self.mainWindow.toolbars[tool.panelname].FindById(tool.id) is not None:
toolbar = self.mainWindow.toolbars[tool.panelname]
toolbar.Freeze()
toolbar.EnableTool(tool.id, enabled)
toolbar.Realize()
toolbar.Thaw()
###############################################
    # Methods that must be overridden
###############################################
def Print(self):
"""
        Print the page
"""
pass
def UpdateView(self, page):
"""
        Update the page view
"""
pass
def Save(self):
"""
        Save the page
"""
pass
def Clear(self):
"""
        Clean up after this panel.
        Remove the added interface elements and unsubscribe from events
"""
pass
def checkForExternalEditAndSave(self):
"""
        Check whether the page has been changed by external tools. If it has,
        react to those changes.
"""
@property
def page(self):
return self._currentpage
@page.setter
def page(self, page):
self.Save()
self._currentpage = page
if page is not None and not os.path.exists(page.path):
return
self._onSetPage(page)
self.UpdateView(page)
def Close(self):
"""
        Close the panel.
        Must be called manually!
"""
self.Save()
self.CloseWithoutSave()
def CloseWithoutSave(self):
"""
        Close the panel without saving.
"""
self.Clear()
super().Close()
self.Destroy()
| gpl-3.0 | -5,303,935,627,709,025,000 | 25.157534 | 95 | 0.584446 | false |
mozilla-it/mozlibldap | examples/make-pubkeys-investigation.py | 1 | 3570 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2015 Mozilla Corporation
# Author: [email protected]
# Requires:
# mozlibldap
from __future__ import print_function
import mozlibldap
import string
import json
import sys
LDAP_URL = 'ldap://ldap.db.scl3.mozilla.com'
LDAP_BIND_DN = '[email protected],o=com,dc=mozilla'
LDAP_BIND_PASSWD = "mysecretpassphrase"
def main():
lcli = mozlibldap.MozLDAP(LDAP_URL, LDAP_BIND_DN, LDAP_BIND_PASSWD)
searches = {}
# get a list of users that have a pubkey in ldap
users = lcli.get_all_enabled_users_attr('sshPublicKey')
for user_attr in users:
search = {}
user = user_attr[0].split(',', 1)[0].split('=', 1)[1]
print("current user: "+user, file=sys.stderr)
keys = user_attr[1]
if len(keys) == 0:
continue
contentre = '^((#.+)|(\s+)'
for pubkey in keys['sshPublicKey']:
if len(pubkey) < 5 or not (pubkey.startswith("ssh")):
continue
pubkey = string.join(pubkey.split(' ', 2)[:2], '\s')
pubkey = pubkey.replace('/', '\/')
pubkey = pubkey.replace('+', '\+')
pubkey = pubkey.replace('\r\n', '')
contentre += '|({pubkey}\s.+)'.format(pubkey=pubkey)
contentre += ')$'
search["names"] = []
search["names"].append("^authorized_keys$")
search["contents"] = []
search["contents"].append(contentre)
paths = []
try:
paths = get_search_paths(lcli, user)
except:
continue
if not paths or len(paths) < 1:
continue
search["paths"] = paths
search["options"] = {}
search["options"]["matchall"] = True
search["options"]["macroal"] = True
search["options"]["maxdepth"] = 1
search["options"]["mismatch"] = []
search["options"]["mismatch"].append("content")
print(json.dumps(search), file=sys.stderr)
searches[user+"_ssh_pubkeys"] = search
action = {}
action["name"] = "Investigate the content of authorized_keys for LDAP users"
action["target"] = "(name LIKE 'admin%' OR name LIKE 'ssh%' " + \
"OR name LIKE 'people%' OR name LIKE 'zlb%' OR name IN " + \
"('reviewboard-hg1.dmz.scl3.mozilla.com', 'hgssh.stage.dmz.scl3.mozilla.com', " + \
"'hgssh1.dmz.scl3.mozilla.com', 'hgssh2.dmz.scl3.mozilla.com', " + \
"'git1.dmz.scl3.mozilla.com', 'git1.private.scl3.mozilla.com', " + \
"'svn1.dmz.phx1.mozilla.com', 'svn2.dmz.phx1.mozilla.com', " + \
"'svn3.dmz.phx1.mozilla.com')) AND tags->>'operator'='IT' AND " + \
"mode='daemon' AND status='online'"
action["version"] = 2
action["operations"] = []
operation = {}
operation["module"] = "file"
operation["parameters"] = {}
operation["parameters"]["searches"] = searches
action["operations"].append(operation)
print(json.dumps(action, indent=4, sort_keys=True))
def get_search_paths(lcli, user):
paths = []
res = lcli.query("mail="+user, ['homeDirectory', 'hgHome',
'stageHome', 'svnHome'])
for attr in res[0][1]:
try:
paths.append(res[0][1][attr][0]+"/.ssh")
except:
continue
return paths
if __name__ == "__main__":
main()
| mpl-2.0 | -8,203,583,225,681,633,000 | 35.428571 | 95 | 0.564426 | false |
BlackHole/enigma2-obh10 | lib/python/Components/TimerSanityCheck.py | 2 | 13683 | import NavigationInstance
from time import localtime, mktime, gmtime, time
from enigma import iServiceInformation, eServiceCenter, eServiceReference, getBestPlayableServiceReference
from timer import TimerEntry
import RecordTimer
from Tools.CIHelper import cihelper
from Components.config import config
class TimerSanityCheck:
def __init__(self, timerlist, newtimer=None):
self.localtimediff = 25 * 3600 - mktime(gmtime(25 * 3600))
self.timerlist = timerlist
self.newtimer = newtimer
self.simultimer = []
self.rep_eventlist = []
self.nrep_eventlist = []
self.bflag = -1
self.eflag = 1
def check(self, ext_timer=None):
if ext_timer and isinstance(ext_timer, RecordTimer.RecordTimerEntry):
self.newtimer = ext_timer
self.simultimer = []
if self.newtimer:
if not self.newtimer.conflict_detection or (self.newtimer.service_ref and '%3a//' in self.newtimer.service_ref.ref.toString()):
print "[TimerSanityCheck] Exception - timer does not have to be checked!"
return True
self.simultimer = [self.newtimer]
return self.checkTimerlist()
def getSimulTimerList(self):
return self.simultimer
def doubleCheck(self):
if self.newtimer and self.newtimer.service_ref and self.newtimer.service_ref.ref.valid():
self.simultimer = [self.newtimer]
for timer in self.timerlist:
if timer == self.newtimer:
return True
if self.newtimer.begin >= timer.begin and self.newtimer.end <= timer.end:
if timer.justplay and not self.newtimer.justplay:
continue
if timer.service_ref.ref.flags & eServiceReference.isGroup:
if self.newtimer.service_ref.ref.flags & eServiceReference.isGroup and timer.service_ref.ref.getPath() == self.newtimer.service_ref.ref.getPath():
return True
continue
getUnsignedDataRef1 = timer.service_ref.ref.getUnsignedData
getUnsignedDataRef2 = self.newtimer.service_ref.ref.getUnsignedData
for x in (1, 2, 3, 4):
if getUnsignedDataRef1(x) != getUnsignedDataRef2(x):
break
else:
return True
return False
def checkTimerlist(self, ext_timer=None):
#with special service for external plugins
# Entries in eventlist
# timeindex
# BeginEndFlag 1 for begin, -1 for end
# index -1 for the new Timer, 0..n index of the existing timers
# count of running timers
serviceHandler = eServiceCenter.getInstance()
# create a list with all start and end times
# split it into recurring and singleshot timers
##################################################################################
# process the new timer
self.rep_eventlist = []
self.nrep_eventlist = []
if ext_timer and isinstance(ext_timer, RecordTimer.RecordTimerEntry):
self.newtimer = ext_timer
#GML:1 - A timer which has already ended (happens during start-up check) can't clash!!
#
# NOTE: that when adding a timer it also cannot clash with:
# o any timers which run before the latest period of no timers running
# before the timer to be added starts
# o any timers which run after the first period of no timers running
# after the timer to be added ends
# Code to handle this needs to be added (it is *NOT* here yet!)
#
if (self.newtimer is not None) and (self.newtimer.end < time()): # does not conflict
return True
if not self.newtimer or not self.newtimer.service_ref or not self.newtimer.service_ref.ref.valid():
print "[TimerSanityCheck] Error - timer not valid!"
return False
if self.newtimer.disabled or not self.newtimer.conflict_detection or '%3a//' in self.newtimer.service_ref.ref.toString():
print "[TimerSanityCheck] Exception - timer does not have to be checked!"
return True
curtime = localtime(time())
if curtime.tm_year > 1970 and self.newtimer.end < time():
print "[TimerSanityCheck] timer is finished!"
return True
rflags = self.newtimer.repeated
rflags = ((rflags & 0x7F) >> 3) | ((rflags & 0x07) << 4)
if rflags:
begin = self.newtimer.begin % 86400 # map to first day
if (self.localtimediff > 0) and ((begin + self.localtimediff) > 86400):
rflags = ((rflags >> 1) & 0x3F) | ((rflags << 6) & 0x40)
elif (self.localtimediff < 0) and (begin < self.localtimediff):
rflags = ((rflags << 1) & 0x7E) | ((rflags >> 6) & 0x01)
while rflags: # then arrange on the week
if rflags & 1:
self.rep_eventlist.append((begin, -1))
begin += 86400
rflags >>= 1
else:
self.nrep_eventlist.extend([(self.newtimer.begin, self.bflag, -1), (self.newtimer.end, self.eflag, -1)])
##################################################################################
# now process existing timers
self.check_timerlist = []
idx = 0
for timer in self.timerlist:
if timer != self.newtimer:
if timer.disabled or not timer.conflict_detection or not timer.service_ref or '%3a//' in timer.service_ref.ref.toString() or timer.state == TimerEntry.StateEnded:
continue
if timer.repeated:
rflags = timer.repeated
rflags = ((rflags & 0x7F) >> 3) | ((rflags & 0x07) << 4)
begin = timer.begin % 86400 # map all to first day
if (self.localtimediff > 0) and ((begin + self.localtimediff) > 86400):
rflags = ((rflags >> 1) & 0x3F) | ((rflags << 6) & 0x40)
elif (self.localtimediff < 0) and (begin < self.localtimediff):
rflags = ((rflags << 1) & 0x7E) | ((rflags >> 6) & 0x01)
while rflags:
if rflags & 1:
self.rep_eventlist.append((begin, idx))
begin += 86400
rflags >>= 1
else:
self.nrep_eventlist.extend([(timer.begin, self.bflag, idx), (timer.end, self.eflag, idx)])
self.check_timerlist.append(timer)
idx += 1
################################################################################
# journalize timer repeations
if self.nrep_eventlist:
interval_begin = min(self.nrep_eventlist)[0]
interval_end = max(self.nrep_eventlist)[0]
offset_0 = interval_begin - (interval_begin % 604800)
weeks = (interval_end - offset_0) / 604800
if (interval_end - offset_0) % 604800:
weeks += 1
for cnt in range(int(weeks)):
for event in self.rep_eventlist:
if event[1] == -1: # -1 is the identifier of the changed timer
event_begin = self.newtimer.begin
event_end = self.newtimer.end
else:
event_begin = self.check_timerlist[event[1]].begin
event_end = self.check_timerlist[event[1]].end
new_event_begin = event[0] + offset_0 + (cnt * 604800)
# summertime correction
new_lth = localtime(new_event_begin).tm_hour
new_event_begin += 3600 * (localtime(event_begin).tm_hour - new_lth)
new_event_end = new_event_begin + (event_end - event_begin)
if event[1] == -1:
if new_event_begin >= self.newtimer.begin: # is the soap already running?
self.nrep_eventlist.extend([(new_event_begin, self.bflag, event[1]), (new_event_end, self.eflag, event[1])])
else:
if new_event_begin >= self.check_timerlist[event[1]].begin: # is the soap already running?
self.nrep_eventlist.extend([(new_event_begin, self.bflag, event[1]), (new_event_end, self.eflag, event[1])])
else:
offset_0 = 345600 # the Epoch begins on Thursday
for cnt in (0, 1): # test two weeks to take care of Sunday-Monday transitions
for event in self.rep_eventlist:
if event[1] == -1: # -1 is the identifier of the changed timer
event_begin = self.newtimer.begin
event_end = self.newtimer.end
else:
event_begin = self.check_timerlist[event[1]].begin
event_end = self.check_timerlist[event[1]].end
new_event_begin = event[0] + offset_0 + (cnt * 604800)
new_event_end = new_event_begin + (event_end - event_begin)
self.nrep_eventlist.extend([(new_event_begin, self.bflag, event[1]), (new_event_end, self.eflag, event[1])])
################################################################################
# order list chronological
self.nrep_eventlist.sort()
##################################################################################
# detect overlapping timers and overlapping times
fakeRecList = []
ConflictTimer = None
ConflictTunerType = None
newTimerTunerType = None
cnt = 0
idx = 0
overlaplist = []
is_ci_use = 0
is_ci_timer_conflict = 0
ci_timer = False
if config.misc.use_ci_assignment.value and cihelper.ServiceIsAssigned(self.newtimer.service_ref.ref) and not (self.newtimer.record_ecm and not self.newtimer.descramble):
ci_timer = self.newtimer
ci_timer_begin = ci_timer.begin
ci_timer_end = ci_timer.end
ci_timer_dur = ci_timer_end - ci_timer_begin
ci_timer_events = []
for ev in self.nrep_eventlist:
if ev[2] == -1:
ci_timer_events.append((ev[0], ev[0] + ci_timer_dur))
for event in self.nrep_eventlist:
cnt += event[1]
if event[2] == -1: # new timer
timer = self.newtimer
else:
timer = self.check_timerlist[event[2]]
if event[1] == self.bflag:
tunerType = []
ref = timer.service_ref and timer.service_ref.ref
timer_ref = timer.service_ref
if ref and ref.flags & eServiceReference.isGroup and timer.isRunning():
timer_ref = getBestPlayableServiceReference(timer.service_ref.ref, eServiceReference())
fakeRecService = NavigationInstance.instance.recordService(timer_ref, True)
if fakeRecService:
fakeRecResult = fakeRecService.start(True)
else:
fakeRecResult = -1
# TODO
#if fakeRecResult == -6 and len(NavigationInstance.instance.getRecordings(True)) < 2:
# print "[TimerSanityCheck] less than two timers in the simulated recording list - timer conflict is not plausible - ignored !"
# fakeRecResult = 0
if not fakeRecResult: # tune okay
if hasattr(fakeRecService, 'frontendInfo'):
feinfo = fakeRecService.frontendInfo()
if feinfo and hasattr(feinfo, 'getFrontendData'):
tunerType.append(feinfo.getFrontendData().get("tuner_type", -1))
feinfo = None
else: # tune failed.. so we must go another way to get service type (DVB-S, DVB-T, DVB-C)
def getServiceType(ref): # helper function to get a service type of a service reference
serviceInfo = serviceHandler.info(ref)
serviceInfo = serviceInfo and serviceInfo.getInfoObject(ref, iServiceInformation.sTransponderData)
return -1 if serviceInfo is None else serviceInfo.get("tuner_type", -1)
if ref and ref.flags & eServiceReference.isGroup: # service group ?
serviceList = serviceHandler.list(ref) # get all alternative services
if serviceList:
for ref in serviceList.getContent("R"): # iterate over all group service references
type = getServiceType(ref)
if not type in tunerType: # just add single time
tunerType.append(type)
elif ref:
tunerType.append(getServiceType(ref))
if event[2] == -1: # new timer
newTimerTunerType = tunerType
overlaplist.append((fakeRecResult, timer, tunerType))
fakeRecList.append((timer, fakeRecService))
if fakeRecResult:
if ConflictTimer is None: # just take care of the first conflict
ConflictTimer = timer
ConflictTunerType = tunerType
elif event[1] == self.eflag:
for fakeRec in fakeRecList:
if timer == fakeRec[0] and fakeRec[1]:
NavigationInstance.instance.stopRecordService(fakeRec[1])
fakeRecList.remove(fakeRec)
fakeRec = None
for entry in overlaplist:
if entry[1] == timer:
overlaplist.remove(entry)
else:
print "[TimerSanityCheck] bug: unknown flag!"
if ci_timer and timer != ci_timer and cihelper.ServiceIsAssigned(timer.service_ref.ref) and not (timer.record_ecm and not timer.descramble):
if event[1] == self.bflag:
timer_begin = event[0]
timer_end = event[0] + (timer.end - timer.begin)
else:
timer_end = event[0]
timer_begin = event[0] - (timer.end - timer.begin)
for ci_ev in ci_timer_events:
if (ci_ev[0] >= timer_begin and ci_ev[0] <= timer_end) or (ci_ev[1] >= timer_begin and ci_ev[1] <= timer_end):
if ci_timer.service_ref.ref != timer.service_ref.ref:
is_ci_timer_conflict = 1
break
if is_ci_timer_conflict == 1:
if ConflictTimer is None:
ConflictTimer = timer
ConflictTunerType = tunerType
self.nrep_eventlist[idx] = (event[0], event[1], event[2], cnt, overlaplist[:]) # insert a duplicate into current overlaplist
fakeRecService = None
fakeRecResult = None
idx += 1
if ConflictTimer is None:
print "[TimerSanityCheck] conflict not found!"
return True
##################################################################################
# we have detected a conflict, now we must figure out the involved timers
if self.newtimer is not ConflictTimer: # the new timer is not the conflicting timer?
for event in self.nrep_eventlist:
if len(event[4]) > 1: # entry in overlaplist of this event??
kt = False
nt = False
for entry in event[4]:
if entry[1] is ConflictTimer:
kt = True
if entry[1] is self.newtimer:
nt = True
if nt and kt:
ConflictTimer = self.newtimer
ConflictTunerType = newTimerTunerType
break
self.simultimer = [ConflictTimer]
for event in self.nrep_eventlist:
if len(event[4]) > 1: # entry in overlaplist of this event??
for entry in event[4]:
if entry[1] is ConflictTimer:
break
else:
continue
for entry in event[4]:
if not entry[1] in self.simultimer:
for x in entry[2]:
if x in ConflictTunerType:
self.simultimer.append(entry[1])
break
if len(self.simultimer) < 2:
print "[TimerSanityCheck] possible bug: unknown conflict!"
return True
print "[TimerSanityCheck] conflict detected!"
return False
| gpl-2.0 | -3,732,489,084,296,369,700 | 39.602374 | 171 | 0.655631 | false |
scorpilix/Golemtest | golem/interface/client/environments.py | 1 | 1674 | from golem.core.deferred import sync_wait
from golem.interface.command import group, Argument, command, CommandResult
@group(name="envs", help="Manage environments")
class Environments(object):
name = Argument('name', help="Environment name")
table_headers = ['name', 'supported', 'active', 'performance',
'description']
sort = Argument(
'--sort',
choices=table_headers,
optional=True,
default=None,
help="Sort environments"
)
@command(argument=sort, help="Show environments")
def show(self, sort):
deferred = Environments.client.get_environments()
result = sync_wait(deferred) or []
values = []
for env in result:
values.append([
env['id'],
str(env['supported']),
str(env['accepted']),
str(env['performance']),
env['description']
])
return CommandResult.to_tabular(Environments.table_headers, values,
sort=sort)
@command(argument=name, help="Enable environment")
def enable(self, name):
deferred = Environments.client.enable_environment(name)
return sync_wait(deferred)
@command(argument=name, help="Disable environment")
def disable(self, name):
deferred = Environments.client.disable_environment(name)
return sync_wait(deferred)
@command(argument=name, help="Recount performance for an environment")
def recount(self, name):
deferred = Environments.client.run_benchmark(name)
return sync_wait(deferred, timeout=1800)
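# Illustrative CLI usage for the command group above (a sketch; the "golemcli"
# entry point and the "BLENDER" environment name are assumptions, not taken
# from this module):
#   golemcli envs show --sort name
#   golemcli envs enable BLENDER
#   golemcli envs recount BLENDER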
| gpl-3.0 | 2,024,806,146,087,001,300 | 30 | 75 | 0.603345 | false |
ionelmc/python-aspectlib | tests/test_aspectlib_test.py | 1 | 17683 | from __future__ import print_function
from pytest import raises
from test_pkg1.test_pkg2 import test_mod
from aspectlib import PY2
from aspectlib.test import OrderedDict
from aspectlib.test import Story
from aspectlib.test import StoryResultWrapper
from aspectlib.test import _Binds
from aspectlib.test import _format_calls
from aspectlib.test import _Raises
from aspectlib.test import _Returns
from aspectlib.test import mock
from aspectlib.test import record
from aspectlib.utils import PY26
from aspectlib.utils import repr_ex
pytest_plugins = 'pytester',
def format_calls(calls):
return ''.join(_format_calls(calls))
def module_fun(a, b=2):
pass
def module_fun2(a, b=2):
pass
exc = RuntimeError()
def rfun():
raise exc
def nfun(a, b=2):
return a, b
def test_record():
fun = record(nfun)
assert fun(2, 3) == (2, 3)
assert fun(3, b=4) == (3, 4)
assert fun.calls == [
(None, (2, 3), {}),
(None, (3, ), {'b': 4}),
]
def test_record_result():
fun = record(results=True)(nfun)
assert fun(2, 3) == (2, 3)
assert fun(3, b=4) == (3, 4)
assert fun.calls == [
(None, (2, 3), {}, (2, 3), None),
(None, (3, ), {'b': 4}, (3, 4), None),
]
def test_record_exception():
fun = record(results=True)(rfun)
raises(RuntimeError, fun)
assert fun.calls == [
(None, (), {}, None, exc),
]
def test_record_result_callback():
calls = []
fun = record(results=True, callback=lambda *args: calls.append(args))(nfun)
assert fun(2, 3) == (2, 3)
assert fun(3, b=4) == (3, 4)
assert calls == [
(None, 'test_aspectlib_test.nfun', (2, 3), {}, (2, 3), None),
(None, 'test_aspectlib_test.nfun', (3, ), {'b': 4}, (3, 4), None),
]
def test_record_exception_callback():
calls = []
fun = record(results=True, callback=lambda *args: calls.append(args))(rfun)
raises(RuntimeError, fun)
assert calls == [
(None, 'test_aspectlib_test.rfun', (), {}, None, exc),
]
def test_record_callback():
calls = []
fun = record(callback=lambda *args: calls.append(args))(nfun)
assert fun(2, 3) == (2, 3)
assert fun(3, b=4) == (3, 4)
assert calls == [
(None, 'test_aspectlib_test.nfun', (2, 3), {}),
(None, 'test_aspectlib_test.nfun', (3, ), {'b': 4}),
]
def test_record_with_no_call():
called = []
@record(iscalled=False)
def fun():
called.append(True)
assert fun() is None
assert fun.calls == [
(None, (), {}),
]
assert called == []
def test_record_with_call():
called = []
@record
def fun():
called.append(True)
fun()
assert fun.calls == [
(None, (), {}),
]
assert called == [True]
def test_record_as_context():
with record(module_fun) as history:
module_fun(2, 3)
module_fun(3, b=4)
assert history.calls == [
(None, (2, 3), {}),
(None, (3, ), {'b': 4}),
]
del history.calls[:]
module_fun(2, 3)
module_fun(3, b=4)
assert history.calls == []
def test_bad_mock():
raises(TypeError, mock)
raises(TypeError, mock, call=False)
def test_simple_mock():
assert "foobar" == mock("foobar")(module_fun)(1)
def test_mock_no_calls():
with record(module_fun) as history:
assert "foobar" == mock("foobar")(module_fun)(2)
assert history.calls == []
def test_mock_with_calls():
with record(module_fun) as history:
assert "foobar" == mock("foobar", call=True)(module_fun)(3)
assert history.calls == [(None, (3,), {})]
def test_double_recording():
with record(module_fun) as history:
with record(module_fun2) as history2:
module_fun(2, 3)
module_fun2(2, 3)
assert history.calls == [
(None, (2, 3), {}),
]
del history.calls[:]
assert history2.calls == [
(None, (2, 3), {}),
]
del history2.calls[:]
module_fun(2, 3)
assert history.calls == []
assert history2.calls == []
def test_record_not_iscalled_and_results():
raises(AssertionError, record, module_fun, iscalled=False, results=True)
record(module_fun, iscalled=False, results=False)
record(module_fun, iscalled=True, results=True)
record(module_fun, iscalled=True, results=False)
def test_story_empty_play_noproxy():
with Story(test_mod).replay(recurse_lock=True, proxy=False, strict=False) as replay:
raises(AssertionError, test_mod.target)
assert replay._actual == {}
def test_story_empty_play_proxy():
assert test_mod.target() is None
raises(TypeError, test_mod.target, 123)
with Story(test_mod).replay(recurse_lock=True, proxy=True, strict=False) as replay:
assert test_mod.target() is None
raises(TypeError, test_mod.target, 123)
assert format_calls(replay._actual) == format_calls(OrderedDict([
((None, 'test_pkg1.test_pkg2.test_mod.target', '', ''), _Returns("None")),
((None, 'test_pkg1.test_pkg2.test_mod.target', '123', ''), _Raises(repr_ex(TypeError(
'target() takes no arguments (1 given)' if PY2 else
'target() takes 0 positional arguments but 1 was given',
))))
]))
def test_story_empty_play_noproxy_class():
with Story(test_mod).replay(recurse_lock=True, proxy=False, strict=False) as replay:
raises(AssertionError, test_mod.Stuff, 1, 2)
assert replay._actual == {}
def test_story_empty_play_error_on_init():
with Story(test_mod).replay(strict=False) as replay:
raises(ValueError, test_mod.Stuff, "error")
print(replay._actual)
assert replay._actual == OrderedDict([
((None, 'test_pkg1.test_pkg2.test_mod.Stuff', "'error'", ''), _Raises('ValueError()'))
])
def test_story_half_play_noproxy_class():
with Story(test_mod) as story:
obj = test_mod.Stuff(1, 2)
with story.replay(recurse_lock=True, proxy=False, strict=False):
obj = test_mod.Stuff(1, 2)
raises(AssertionError, obj.mix, 3, 4)
def test_xxx():
with Story(test_mod) as story:
obj = test_mod.Stuff(1, 2)
test_mod.target(1) == 2
test_mod.target(2) == 3
test_mod.target(3) ** ValueError
other = test_mod.Stuff(2, 2)
obj.other('a') == other
obj.meth('a') == 'x'
obj = test_mod.Stuff(2, 3)
obj.meth() ** ValueError('crappo')
obj.meth('c') == 'x'
with story.replay(recurse_lock=True, strict=False) as replay:
obj = test_mod.Stuff(1, 2)
obj.meth('a')
test_mod.target(1)
obj.meth()
test_mod.func(5)
obj = test_mod.Stuff(4, 4)
obj.meth()
for k, v in story._calls.items():
print(k, "=>", v)
print("############## UNEXPECTED ##############")
for k, v in replay._actual.items():
print(k, "=>", v)
# TODO
def test_story_text_helpers():
with Story(test_mod) as story:
obj = test_mod.Stuff(1, 2)
obj.meth('a') == 'x'
obj.meth('b') == 'y'
obj = test_mod.Stuff(2, 3)
obj.meth('c') == 'z'
test_mod.target(1) == 2
test_mod.target(2) == 3
with story.replay(recurse_lock=True, strict=False) as replay:
obj = test_mod.Stuff(1, 2)
obj.meth('a')
obj.meth()
obj = test_mod.Stuff(4, 4)
obj.meth()
test_mod.func(5)
test_mod.target(1)
print(replay.missing)
assert replay.missing == """stuff_1.meth('b') == 'y' # returns
stuff_2 = test_pkg1.test_pkg2.test_mod.Stuff(2, 3)
stuff_2.meth('c') == 'z' # returns
test_pkg1.test_pkg2.test_mod.target(2) == 3 # returns
"""
print(replay.unexpected)
assert replay.unexpected == """stuff_1.meth() == None # returns
stuff_2 = test_pkg1.test_pkg2.test_mod.Stuff(4, 4)
stuff_2.meth() == None # returns
test_pkg1.test_pkg2.test_mod.func(5) == None # returns
"""
print(replay.diff)
if PY26:
assert replay.diff == """--- expected """ """
+++ actual """ """
@@ -1,7 +1,7 @@
stuff_1 = test_pkg1.test_pkg2.test_mod.Stuff(1, 2)
stuff_1.meth('a') == 'x' # returns
-stuff_1.meth('b') == 'y' # returns
-stuff_2 = test_pkg1.test_pkg2.test_mod.Stuff(2, 3)
-stuff_2.meth('c') == 'z' # returns
+stuff_1.meth() == None # returns
+stuff_2 = test_pkg1.test_pkg2.test_mod.Stuff(4, 4)
+stuff_2.meth() == None # returns
+test_pkg1.test_pkg2.test_mod.func(5) == None # returns
test_pkg1.test_pkg2.test_mod.target(1) == 2 # returns
-test_pkg1.test_pkg2.test_mod.target(2) == 3 # returns
"""
else:
assert replay.diff == """--- expected
+++ actual
@@ -1,7 +1,7 @@
stuff_1 = test_pkg1.test_pkg2.test_mod.Stuff(1, 2)
stuff_1.meth('a') == 'x' # returns
-stuff_1.meth('b') == 'y' # returns
-stuff_2 = test_pkg1.test_pkg2.test_mod.Stuff(2, 3)
-stuff_2.meth('c') == 'z' # returns
+stuff_1.meth() == None # returns
+stuff_2 = test_pkg1.test_pkg2.test_mod.Stuff(4, 4)
+stuff_2.meth() == None # returns
+test_pkg1.test_pkg2.test_mod.func(5) == None # returns
test_pkg1.test_pkg2.test_mod.target(1) == 2 # returns
-test_pkg1.test_pkg2.test_mod.target(2) == 3 # returns
"""
def test_story_empty_play_proxy_class_missing_report(LineMatcher):
with Story(test_mod).replay(recurse_lock=True, proxy=True, strict=False) as replay:
obj = test_mod.Stuff(1, 2)
obj.mix(3, 4)
obj.mix('a', 'b')
raises(ValueError, obj.raises, 123)
obj = test_mod.Stuff(0, 1)
obj.mix('a', 'b')
obj.mix(3, 4)
test_mod.target()
raises(ValueError, test_mod.raises, 'badarg')
raises(ValueError, obj.raises, 123)
test_mod.ThatLONGStuf(1).mix(2)
test_mod.ThatLONGStuf(3).mix(4)
obj = test_mod.ThatLONGStuf(2)
obj.mix()
obj.meth()
obj.mix(10)
LineMatcher(replay.diff.splitlines()).fnmatch_lines([
"--- expected",
"+++ actual",
"@@ -0,0 +1,18 @@",
"+stuff_1 = test_pkg1.test_pkg2.test_mod.Stuff(1, 2)",
"+stuff_1.mix(3, 4) == (1, 2, 3, 4) # returns",
"+stuff_1.mix('a', 'b') == (1, 2, 'a', 'b') # returns",
"+stuff_1.raises(123) ** ValueError((123,)*) # raises",
"+stuff_2 = test_pkg1.test_pkg2.test_mod.Stuff(0, 1)",
"+stuff_2.mix('a', 'b') == (0, 1, 'a', 'b') # returns",
"+stuff_2.mix(3, 4) == (0, 1, 3, 4) # returns",
"+test_pkg1.test_pkg2.test_mod.target() == None # returns",
"+test_pkg1.test_pkg2.test_mod.raises('badarg') ** ValueError(('badarg',)*) # raises",
"+stuff_2.raises(123) ** ValueError((123,)*) # raises",
"+that_long_stuf_1 = test_pkg1.test_pkg2.test_mod.ThatLONGStuf(1)",
"+that_long_stuf_1.mix(2) == (1, 2) # returns",
"+that_long_stuf_2 = test_pkg1.test_pkg2.test_mod.ThatLONGStuf(3)",
"+that_long_stuf_2.mix(4) == (3, 4) # returns",
"+that_long_stuf_3 = test_pkg1.test_pkg2.test_mod.ThatLONGStuf(2)",
"+that_long_stuf_3.mix() == (2,) # returns",
"+that_long_stuf_3.meth() == None # returns",
"+that_long_stuf_3.mix(10) == (2, 10) # returns",
])
def test_story_empty_play_proxy_class():
assert test_mod.Stuff(1, 2).mix(3, 4) == (1, 2, 3, 4)
with Story(test_mod).replay(recurse_lock=True, proxy=True, strict=False) as replay:
obj = test_mod.Stuff(1, 2)
assert obj.mix(3, 4) == (1, 2, 3, 4)
assert obj.mix('a', 'b') == (1, 2, 'a', 'b')
raises(TypeError, obj.meth, 123)
obj = test_mod.Stuff(0, 1)
assert obj.mix('a', 'b') == (0, 1, 'a', 'b')
assert obj.mix(3, 4) == (0, 1, 3, 4)
raises(TypeError, obj.meth, 123)
assert format_calls(replay._actual) == format_calls(OrderedDict([
((None, 'test_pkg1.test_pkg2.test_mod.Stuff', "1, 2", ''), _Binds('stuff_1')),
(('stuff_1', 'mix', "3, 4", ''), _Returns("(1, 2, 3, 4)")),
(('stuff_1', 'mix', "'a', 'b'", ''), _Returns("(1, 2, 'a', 'b')")),
(('stuff_1', 'meth', "123", ''), _Raises(repr_ex(TypeError(
'meth() takes exactly 1 argument (2 given)' if PY2 else
'meth() takes 1 positional argument but 2 were given'
)))),
((None, 'test_pkg1.test_pkg2.test_mod.Stuff', "0, 1", ''), _Binds('stuff_2')),
(('stuff_2', 'mix', "'a', 'b'", ''), _Returns("(0, 1, 'a', 'b')")),
(('stuff_2', 'mix', "3, 4", ''), _Returns("(0, 1, 3, 4)")),
(('stuff_2', 'meth', "123", ''), _Raises(repr_ex(TypeError(
'meth() takes exactly 1 argument (2 given)' if PY2 else
'meth() takes 1 positional argument but 2 were given'
))))
]))
def test_story_half_play_proxy_class():
assert test_mod.Stuff(1, 2).mix(3, 4) == (1, 2, 3, 4)
with Story(test_mod) as story:
obj = test_mod.Stuff(1, 2)
obj.mix(3, 4) == (1, 2, 3, 4)
with story.replay(recurse_lock=True, proxy=True, strict=False) as replay:
obj = test_mod.Stuff(1, 2)
assert obj.mix(3, 4) == (1, 2, 3, 4)
assert obj.meth() is None
raises(TypeError, obj.meth, 123)
obj = test_mod.Stuff(0, 1)
assert obj.mix('a', 'b') == (0, 1, 'a', 'b')
assert obj.mix(3, 4) == (0, 1, 3, 4)
raises(TypeError, obj.meth, 123)
assert replay.unexpected == format_calls(OrderedDict([
(('stuff_1', 'meth', '', ''), _Returns('None')),
(('stuff_1', 'meth', '123', ''), _Raises(repr_ex(TypeError(
'meth() takes exactly 1 argument (2 given)' if PY2 else
'meth() takes 1 positional argument but 2 were given'
)))),
((None, 'test_pkg1.test_pkg2.test_mod.Stuff', '0, 1', ''), _Binds("stuff_2")),
(('stuff_2', 'mix', "'a', 'b'", ''), _Returns("(0, 1, 'a', 'b')")),
(('stuff_2', 'mix', '3, 4', ''), _Returns('(0, 1, 3, 4)')),
(('stuff_2', 'meth', '123', ''), _Raises(repr_ex(TypeError(
'meth() takes exactly 1 argument (2 given)' if PY2 else
'meth() takes 1 positional argument but 2 were given'
))))
]))
def test_story_full_play_noproxy():
with Story(test_mod) as story:
test_mod.target(123) == 'foobar'
test_mod.target(1234) ** ValueError
with story.replay(recurse_lock=True, proxy=False, strict=False, dump=False) as replay:
raises(AssertionError, test_mod.target)
assert test_mod.target(123) == 'foobar'
raises(ValueError, test_mod.target, 1234)
assert replay.unexpected == ""
def test_story_full_play_noproxy_dump():
with Story(test_mod) as story:
test_mod.target(123) == 'foobar'
test_mod.target(1234) ** ValueError
with story.replay(recurse_lock=True, proxy=False, strict=False, dump=True) as replay:
raises(AssertionError, test_mod.target)
assert test_mod.target(123) == 'foobar'
raises(ValueError, test_mod.target, 1234)
assert replay.unexpected == ""
def test_story_full_play_proxy():
with Story(test_mod) as story:
test_mod.target(123) == 'foobar'
test_mod.target(1234) ** ValueError
with story.replay(recurse_lock=True, proxy=True, strict=False) as replay:
assert test_mod.target() is None
assert test_mod.target(123) == 'foobar'
raises(ValueError, test_mod.target, 1234)
raises(TypeError, test_mod.target, 'asdf')
assert replay.unexpected == format_calls(OrderedDict([
((None, 'test_pkg1.test_pkg2.test_mod.target', '', ''), _Returns("None")),
((None, 'test_pkg1.test_pkg2.test_mod.target', "'asdf'", ''), _Raises(repr_ex(TypeError(
'target() takes no arguments (1 given)'
if PY2
else 'target() takes 0 positional arguments but 1 was given',)
)))
]))
def test_story_result_wrapper():
x = StoryResultWrapper(lambda *a: None)
raises(AttributeError, setattr, x, 'stuff', 1)
raises(AttributeError, getattr, x, 'stuff')
raises(TypeError, lambda: x >> 2)
raises(TypeError, lambda: x << 1)
raises(TypeError, lambda: x > 1)
x == 1
x ** Exception()
def test_story_result_wrapper_bad_exception():
x = StoryResultWrapper(lambda *a: None)
raises(RuntimeError, lambda: x ** 1)
x ** Exception
x ** Exception('boom!')
def test_story_create():
with Story(test_mod) as story:
test_mod.target('a', 'b', 'c') == 'abc'
test_mod.target() ** Exception
test_mod.target(1, 2, 3) == 'foobar'
obj = test_mod.Stuff('stuff')
assert isinstance(obj, test_mod.Stuff)
obj.meth('other', 1, 2) == 123
obj.mix('other') == 'mixymix'
# from pprint import pprint as print
# print (dict(story._calls))
assert dict(story._calls) == {
(None, 'test_pkg1.test_pkg2.test_mod.Stuff', "'stuff'", ''): _Binds('stuff_1'),
('stuff_1', 'meth', "'other', 1, 2", ''): _Returns("123"),
('stuff_1', 'mix', "'other'", ''): _Returns("'mixymix'"),
(None, 'test_pkg1.test_pkg2.test_mod.target', '', ''): _Raises("Exception"),
(None, 'test_pkg1.test_pkg2.test_mod.target', "1, 2, 3", ''): _Returns("'foobar'"),
(None, 'test_pkg1.test_pkg2.test_mod.target', "'a', 'b', 'c'", ''): _Returns("'abc'"),
}
def xtest_story_empty_play_proxy_class_dependencies():
with Story(test_mod).replay(recurse_lock=True, proxy=True, strict=False) as replay:
obj = test_mod.Stuff(1, 2)
other = obj.other('x')
raises(ValueError, other.raises, 'badarg')
other.mix(3, 4)
obj = test_mod.Stuff(0, 1)
obj.mix(3, 4)
other = obj.other(2)
other.mix(3, 4)
print(repr(replay.diff))
assert replay.diff == ""
| bsd-2-clause | 88,641,805,288,770,620 | 30.408526 | 96 | 0.565402 | false |
pyrrho314/recipesystem | trunk/astrodata/DataSpider.py | 1 | 42351 | try:
import pyfits
except:
pass
import os
import re
from AstroData import *
ldebug = False
verbose = False
from astrodata.adutils import terminal
from ReductionContextRecords import AstroDataRecord
import subprocess
import os
from copy import copy,deepcopy
from AstroData import Errors
from astrodata import new_pyfits_version
uselocalcalserv = False
batchno = 100
if uselocalcalserv: # takes WAY TOO LONG~!!!!!!
from astrodata.LocalCalibrationService import CalibrationService
from CalibrationDefinitionLibrary import CalibrationDefinitionLibrary # For xml calibration requests
def shallow_walk(directory):
global batchno
opti = False
if opti:
print "sw: going to call os.listdir"
ld = os.listdir(directory)
if opti:
print "sw: called os.listdir"
root = directory
dirn = []
files = []
if opti:
print "sw: sorting directories from files in directory"
if batchno != None:
batchsize = batchno
else:
batchsize = 100
for li in ld:
if os.path.isdir(li):
dirn.append(li)
else:
files.append(li)
if len(files)> batchsize:
if opti:
print "yielding batch of " + str(batchsize)
print repr(files)
yield (root, [], files)
files = []
if opti:
print "sw: yielding"
yield (root, [], files)
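# Usage sketch for shallow_walk (illustrative only; the path and process()
# helper below are hypothetical). It behaves like os.walk restricted to the
# top directory, yielding (root, [], files) tuples with the file list split
# into batches controlled by the global `batchno`:
#
#     for root, dirs, files in shallow_walk("/data/raw"):
#         process(root, files)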
class DataSpider(object):
"""
    DataSpider() is a worker class that encapsulates
    reusable code for working with the AstroData-related classes.
e.g. it will walk a directory using AstroData
to check type sizes.
"""
hdulist = None
contextType = None
classification_library = None
cal_search = None
def __init__(self, context = None):
# ==== member vars ====
self.contextType = context
self.classification_library = self.get_classification_library()
if uselocalcalserv:
self.calService = CalibrationService()
self.calDefLib = CalibrationDefinitionLibrary()
def get_classification_library(self):
# @@todo: handle context here
if (self.classification_library == None):
try:
self.classification_library = ClassificationLibrary()
except CLAlreadyExists, s:
self.classification_library = s.clInstance
return self.classification_library
def dumpinfo(self):
#print self.hdulist.info()
if new_pyfits_version:
cards = self.hdulist[0].header.cards
else:
cards = self.hdulist[0].header.ascard
for hd in self.hdulist:
if (hd.data != None):
try:
print hd.data.type()
except:
print "Table"
def typewalk(self, directory = ".", only = "all", pheads = None,
showinfo = False,
onlyStatus = False,
onlyTypology = False,
# generic descriptors interface
showDescriptors = None, # string of comma separated descriptor names (function names!)
filemask = None,
showCals = False,
incolog = True,
stayTop = False,
recipe = None,
raiseExcept = False,
where = None,
batchnum = None,
opti = None):
"""
Recursively walk a given directory and put type information to stdout
"""
global verbose
global debug
global batchno
if batchnum != None:
batchno = batchnum
if raiseExcept:
from astrodata.debugmodes import set_descriptor_throw
set_descriptor_throw(True)
onlylist = only.split(",")
if (verbose):
print "onlylist:",repr(onlylist)
verbose = False
ldebug = False
dirnum = 0
if stayTop == True:
walkfunc = shallow_walk
if opti:
print "Doing a shallow walk"
else:
walkfunc = os.walk
if opti:
print "Doing an os.walk"
for root,dirn,files in walkfunc(directory):
verbose = False
if opti:
print "Analyzing:", root
dirnum += 1
if (verbose) :
print "DS90:",root,dirn,files
#print "root:", root
#print "dirn:", dirn
#if verbose:
# print "DS92:",root, repr(dirn), repr(file)
if (".svn" not in root):
width = 10
## !!!!!
## !!!!! CREATE THE LINE WRITTEN FOR EACH DIRECTORY RECURSED !!!!!
## !!!!!
fullroot = os.path.abspath(root)
if verbose:
print 'DS91:',fullroot
if root == ".":
rootln = "\n${NORMAL}${BOLD}directory: ${NORMAL}. ("+fullroot + ")${NORMAL}"
else:
rootln = "\n${NORMAL}${BOLD}directory: ${NORMAL}"+root + "${NORMAL}"
firstfile = True
for tfile in files:
# we have considered removing this check in place of a
# pyfits open but that was not needed, the pyfits open
# is down lower, this is just to avoid checking files
# that are not named correctly to be FITS, so why check them?
# especially on a command recursing directories and potentially
# looking at a lot of files.
if filemask == None:
# @@NAMING: fits file mask for typewalk
mask = r".*?\.(fits|FITS)$"
else:
mask = filemask
try:
matched = re.match(mask, tfile)
except:
print "BAD FILEMASK (must be a valid regular expression):", mask
return str(sys.exc_info()[1])
if (re.match(mask, tfile)) :
if (ldebug) : print "FITS:", tfile
fname = os.path.join(root, tfile)
try:
# NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE
# fl is the astrodata instance of tfile/fname
fl = AstroData(fname)
#
# NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE
except KeyboardInterrupt:
raise
except:
mes = "Could not open file: %s as AstroData" % fname
print mes
# raise Errors.AstroDataError(mes)
continue
gain = 0
stringway = False
if (stringway):
if (onlyTypology == onlyStatus):
dtypes = self.classification_library.discover_types(fname)
elif (onlyTypology):
dtypes = self.classification_library.discover_typology(fname)
elif (onlyStatus):
dtypes = self.classification_library.discover_status(fname)
else:
# this is the AstroData Class way
# to ask the file itself
if (onlyTypology == onlyStatus):
dtypes = fl.discover_types()
elif (onlyTypology):
dtypes = fl.discover_typology()
elif (onlyStatus):
dtypes = fl.discover_status()
if verbose:
print "DS130:", repr(dtypes)
# print "after classification"
if (dtypes != None) and (len(dtypes)>0):
#check to see if only is set
#only check for given type
found = False
if (only == "all"):
found=True
else:
# note: only can be split this way with no worry about
# whitespace because it's from the commandline, no whitespace
# allowed in that argument, just "," as a separator
ol = only.split(",")
# print ol
found = False
for tpname in dtypes:
if (verbose):
print "DS148", " in ", repr(ol),
if (tpname in ol):
found = True
break
if (verbose):
print "yes, found = ", str(found)
if (found == True):
if where != None:
# let them use underscore as spaces, bash + getopts doesn't like space in params even in quotes
cleanwhere = re.sub("_"," ", where)
ad = fl
try:
found = eval(cleanwhere)
except:
print "can't execute where:\n\t" + where + "\n\t" +cleanwhere
print "reason:\n\t"+str(sys.exc_info()[1])+"\n"+repr(sys.exc_info())
sys.exit(1)
if (found != True):
continue
if (firstfile == True):
print rootln
firstfile = False
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# !!!!PRINTING OUT THE FILE AND TYPE INFO!!!!
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
indent = 5
pwid = 40
fwid = pwid - indent
# print start of string
#print "DS270:", len(tfile)
while len(tfile)>= fwid-1:
if False:
part = tfile[:fwid]
print " ${BG_WHITE}%s${NORMAL}" % part
tfile = tfile[fwid-1:]
else:
print " ${BG_WHITE}%s${NORMAL}" % tfile
tfile = ""
if len(tfile)>0:
prlin = " %s " % tfile
prlincolor = " ${BG_WHITE}%s${NORMAL} " % tfile
else:
prlin = " "
prlincolor = " "
empty = " "*indent + "."*fwid
fwid = pwid+indent
lp = len(prlin)
nsp = pwid - ( lp % pwid )
# print out indent, filename, and "..." to justify types area"
# there is a way to do with with a comprehension?
print prlincolor+("."*nsp)+"${NORMAL}",
# print dtypes
tstr = ""
termsize = terminal.getTerminalSize()
maxlen = termsize[0] - pwid -1
printed = False
dtypes.sort()
for dtype in dtypes:
if (dtype != None):
newtype = "(%s) " % dtype
else:
newtype = "(Unknown) "
# print "(%s)N20091027S0133.fits" % dtype ,
astr = tstr + newtype
if len(astr) >= maxlen:
print "${BLUE}"+ tstr + "${NORMAL}"
tstr = newtype
print empty,
else:
tstr = astr
if tstr != "":
print "${BLUE}"+ tstr + "${NORMAL}"
tstr = ""
astr = ""
printed = True
# new line at the end of the output
# print ""
if (showinfo == True):
print "-"*40
print "AstroData.info():"
fl.info()
print "-"*40
print "pyfits.info():"
fl.hdulist.info()
print "-"*40
#hlist = pyfits.open(fname)
#hlist.info()
#hlist.close()
# print descriptors
# show descriptors
if (showDescriptors != None):
sdl = showDescriptors.split(",")
if verbose:
print "DS320:", repr(sdl)
# print ol
# get maxlen
if "err" in sdl:
errOnly = True
sdl.remove("err")
else:
errOnly = False
maxlen = 0
for sd in sdl:
maxlen = max(len(sd),maxlen)
for sd in sdl:
#print "DS242:", sd
try:
if "(" not in sd:
dval = eval("fl."+sd+"(asList=True)")
else:
#print "DS333:", repr(sd)
dval = eval("fl."+sd)
pad = " " * (maxlen - len(sd))
sd = str(sd) + pad
if dval:
if (not errOnly):
print (" ${BOLD}%s${NORMAL} = %s") % (sd, str(dval))
else:
print ' ${BOLD}(DERR)%s${NORMAL}: ${RED}returned None${NORMAL}' % (sd)
except AttributeError:
exinfo = sys.exc_info()
print ' ${BOLD}(DERR)%s${NORMAL}: ${RED}NO SUCH DESCRIPTOR${NORMAL}' % (sd)
#if raiseExcept:
# raise
except KeyboardInterrupt:
raise
except:
# pad = " " * (maxlen - len(sd))
# sd = str(sd) + pad
exinfo = sys.exc_info()
print ' ${BOLD}(DERR)%s${NORMAL}: ${RED}%s${NORMAL}' % (sd, repr(exinfo[1]).strip())
#if raiseExcept:
# raise
# if phead then there are headers to print per file
if (pheads != None):
#print " -----------"sys.exec
print " ${UNDERLINE}PHU Headers${NORMAL}"
#print " -----------"
#print "pheads", pheads
hlist = pyfits.open(fname)
pheaders = pheads.split(",")
for headkey in pheaders:
#if in phu, this is the code
try:
print " %s = (%s)" % (headkey, hlist[0].header[headkey])
except KeyError:
print " %s not present in PHU of %s" % (headkey, tfile)
hlist.close()
if (showCals == True):
from astrodata.adutils.adccutils.calutil import localCalibrationSearch
from astrodata.adutils.adccutils.calutil import geminiCalibrationSearch
calurls = localCalibrationSearch(fl)
print " ${BOLD}Local Calibration Search${NORMAL}"
if calurls != None:
for caltyp in calurls.keys():
print " ${BOLD}%s${NORMAL}: %s" % (caltyp, calurls[caltyp])
else:
print " ${RED}No Calibrations Found${NORMAL}"
calurls = geminiCalibrationSearch(fl)
print " ${BOLD}Gemini Calibration Search${NORMAL}"
if calurls != None:
for caltyp in calurls.keys():
print " ${BOLD}%s${NORMAL}: %s" % (caltyp, calurls[caltyp])
else:
print " ${RED}No Calibrations Found${NORMAL}"
if (recipe):
banner = ' Running Recipe "%s" on %s ' % (recipe, fname)
print "${REVERSE}${RED}" + " "*len(banner)
print banner
print " "*len(banner)+"${NORMAL}"
if recipe == "default":
rs = ""
else:
rs = "-r %s" % recipe
subprocess.call("reduce %s %s" % (rs, fname), shell=True)
else:
if (verbose) : print "%s is not a FITS file" % tfile
if False: # done with walk function switching if stayTop == True:
# cheap way to not recurse.
break;
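    # Typical invocation sketch (assumed; the directory, type name and
    # descriptor names below are illustrative, not taken from this module):
    #     ds = DataSpider()
    #     ds.typewalk(directory="/data/night1", only="GMOS_IMAGE",
    #                 showDescriptors="gain,exposure_time")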
def datasetwalk(self, directory = ".", only = "all", pheads = None,
showinfo = False,
onlyStatus = False,
onlyTypology = False,
# generic descriptors interface
showDescriptors = None, # string of comma separated descriptor names (function names!)
filemask = None,
showCals = False,
incolog = True,
stayTop = False,
recipe = None,
raiseExcept = False,
where = None,
batchnum = None,
opti = None):
"""
Recursively walk a given directory and put type information to stdout
"""
# About the DirDict class
"""
        The DirDict class represents a single directory, and all its contents
that are relevant. It is filled by the client code (datasetwalk)
so that only "relevant" files are added, and only directories containing
relevant files are shown. Allows iteration to, for example, populate
a tree control.
Note, the path given is the root path, the user has no access to any
parent or sibling directories. However... also note, it is a locally
running action, it just happens to use a web interface rather than
tk, qt, etc. Actions may be final.
"""
dirdict = DirDict(os.path.abspath(directory))
global verbose
global debug
global batchno
if batchnum != None:
batchno = batchnum
onlylist = only.split(",")
if (verbose):
print "onlylist:",repr(onlylist)
print '>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> DATA SPI'
verbose = True
ldebug = True
dirnum = 0
if stayTop == True:
walkfunc = shallow_walk
if opti:
print "Doing a shallow walk"
else:
walkfunc = os.walk
if opti:
print "Doing an os.walk"
for root,dirn,files in walkfunc(directory):
#dirdict.adddir(root)
if opti:
print "Analyzing:", root
dirnum += 1
if (verbose) :
print "root:", root
print "dirn:", dirn
if verbose:
print "DS92:",root, repr(dirn), repr(file)
if (".svn" not in root):
width = 10
## !!!!!
## !!!!! CREATE THE LINE WRITTEN FOR EACH DIRECTORY RECURSED !!!!!
## !!!!!
fullroot = os.path.abspath(root)
if root == ".":
rootln = "\n${NORMAL}${BOLD}directory: ${NORMAL}. ("+fullroot + ")${NORMAL}"
else:
rootln = "\n${NORMAL}${BOLD}directory: ${NORMAL}"+root + "${NORMAL}"
firstfile = True
# print "DS472:", repr(files)
for tfile in files:
if tfile == None:
                        raise RuntimeError(str(files))
# we have considered removing this check in place of a
# pyfits open but that was not needed, the pyfits open
# is down lower, this is just to avoid checking files
# that are not named correctly to be FITS, so why check them?
# especially on a command recursing directories and potentially
# looking at a lot of files.
if filemask == None:
# @@NAMING: fits file mask for typewalk
mask = r".*?\.(fits|FITS)$"
else:
mask = filemask
try:
matched = re.match(mask, tfile)
except:
print "BAD FILEMASK (must be a valid regular expression):", mask
return str(sys.exc_info()[1])
sys.stdout.write(".")
if (re.match(mask, tfile)) :
if (ldebug) : print "FITS:", tfile
fname = os.path.join(root, tfile)
try:
fl = AstroData(fname)
except KeyboardInterrupt:
raise
except:
mes = "Could not open %s as AstroData" % fname
continue
gain = 0
stringway = False
if (stringway):
if (onlyTypology == onlyStatus):
dtypes = self.classification_library.discover_types(fname)
elif (onlyTypology):
dtypes = self.classification_library.discover_typology(fname)
elif (onlyStatus):
dtypes = self.classification_library.discover_status(fname)
else:
# this is the AstroData Class way
# to ask the file itself
if (onlyTypology == onlyStatus):
dtypes = fl.discover_types()
elif (onlyTypology):
dtypes = fl.discover_typology()
elif (onlyStatus):
dtypes = fl.discover_status()
if verbose:
print "DS130:", repr(dtypes)
# print "after classification"
if (dtypes != None) and (len(dtypes)>0):
#check to see if only is set
#only check for given type
found = False
if (only == "all"):
found=True
else:
# note: only can be split this way with no worry about
# whitespace because it's from the commandline, no whitespace
# allowed in that argument, just "," as a separator
ol = only.split(",")
# print ol
found = False
for tpname in dtypes:
if (verbose):
print "DS148", " in ", repr(ol),
if (tpname in ol):
found = True
break
if (verbose):
print "yes, found = ", str(found)
if (found == True):
if where != None:
# let them use underscore as spaces, bash + getopts doesn't like space in params even in quotes
cleanwhere = re.sub("_"," ", where)
ad = fl
try:
found = eval(cleanwhere)
except:
print "can't execute where:\n\t" + where + "\n\t" +cleanwhere
print "reason:\n\t"+str(sys.exc_info()[1])+"\n"+repr(sys.exc_info())
sys.exit(1)
if (found != True):
continue
if (firstfile == True):
pass # print rootln
firstfile = False
#dirdict tending
dirdict.add_dir(fullroot)
dirdict.add_file(tfile, root=fullroot)
sys.stdout.write("+")
sys.stdout.flush()
if tfile != "":
dirdict.add_file_prop(tfile, root= fullroot, propname="types", propval=dtypes)
# new line at the end of the output
# print ""
# show descriptors
if (showDescriptors != None):
sdl = showDescriptors.split(",")
# print ol
# get maxlen
maxlen = 0
for sd in sdl:
maxlen = max(len(sd),maxlen)
# print "DS595:", repr(fl.gain(as_dict=True))
# print "DS596:", repr(fl.amp_read_area(asList = True))
for sd in sdl:
#print "DS242:", sd
try:
if "(" not in sd:
dval = eval("fl."+sd+"(asList=True)")
else:
dval = eval("fl."+sd)
pad = " " * (maxlen - len(sd))
sd = str(sd) + pad
print (" ${BOLD}%s${NORMAL} = %s") % (sd, str(dval))
except AttributeError:
pad = " " * (maxlen - len(sd))
sd = str(sd) + pad
exinfo = sys.exc_info()
print " ${BOLD}%s${NORMAL} = ${RED}NO SUCH DESCRIPTOR${NORMAL}" % (sd)
if raiseExcept:
raise
except:
pad = " " * (maxlen - len(sd))
sd = str(sd) + pad
print (" ${BOLD}%s${NORMAL} = ${RED}FAILED${NORMAL}: %s") % (sd, str(sys.exc_info()[1]))
raise
if raiseExcept:
raise
# if phead then there are headers to print per file
if (pheads != None):
#print " -----------"sys.exec
print " ${UNDERLINE}PHU Headers${NORMAL}"
#print " -----------"
#print "pheads", pheads
hlist = pyfits.open(fname)
pheaders = pheads.split(",")
for headkey in pheaders:
#if in phu, this is the code
try:
print " %s = (%s)" % (headkey, hlist[0].header[headkey])
except KeyError:
print " %s not present in PHU of %s" % (headkey, tfile)
hlist.close()
if (showCals == True):
adr = AstroDataRecord(fl)
for caltyp in ["bias", "twilight"]:
rq = self.calDefLib.get_cal_req([adr],caltyp)[0]
try:
cs = "%s" % (str(self.calService.search(rq)[0]))
except:
cs = "No %s found, %s " % ( caltyp, str(sys.exc_info()[1]))
raise
print " %10s: %s" % (caltyp, cs)
if (recipe):
banner = ' Running Recipe "%s" on %s ' % (recipe, fname)
print "${REVERSE}${RED}" + " "*len(banner)
print banner
print " "*len(banner)+"${NORMAL}"
if recipe == "default":
rs = ""
else:
rs = "-r %s" % recipe
subprocess.call("reduce %s %s" % (rs, fname), shell=True)
else:
if (verbose) : print "%s is not a FITS file" % tfile
if False: # done with walk function switching if stayTop == True:
# cheap way to not recurse.
break;
print ""
return dirdict
def path2list(path):
# this is because path.split doesn't split dirs with trailing /'s
if path[-1]==os.sep:
path = path[:-1]
upath = path
palist = []
while True:
upath, tail = os.path.split(upath)
if tail == "":
break;
else:
palist.insert(0, tail)
return palist
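# Illustrative behaviour of path2list (not part of the original module):
# a trailing separator is stripped first and the leading root is dropped,
# so on a POSIX system:
#     path2list("/data/night1/raw/")  ->  ["data", "night1", "raw"]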
class DirDict(object):
rootdir = None
rootdirlist = None
direntry = None
givenRootdir = None
entryDict = None
def __init__(self, rootdir = "."):
self.givenRootdir = rootdir
self.rootdir = os.path.abspath(rootdir)
self.direntry = DirEntry("",parent=self)
self.entryDict = {}
def report_entry( self, name, path):
self.entryDict.update({name:path})
def reldir(self, dirname):
if dirname[:len(self.rootdir)] != self.rootdir:
raise "this shouldn't happen, maybe a security breach"
else:
return dirname[len(self.rootdir):]
def add_dir(self, path):
# print "DS746: adding path", path
if path[:len(self.rootdir)] != self.rootdir:
raise "can't add that bad directory! "+path
relpath = path[len(self.rootdir):]
if self.direntry.path == relpath:
# print "DS750: path is already added at top:", path
return
else:
# print "DS753: having subdir add path if need be"
pathlist = path2list(relpath)
rpathlist = copy(pathlist)
self.direntry.add_dir(rpathlist)
def add_file(self, filename, root = None):
if root == None:
base = os.path.basename(filename)
dirn = os.path.dirname(filename)
else:
dirn = os.path.join(root,os.path.dirname(filename))
base = os.path.basename(filename)
# print "DS765:", repr(dirn)
dirlist = path2list(self.reldir(dirn))
# print "DS767:", repr(dirlist)
self.direntry.add_file(FileEntry(base,dirn), dirlist)
def add_file_prop(self, filename, root=None, propname = None, propval = None):
#print "\nDS775:", repr(filename), repr(root)
targfileent=self.direntry.find_file_entry(filename, root)
#print "DS777:",repr(targfileent), repr(filename), repr(root)
#print "DS778:",targfileent.fullpath()
targfileent.add_prop(propname, propval)
def fullpath(self):
return self.rootdir
def dirwalk(self):
for direntry in self.direntry.dirwalk():
#print "DS760:", direntry.path, direntry.fullpath(),direntry
yield direntry
def get_full_path(self,filename):
if filename in self.entryDict:
return os.path.join(self.entryDict[filename], filename)
else:
return None
def as_xml(self):
return self.direntry.as_xml()
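# Rough end-to-end sketch (assumed usage, not from the original code):
# DataSpider.datasetwalk() builds and returns a DirDict, which a web client
# can then consume, e.g.:
#
#     spider = DataSpider()
#     tree = spider.datasetwalk("/data/night1")      # hypothetical path
#     xml = tree.as_xml()                            # tree for the browser UI
#     full = tree.get_full_path("N20091027S0133.fits")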
class DirEntry(object):
path = None
files = None
dirs = None
parent = None
dataSpider = None
def __init__(self, dirpath, parent = None):
self.path = dirpath
self.files = {}
self.dirs = {}
self.parent = parent
def reldir(self, dirname):
root = self.parent.fullpath()
if not dirname.startswith(root):
print "DS752: (%s) %s", dirname, root
raise "this shouldn't happen, maybe a security breach"
else:
return dirname[len(root):]
def report_entry(self, name, path):
self.parent.report_entry(name, path)
def add_dir(self, pathlist):
subdir = pathlist.pop(0)
if subdir not in self.dirs.keys():
#print "DS774: adding subdir:", subdir
self.dirs.update({subdir:DirEntry(subdir, parent = self)})
#print "DS776:", id(self), repr(self.dirs)
#print "consumable pathlist:", pathlist
if len(pathlist)>0:
self.dirs[subdir].add_dir(pathlist)
def add_file(self, base, dirlist):
#$ print "DS795:", repr(dirlist)
if len(dirlist)==0:
# it's my file!
base.parent=self
self.files.update({base.basename:base})
else:
tdir = dirlist.pop(0)
if tdir not in self.dirs:
raise "broken tree search, no place for file"
else:
self.dirs[tdir].add_file(base, dirlist)
def fullpath(self):
rets = os.path.join(self.parent.fullpath(),self.path)
return rets
def dirwalk(self):
yield self
if len(self.dirs)>0:
for dekey in self.dirs:
for dent in self.dirs[dekey].dirwalk():
yield dent
def find_file_entry(self,filename,root=None, dirlist = None):
if root == None:
base = os.path.basename(filename)
dirn = os.path.dirname(filename)
else:
dirn = os.path.join(root,os.path.dirname(filename))
base = os.path.basename(filename)
self.report_entry(base, dirn)
if dirlist == None:
# print "DS852:", repr(dirn), repr(self.reldir(dirn))
dirlist = path2list(self.reldir(dirn))
if len(dirlist)==0:
#then find the file
# print "self.files, filn", repr(self.files)
for filn in self.files.keys():
if filn == filename:
fil = self.files[filn]
# print "DS858: found FileEntry:", repr(fil)
return fil
#raise "fileEntry does not exist"
return None
else:
tdir = dirlist.pop(0)
if tdir not in self.dirs:
raise "broken tree search, file address invalid"
else:
return self.dirs[tdir].find_file_entry(base, dirn, dirlist)
def as_xml(self, top = True):
rtemp = """
<dirEntry %(id)s name="%(dirname)s">
%(files)s\n
%(childdirs)s\n
</dirEntry>
"""
if top == True:
idstr = 'id="topDirectory"'
else:
idstr = ""
rfiles = ""
fils = self.files.keys()
if len(fils)>0:
rfiles += '<filesList name="files">\n'
for fil in fils:
rfiles += '\t<fileEntry name="%(file)s" fullpath="%(full)s">\n' % {
"file":self.files[fil].basename,
"full":self.files[fil].fullpath()}
props = self.files[fil].props
if False: # DON'T SEND TYPES, no need... ?? --> if "types" in props:
tlist = props["types"]
for typ in tlist:
rfiles += '\t\t<astrodatatype name="%(typ)s"/>\n' % {
"typ":typ}
rfiles += "\t</fileEntry>\n"
rfiles += "</filesList>\n"
dirs = self.dirs.keys()
rdirs = ""
if len(dirs)>0:
for dirn in dirs:
rdirs += self.dirs[dirn].as_xml(top=False)
return rtemp % { "dirname" : self.fullpath(),
"files" : rfiles,
"childdirs": rdirs,
"id":idstr }
def __str__(self):
return repr(self.dirs)
class FileEntry(object):
basename = None
directory = None
parent = None
props = None
def __init__(self, basename, directory, parent = None):
self.basename = basename
self.directory = directory
self.parent = parent
self.props = {}
def fullpath(self):
#print "DS865: FileEntry #", id(self)
return os.path.join(self.parent.fullpath(), self.basename)
def add_prop(self, name, val):
self.props.update({name:val})
| mpl-2.0 | 8,175,685,783,328,367,000 | 42.48152 | 138 | 0.378928 | false |
teamclairvoyant/airflow-scheduler-failover-controller | scheduler_failover_controller/metadata/base_metadata_service.py | 1 | 1260 | import datetime
from scheduler_failover_controller.utils import date_utils
class BaseMetadataService:
def initialize_metadata_source(self):
raise NotImplementedError
def get_failover_heartbeat(self):
raise NotImplementedError
def set_failover_heartbeat(self):
raise NotImplementedError
def get_active_failover_node(self):
raise NotImplementedError
def set_active_failover_node(self, node):
raise NotImplementedError
def get_active_scheduler_node(self):
raise NotImplementedError
def set_active_scheduler_node(self, node):
raise NotImplementedError
def clear(self):
raise NotImplementedError
def print_metadata(self):
print("Printing Metadata: ")
print("==============================")
print("active_failover_node: " + str(self.get_active_failover_node()))
print("active_scheduler_node: " + str(self.get_active_scheduler_node()))
print( "last_failover_heartbeat: " + str(self.get_failover_heartbeat()))
print("")
print("Printing Other Info: ")
print("==============================")
print( "current_timestamp: " + str(date_utils.get_datetime_as_str(datetime.datetime.now())))
| apache-2.0 | 6,998,434,482,056,831,000 | 30.5 | 100 | 0.637302 | false |
freiheit/Bay-Oh-Woolph | cogs/basicpromotions.py | 1 | 14770 | from discord.ext import commands
from utils import *
import discord
import asyncio
from cogs.updateroster import UpdateRoster
from config import Config
import logging
logger = logging.getLogger('bayohwoolph.cogs.basicpromotions')
BASICPROMOTIONS = Config.config['BASICPROMOTIONS']
ROLE_CADET = BASICPROMOTIONS['ROLE_CADET']
ROLE_OFFICER = BASICPROMOTIONS['ROLE_OFFICER']
ROLE_PS4 = BASICPROMOTIONS['ROLE_PS4']
ROLE_PS4CADET = BASICPROMOTIONS['ROLE_PS4CADET']
ROLE_XBOX = BASICPROMOTIONS['ROLE_XBOX']
ROLE_XBOXCADET = BASICPROMOTIONS['ROLE_XBOXCADET']
ROLE_PC = BASICPROMOTIONS['ROLE_PC']
CADETS_MESS = BASICPROMOTIONS['CADETS_MESS']
PS4_ROOM = BASICPROMOTIONS['PS4_ROOM']
XBOX_ROOM = BASICPROMOTIONS['XBOX_ROOM']
OFFICERS_CLUB = BASICPROMOTIONS['OFFICERS_CLUB']
BOT_NOISE = BASICPROMOTIONS['bot_noise']
ROLE_MEMBER = BASICPROMOTIONS['ROLE_MEMBER']
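# The [BASICPROMOTIONS] options read above are assumed to come from the bot's
# config file; an illustrative (made-up) excerpt might look like:
#   [BASICPROMOTIONS]
#   ROLE_CADET = 123456789012345678
#   CADETS_MESS = 123456789012345679
#   bot_noise = 123456789012345680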
NEWPCCADETMSG = """**Welcome to Dark Echo, {0}!**
**<:echoBlue:230423421983522816> Here are the basic steps to get started with Dark Echo: <:echoBlue:230423421983522816>**
-->Please read and make sure you understand the channel structure in <#146723400671428608>.
1. If you use Inara, join us at <http://inara.cz/wing/300>
2. In the game, apply to the "Dark Echo" squadron.
3. Send in-game friend requests to the Echoes you see currently active on Discord and/or in the in-game squadron.
4. Check <#161529165223428096> for current priorities.
5. Move your primary base of operations (any additional ships, etc) to Snodgrass Orbital in Disci.
6. Join the "Dark Echo" private group.
Note: You cannot get to Disci in a starter sidewinder. You need 9.5LY jump range. Upgrade from "E" to "D". We can help if you need it.
Check your welcome email, there is an optional and yet fun way to make your trip to Disci worthwhile.
Please set an avatar image in Discord, as it greatly helps with telling people apart when using the in-game overlay.
If you stay active with us for a couple of weeks and haven't heard about a promotion to Officer, please remind the Leadership.
"""
NEWPS4CADETMSG = """**Welcome to Dark Echo, {0}!**
**<:echoBlue:230423421983522816> Here are the basic steps to get started with Dark Echo: <:echoBlue:230423421983522816>**
-->Please read and make sure you understand the channel structure in <#146723400671428608>.
1. If you use Inara, join us at http://inara.cz/wing/300
2. Send a PSN friend request to "Elite-DarkEcho".
3. Once the PSN friend request is accepted: In the game, under "Friends and Private Groups", send a friend request and request membership in the "Elite-DarkEcho" private group.
4. Check <#161529165223428096> for current priorities.
5. Move your primary base of operations (any additional ships, etc) to Snodgrass Orbital in Disci.
6. Set your ship id to [ECHO] or put [ECHO] in your ship name, whichever you prefer.
Note: You cannot get to Disci in a starter sidewinder. You need 9.5LY jump range. Upgrade Sidewinder or Eagle from "E" to "D"; or use a Hauler. If you're still having trouble, talk to us and somebody can help.
Check your welcome email, there is an optional and yet fun way to make your trip to Disci worthwhile.
If you stay active with us for a couple of weeks and haven't heard about a promotion to Officer, please remind the Leadership.
"""
NEWXBOXCADETMSG = """**Welcome to Dark Echo, {0}!**
**<:echoBlue:230423421983522816> Here are the basic steps to get started with Dark Echo: <:echoBlue:230423421983522816>**
-->Please read and make sure you understand the channel structure in <#146723400671428608>.
1. If you use Inara, join us at http://inara.cz/wing/300
2. Send a XBOX Live friend request to "ED Dark Echo" and join the "Dark Echo" club.
3. Once the XBOX Live friend request is accepted: In the game, under "Friends and Private Groups", request membership in the "ED Dark Echo" private group.
4. Check <#161529165223428096> for current priorities.
5. Move your primary base of operations (any additional ships, etc) to Snodgrass Orbital in Disci.
6. Set your ship id to [ECHO] or put [ECHO] in your ship name, whichever you prefer.
Note: You cannot get to Disci in a starter sidewinder. You need 9.5LY jump range. Upgrade Sidewinder or Eagle from "E" to "D"; or use a Hauler. If you're still having trouble, talk to us and somebody can help.
Also check your welcome email, there is an optional and yet fun way to make your trip to Disci worthwhile.
If you stay active with us for a couple of weeks and haven't heard about a promotion to Officer, please remind the Leadership.
"""
NEWOFFICERMSG = """**<:echoBlue:230423421983522816> Welcome to Dark Echo's Officer's Club, {0}!**
Dark Echo's Dark Council believes that you are an asset to this organization, and has promoted you to a full member (Officer).
Optional but traditional and highly recommended: Please bring some sort of rare beverage to Snodgrass Orbital in Disci and share a screenshot of that run on the forums and/or in <#173953415280328704>.
A Dark Council Member will update your forum permissions. Once your forum permissions are set up, make sure to:
If you use Inara, join us at <http://inara.cz/wing/300>.
"""
class Basicpromotions:
"""Leadership/Recruiter commands for promoting to basic membership roles."""
def __init__(self, bot):
self.bot = bot
@commands.command()
@commands.has_any_role('Leadership','Recruiter')
async def newpccadet(self,ctx,
member1 : discord.Member = None,
member2 : discord.Member = None,
member3 : discord.Member = None,
member4 : discord.Member = None,
member5 : discord.Member = None,
member6 : discord.Member = None,
member7 : discord.Member = None,
member8 : discord.Member = None,
member9 : discord.Member = None,
member10 : discord.Member = None,
member11 : discord.Member = None,
member12 : discord.Member = None,
member13 : discord.Member = None,
member14 : discord.Member = None,
member15 : discord.Member = None,
member16 : discord.Member = None,
member17 : discord.Member = None,
member18 : discord.Member = None,
member19 : discord.Member = None,
member20 : discord.Member = None ):
"""Get new PC platform Cadet started."""
await ctx.trigger_typing()
# pull all the arguments into an array
argmembers = [member1, member2, member3, member4, member5, member6, member7, member8, member9, member10, member11, member12, member13, member14, member15, member16, member17, member18, member19, member20 ]
# and then filter out the None/empty items, so that we have only an array of things actually mentioned
filter(None,argmembers)
members = [i for i in argmembers if i is not None]
memrole = discord.utils.get(ctx.guild.roles, id=int(ROLE_MEMBER))
cadetrole = discord.utils.get(ctx.guild.roles, id=int(ROLE_CADET))
pcrole = discord.utils.get(ctx.guild.roles, id=int(ROLE_PC))
for member in members:
try:
await member.add_roles(cadetrole,memrole,pcrole)
except Exception as e:
await ctx.send('Unable to set PC Cadet role.')
mentiontext = memberlist_to_mentionlist(members)
cadetsmess = self.bot.get_channel(int(CADETS_MESS))
await cadetsmess.send(NEWPCCADETMSG.format(mentiontext))
await ctx.send('Go check out <#{}>, '.format(CADETS_MESS) + mentiontext + '.')
@commands.command()
@commands.has_any_role('Leadership','Recruiter')
async def newps4cadet(self, ctx,
member1 : discord.Member = None,
member2 : discord.Member = None,
member3 : discord.Member = None,
member4 : discord.Member = None,
member5 : discord.Member = None,
member6 : discord.Member = None,
member7 : discord.Member = None,
member8 : discord.Member = None,
member9 : discord.Member = None,
member10 : discord.Member = None,
member11 : discord.Member = None,
member12 : discord.Member = None,
member13 : discord.Member = None,
member14 : discord.Member = None,
member15 : discord.Member = None,
member16 : discord.Member = None,
member17 : discord.Member = None,
member18 : discord.Member = None,
member19 : discord.Member = None,
member20 : discord.Member = None):
"""Get new Playstation4 platform Cadet started."""
await ctx.trigger_typing()
# pull all the arguments into an array
argmembers = [member1, member2, member3, member4, member5, member6, member7, member8, member9, member10, member11, member12, member13, member14, member15, member16, member17, member18, member19, member20 ]
# and then filter out the None/empty items, so that we have only an array of things actually mentioned
filter(None,argmembers)
members = [i for i in argmembers if i is not None]
memrole = discord.utils.get(ctx.guild.roles, id=int(ROLE_MEMBER))
cadetrole = discord.utils.get(ctx.guild.roles, id=int(ROLE_CADET))
ps4role = discord.utils.get(ctx.guild.roles, id=int(ROLE_PS4))
ps4cadet = discord.utils.get(ctx.guild.roles, id=int(ROLE_PS4CADET))
for member in members:
try:
await member.add_roles(cadetrole,memrole,ps4role,ps4cadet)
except Exception as e:
await ctx.send('Unable to set PS4 Cadet role.')
mentiontext = memberlist_to_mentionlist(members)
cadetsmess = self.bot.get_channel(int(CADETS_MESS))
await cadetsmess.send(NEWPS4CADETMSG.format(mentiontext))
await ctx.send('Go check out <#{}>, '.format(CADETS_MESS) + mentiontext + '.')
@commands.command()
@commands.has_any_role('Leadership','Recruiter')
async def newxboxcadet(self, ctx,
member1 : discord.Member = None,
member2 : discord.Member = None,
member3 : discord.Member = None,
member4 : discord.Member = None,
member5 : discord.Member = None,
member6 : discord.Member = None,
member7 : discord.Member = None,
member8 : discord.Member = None,
member9 : discord.Member = None,
member10 : discord.Member = None,
member11 : discord.Member = None,
member12 : discord.Member = None,
member13 : discord.Member = None,
member14 : discord.Member = None,
member15 : discord.Member = None,
member16 : discord.Member = None,
member17 : discord.Member = None,
member18 : discord.Member = None,
member19 : discord.Member = None,
member20 : discord.Member = None ):
"""Get new xbox platform Cadet started."""
await ctx.trigger_typing()
# pull all the arguments into an array
argmembers = [member1, member2, member3, member4, member5, member6, member7, member8, member9, member10, member11, member12, member13, member14, member15, member16, member17, member18, member19, member20 ]
# and then filter out the None/empty items, so that we have only an array of things actually mentioned
filter(None,argmembers)
members = [i for i in argmembers if i is not None]
memrole = discord.utils.get(ctx.guild.roles, id=int(ROLE_MEMBER))
cadetrole = discord.utils.get(ctx.guild.roles, id=int(ROLE_CADET))
xboxrole = discord.utils.get(ctx.guild.roles, id=int(ROLE_XBOX))
xboxcadet = discord.utils.get(ctx.guild.roles, id=int(ROLE_XBOXCADET))
for member in members:
try:
await member.add_roles(cadetrole,memrole,xboxrole,xboxcadet)
except Exception as e:
await ctx.send('Unable to set Xbox Cadet role.')
mentiontext = memberlist_to_mentionlist(members)
cadetsmess = self.bot.get_channel(int(CADETS_MESS))
await cadetsmess.send(NEWXBOXCADETMSG.format(mentiontext))
await ctx.send('Go check out <#{}>, '.format(CADETS_MESS) + mentiontext + '.')
@commands.command()
@commands.has_role('Leadership')
async def newofficer(self, ctx,
member1 : discord.Member = None,
member2 : discord.Member = None,
member3 : discord.Member = None,
member4 : discord.Member = None,
member5 : discord.Member = None,
member6 : discord.Member = None,
member7 : discord.Member = None,
member8 : discord.Member = None,
member9 : discord.Member = None,
member10 : discord.Member = None,
member11 : discord.Member = None,
member12 : discord.Member = None,
member13 : discord.Member = None,
member14 : discord.Member = None,
member15 : discord.Member = None,
member16 : discord.Member = None,
member17 : discord.Member = None,
member18 : discord.Member = None,
member19 : discord.Member = None,
member20 : discord.Member = None ):
"""Give intro message to new officer and assign them Officer role."""
await ctx.trigger_typing()
# pull all the arguments into an array
argmembers = [member1, member2, member3, member4, member5, member6, member7, member8, member9, member10, member11, member12, member13, member14, member15, member16, member17, member18, member19, member20 ]
# and then filter out the None/empty items, so that we have only an array of things actually mentioned
members = [i for i in argmembers if i is not None]
officerrole = discord.utils.get(ctx.guild.roles, id=int(ROLE_OFFICER))
cadetrole = discord.utils.get(ctx.guild.roles, id=int(ROLE_CADET))
botnoise = self.bot.get_channel(int(BOT_NOISE))
officersclub = self.bot.get_channel(int(OFFICERS_CLUB))
for member in members:
try:
await member.add_roles(officerrole)
except Exception as e:
await ctx.send('Unable to set Officer role.')
cleannick = member_to_clean_nick(member)
await botnoise.send('!addroster ' + cleannick)
mentiontext = memberlist_to_mentionlist(members)
# sleep for a second to make sure the role has gone through before sending messages that need it
await asyncio.sleep(1)
await officersclub.send(NEWOFFICERMSG.format(mentiontext))
await botnoise.send("!whois -r -d -role 'Officer' -nick")
for member in members:
await member.remove_roles(cadetrole)
def setup(bot):
bot.add_cog(Basicpromotions(bot))
| agpl-3.0 | -6,239,468,246,116,860,000 | 43.221557 | 213 | 0.673527 | false |
stvstnfrd/edx-platform | lms/djangoapps/verify_student/tasks.py | 1 | 5646 | """
Django Celery tasks for service status app
"""
import logging
from smtplib import SMTPException
import requests
import simplejson
from celery import Task, shared_task
from celery.states import FAILURE
from django.conf import settings
from django.core.mail import EmailMessage
from edx_django_utils.monitoring import set_code_owner_attribute
from common.djangoapps.edxmako.shortcuts import render_to_string
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
log = logging.getLogger(__name__)
class BaseSoftwareSecureTask(Task): # lint-amnesty, pylint: disable=abstract-method
"""
Base task class for use with Software Secure request.
Permits updating information about user attempt in correspondence to submitting
request to software secure.
"""
abstract = True
def on_success(self, retval, task_id, args, kwargs):
"""
Update SoftwareSecurePhotoVerification object corresponding to this
task with info about success.
Updates the user verification attempt to "submitted" if the response was ok,
otherwise sets it to "must_retry".
Assumes `retval` is a dict containing the task's result, with the following keys:
'response_ok': boolean, indicating if the response was ok
'response_text': string, indicating the response text in case of failure.
"""
from .models import SoftwareSecurePhotoVerification
user_verification = SoftwareSecurePhotoVerification.objects.get(id=kwargs['user_verification_id'])
if retval['response_ok']:
user_verification.mark_submit()
log.info(
'Sent request to Software Secure for user: %r and receipt ID %r.',
user_verification.user.username,
user_verification.receipt_id,
)
return user_verification
user_verification.mark_must_retry(retval['response_text'])
def after_return(self, status, retval, task_id, args, kwargs, einfo):
"""
If max retries have been reached and the task status is still failing, mark the user
submission as "must_retry" so that it can be retried later.
"""
if self.max_retries == self.request.retries and status == FAILURE:
from .models import SoftwareSecurePhotoVerification
user_verification_id = kwargs['user_verification_id']
user_verification = SoftwareSecurePhotoVerification.objects.get(id=user_verification_id)
user_verification.mark_must_retry()
log.error(
'Software Secure submission failed for user %r, setting status to must_retry',
user_verification.user.username,
exc_info=True
)
@shared_task
@set_code_owner_attribute
def send_verification_status_email(context):
"""
Spins a task to send verification status email to the learner
"""
subject = context.get('subject')
message = render_to_string(context.get('template'), context.get('email_vars'))
from_addr = configuration_helpers.get_value(
'email_from_address',
settings.DEFAULT_FROM_EMAIL
)
dest_addr = context.get('email')
try:
msg = EmailMessage(subject, message, from_addr, [dest_addr])
msg.content_subtype = 'html'
msg.send(fail_silently=False)
except SMTPException:
log.warning(u"Failure in sending verification status e-mail to %s", dest_addr)
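# Illustrative only (not part of the original module): the `context` dict this task
# expects carries the keys read above. The template path and variables below are
# hypothetical placeholders.
#
#     send_verification_status_email.delay({
#         'subject': 'Verification approved',
#         'template': 'emails/verification_approved.txt',
#         'email_vars': {'full_name': 'Jane Doe'},
#         'email': '[email protected]',
#     })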
@shared_task(
base=BaseSoftwareSecureTask,
bind=True,
default_retry_delay=settings.SOFTWARE_SECURE_REQUEST_RETRY_DELAY,
max_retries=settings.SOFTWARE_SECURE_RETRY_MAX_ATTEMPTS,
)
@set_code_owner_attribute
def send_request_to_ss_for_user(self, user_verification_id, copy_id_photo_from):
"""
Assembles a submission to Software Secure.
Keyword Arguments:
user_verification_id (int) SoftwareSecurePhotoVerification model object identifier.
copy_id_photo_from (SoftwareSecurePhotoVerification): If provided, re-send the ID photo
data from this attempt. This is used for re-verification, in which new face photos
are sent with previously-submitted ID photos.
Returns:
request.Response
"""
from .models import SoftwareSecurePhotoVerification
user_verification = SoftwareSecurePhotoVerification.objects.get(id=user_verification_id)
log.info('=>New Verification Task Received %r', user_verification.user.username)
try:
headers, body = user_verification.create_request(copy_id_photo_from)
# check out PROD-1395 for details on why system certificate paths are added for verification.
response = requests.post(
settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_URL"],
headers=headers,
data=simplejson.dumps(body, indent=2, sort_keys=True, ensure_ascii=False).encode('utf-8'),
verify=settings.VERIFY_STUDENT["SOFTWARE_SECURE"]['CERT_VERIFICATION_PATH']
)
return {
'response_ok': getattr(response, 'ok', False),
'response_text': getattr(response, 'text', '')
}
except Exception as exc: # pylint: disable=broad-except
log.error(
(
'Retrying sending request to Software Secure for user: %r, Receipt ID: %r '
'attempt#: %s of %s'
),
user_verification.user.username,
user_verification.receipt_id,
self.request.retries,
settings.SOFTWARE_SECURE_RETRY_MAX_ATTEMPTS,
)
log.error(str(exc))
self.retry()
| agpl-3.0 | -5,051,165,563,618,291,000 | 37.937931 | 106 | 0.667906 | false |
holgerd77/django-dynamic-scraper | tests/scraper/models.py | 1 | 1421 | #Stage 2 Update (Python 3)
from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
from builtins import str
from django.db import models
from dynamic_scraper.models import Scraper, SchedulerRuntime
from scrapy_djangoitem import DjangoItem
@python_2_unicode_compatible
class EventWebsite(models.Model):
name = models.CharField(max_length=200)
scraper = models.ForeignKey(Scraper, blank=True, null=True, on_delete=models.SET_NULL)
url = models.URLField()
scraper_runtime = models.ForeignKey(SchedulerRuntime, blank=True, null=True, on_delete=models.SET_NULL)
def __str__(self):
return self.name + " (" + str(self.id) + ")"
@python_2_unicode_compatible
class Event(models.Model):
title = models.CharField(max_length=200)
event_website = models.ForeignKey(EventWebsite, on_delete=models.CASCADE)
description = models.TextField(blank=True)
description2 = models.TextField(blank=True)
url = models.URLField(blank=True)
url2 = models.URLField(blank=True)
checker_runtime = models.ForeignKey(SchedulerRuntime, blank=True, null=True, on_delete=models.SET_NULL)
def __str__(self):
return self.title + " (" + str(self.id) + ")"
def detailed(self):
str = "title: {t}\n".format(t=self.title)
str += "event_website:"
return str
class EventItem(DjangoItem):
django_model = Event
| bsd-3-clause | -963,004,689,120,053,800 | 33.658537 | 107 | 0.708656 | false |
mjirik/io3d | io3d/misc.py | 1 | 8466 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# import sys
import os
from loguru import logger
import sys
import os.path
import numpy as np
from io import open
from .image import DataPlus
path_to_script = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(path_to_script, "./extern/sPickle"))
from .dili_subset import ndarray_to_list_in_structure
# from imma.image import resize_to_mm, resize_to_shape
def old_str_format_to_new(string):
"""
Convert an old-style format string to the new style. Works for digit
conversions only, e.g. %05d is converted to {:05d}.
:param string:
:return:
"""
import re
return re.sub(r"%(\d*d)", r"{:\1}", string)
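# Illustrative examples (not in the original source):
#
#     old_str_format_to_new("frame_%05d.png")   # -> "frame_{:05d}.png"
#     old_str_format_to_new("slice_%d.dcm")     # -> "slice_{:d}.dcm"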
def suggest_filename(file_path, exists=None):
"""
Check whether the path exists and, if so, append a number to its end.
For debugging you can pass ``exists`` explicitly instead of checking the filesystem.
"""
import os.path
import re
if not isinstance(exists, bool):
exists = os.path.exists(file_path)
if exists:
file_path, file_extension = os.path.splitext(file_path)
# print(file_path)
m = re.search(r"_\d+$", file_path)
if m is None:
# cislo = 2
new_cislo_str = "_2"
else:
cislostr = m.group()
cislo = int(cislostr[1:]) + 1
# it is a normal number
file_path = file_path[: -len(cislostr)]
new_cislo_str = "_" + str(cislo)
file_path = file_path + new_cislo_str + file_extension # .zfill(2)
# a bit of recursion: run the suggestion again on the new name
file_path = suggest_filename(file_path)
return file_path
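# Illustrative example (not in the original source), assuming the suggested
# names do not already exist on disk:
#
#     suggest_filename("output.pklz", exists=True)     # -> "output_2.pklz"
#     suggest_filename("output_2.pklz", exists=True)   # -> "output_3.pklz"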
def obj_from_file(filename="annotation.yaml", filetype="auto", yaml_typ="unsafe"):
""" Read object from file """
if filetype == "auto":
_, ext = os.path.splitext(filename)
filetype = ext[1:]
if filetype in ("yaml", "yml"):
from ruamel.yaml import YAML
# yaml = YAML(typ="unsafe")
yaml = YAML(typ=yaml_typ)
with open(filename, encoding="utf-8") as f:
obj = yaml.load(f)
if obj is None:
obj = {}
# import yaml
# with open(filename, encoding="utf-8") as f:
# intext = f.read()
# obj = yaml.load(intext)
elif filetype in ("pickle", "pkl", "pklz", "picklezip"):
fcontent = read_pkl_and_pklz(filename)
# import pickle
if sys.version_info[0] < 3:
import cPickle as pickle
else:
import _pickle as pickle
# import sPickle as pickle
if sys.version_info.major == 2:
obj = pickle.loads(fcontent)
else:
obj = pickle.loads(fcontent, encoding="latin1")
else:
logger.error("Unknown filetype " + filetype)
return obj
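# Illustrative usage (not in the original source): load a YAML annotation into a
# plain Python object; the file type is inferred from the extension when
# filetype="auto", and an empty YAML document yields {}. The path is hypothetical.
#
#     meta = obj_from_file("annotation.yaml")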
def read_pkl_and_pklz(filename):
"""
Try read zipped or not zipped pickle file
"""
fcontent = None
try:
import gzip
f = gzip.open(filename, "rb")
fcontent = f.read()
f.close()
except IOError as e:
# if the problem is in not gzip file
logger.info("Input gzip exception: " + str(e))
f = open(filename, "rb")
fcontent = f.read()
f.close()
except Exception as e:
# other problem
import traceback
logger.error("Input gzip exception: " + str(e))
logger.error(traceback.format_exc())
return fcontent
def obj_to_file(
obj,
filename,
filetype="auto",
ndarray_to_list=False,
squeeze=True,
yaml_typ="unsafe",
):
"""Writes annotation in file.
:param filetype:
auto
yaml
pkl, pickle
pklz, picklezip
:param ndarray_to_list: convert ndarrays in obj to lists
:param squeeze: squeeze ndarray
"""
# import json
# with open(filename, mode='w') as f:
# json.dump(annotation,f)
if type(obj) == DataPlus:
obj = dict(obj)
if ndarray_to_list:
obj = ndarray_to_list_in_structure(obj, squeeze=squeeze)
# write to yaml
d = os.path.dirname(os.path.abspath(filename))
if not os.path.exists(d):
os.makedirs(d)
if filetype == "auto":
_, ext = os.path.splitext(filename)
filetype = ext[1:]
if filetype in ("yaml", "yml"):
# import yaml
from ruamel.yaml import YAML
# yaml = YAML(typ="unsafe")
yaml = YAML(typ=yaml_typ)
with open(filename, "wt", encoding="utf-8") as f:
yaml.dump(obj, f)
# if sys.version_info.major == 2:
# with open(filename, 'wb') as f:
# yaml.dump(obj, f, encoding="utf-8")
# else:
# with open(filename, "w", encoding="utf-8") as f:
# yaml.dump(obj, f)
elif filetype in ("pickle", "pkl"):
f = open(filename, "wb")
logger.info("filename " + filename)
# if sys.version_info[0] < 3: import cPickle as pickle
# else: import _pickle as pickle
import pickle
pickle.dump(obj, f, -1)
f.close()
elif filetype in ("streamingpicklezip", "spklz"):
# this is not working :-(
import gzip
import sPickle as pickle
f = gzip.open(filename, "wb", compresslevel=1)
# f = open(filename, 'wb')
pickle.s_dump(obj, f)
f.close()
elif filetype in ("picklezip", "pklz"):
import gzip
if sys.version_info[0] < 3:
import cPickle as pickle
else:
import _pickle as pickle
f = gzip.open(filename, "wb", compresslevel=1)
# f = open(filename, 'wb')
pickle.dump(obj, f)
f.close()
elif filetype in ("mat"):
import scipy.io as sio
sio.savemat(filename, obj)
else:
logger.error("Unknown filetype " + filetype)
from imma.image import resize_to_mm, resize_to_shape
# def resize_to_mm(data3d, voxelsize_mm, new_voxelsize_mm, mode="nearest", order=1):
# """
# Function can resize data3d or segmentation to specifed voxelsize_mm
# :new_voxelsize_mm: requested voxelsize. List of 3 numbers, also
# can be a string 'orig', 'orgi*2' and 'orgi*4'.
#
# :voxelsize_mm: size of voxel
# :mode: default is 'nearest'
# """
# import scipy
# import scipy.ndimage
#
# if np.all(list(new_voxelsize_mm) == "orig"):
# new_voxelsize_mm = np.array(voxelsize_mm)
# elif np.all(list(new_voxelsize_mm) == "orig*2"):
# new_voxelsize_mm = np.array(voxelsize_mm) * 2
# elif np.all(list(new_voxelsize_mm) == "orig*4"):
# new_voxelsize_mm = np.array(voxelsize_mm) * 4
# # vx_size = np.array(metadata['voxelsize_mm']) * 4
#
# zoom = voxelsize_mm / (1.0 * np.array(new_voxelsize_mm))
# data3d_res = scipy.ndimage.zoom(data3d, zoom, mode=mode, order=order).astype(
# data3d.dtype
# )
# return data3d_res
def suits_with_dtype(mn, mx, dtype):
"""
Check whether a range of values can be stored in the given data type.
:param mn: range minimum
:param mx: range maximum
:param dtype:
:return:
"""
type_info = np.iinfo(dtype)
if mx <= type_info.max and mn >= type_info.min:
return True
else:
return False
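# Illustrative examples (not in the original source):
#
#     suits_with_dtype(0, 200, np.uint8)    # True  (0..255 covers 0..200)
#     suits_with_dtype(-1, 200, np.uint8)   # False (uint8 cannot hold -1)
#     suits_with_dtype(-1, 200, np.int16)   # True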
def use_economic_dtype(data3d, slope=1, inter=0, dtype=None):
""" Use more economic integer-like dtype if it is possible.
:param data3d:
:param dtype: if dtype is not used, the automatic is used
:return:
"""
if dtype is None:
dtype = data3d.dtype
if issubclass(dtype.type, np.integer):
mn = data3d.min() * slope + inter
mx = data3d.max() * slope + inter
if suits_with_dtype(mn, mx, dtype=np.uint8):
dtype = np.uint8
elif suits_with_dtype(mn, mx, dtype=np.int8):
dtype = np.int8
elif suits_with_dtype(mn, mx, dtype=np.uint16):
dtype = np.uint16
elif suits_with_dtype(mn, mx, dtype=np.int16):
dtype = np.int16
elif suits_with_dtype(mn, mx, dtype=np.uint32):
dtype = np.uint32
elif suits_with_dtype(mn, mx, dtype=np.int32):
dtype = np.int32
# new_data3d = ((np.float(slope) * data3d) + np.float(inter)).astype(dtype)
if slope == 1 and inter == 0:
# this can prevent out of memmory
new_data3d = data3d.astype(dtype)
else:
new_data3d = ((slope * data3d) + inter).astype(dtype)
return new_data3d
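# Illustrative example (not in the original source): an int64 volume whose values
# fit into 0..255 is downcast to uint8, shrinking the memory footprint without
# changing the stored values.
#
#     data3d = np.array([[0, 10], [200, 255]], dtype=np.int64)
#     small = use_economic_dtype(data3d)   # small.dtype == np.uint8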
| mit | -4,283,288,989,103,422,000 | 27.505051 | 84 | 0.568037 | false |
mwatts15/YAROM | examples/adding_data.py | 1 | 1499 | import yarom as Y
import rdflib
Y.connect({'rdf.namespace': rdflib.Namespace("http://example.org/")})
def p1():
mary = Y.DataObject(key='mary')
fido = Y.DataObject(key='fido')
mary.relate('has_pet', fido)
mary.relate('age', Y.Quantity(23, 'years'))
mary.relate('email', "[email protected]")
Y.print_graph(mary.get_defined_component())
def p2_p3():
FOAF = rdflib.Namespace("http://xmlns.com/foaf/0.1/")
Y.config('rdf.namespace_manager').bind('foaf', FOAF)
class Person(Y.DataObject):
rdf_type = FOAF['Person']
class Dog(Y.DataObject):
pass
class FOAFAge(Y.DatatypeProperty):
link = FOAF['age']
linkName = "foaf_age"
owner_type = Person
multiple = False # XXX: Default is True
class FOAFMbox(Y.UnionProperty):
link = FOAF['mbox']
linkName = "foaf_mbox"
owner_type = Person # XXX: Not defining agent
multiple = True
Y.remap()
mary = Person(key='mary')
fido = Dog(key='fido')
mary.relate('has_pet', fido)
mary.relate('age', Y.Quantity(23, 'years'), FOAFAge)
mary.relate('email', "[email protected]", FOAFMbox)
Y.print_graph(mary.get_defined_component())
mary.save()
q_person = Person()
q_person.relate('has_pet', Dog())
for p in q_person.load():
p.relate('dog_lover', True)
p.save()
q_person = Person()
q_person.relate('dog_lover', True)
for p in q_person.load():
print(p)
p1()
p2_p3()
| bsd-3-clause | -940,927,674,706,282,800 | 25.298246 | 69 | 0.597065 | false |
archifix/settings | sublime/Packages/Jedi - Python autocompletion/dependencies/jedi/__init__.py | 1 | 1830 | """
Jedi is a static analysis tool for Python that can be used in IDEs/editors. Its
historic focus is autocompletion, but it now does static analysis as well.
Jedi is fast and is very well tested. It understands Python on a deeper level
than all other static analysis frameworks for Python.
Jedi has support for two different goto functions. It's possible to search for
related names and to list all names in a Python file and infer them. Jedi
understands docstrings and you can use Jedi autocompletion in your REPL as
well.
Jedi uses a very simple API to connect with IDE's. There's a reference
implementation as a `VIM-Plugin <https://github.com/davidhalter/jedi-vim>`_,
which uses Jedi's autocompletion. We encourage you to use Jedi in your IDEs.
It's really easy.
To give you a simple example how you can use the Jedi library, here is an
example for the autocompletion feature:
>>> import jedi
>>> source = '''
... import datetime
... datetime.da'''
>>> script = jedi.Script(source, 3, len('datetime.da'), 'example.py')
>>> script
<Script: 'example.py' ...>
>>> completions = script.completions()
>>> completions #doctest: +ELLIPSIS
[<Completion: date>, <Completion: datetime>, ...]
>>> print(completions[0].complete)
te
>>> print(completions[0].name)
date
As you see Jedi is pretty simple and allows you to concentrate on writing a
good text editor, while still having very good IDE features for Python.
"""
__version__ = '0.13.2'
from jedi.api import Script, Interpreter, set_debug_function, \
preload_module, names
from jedi import settings
from jedi.api.environment import find_virtualenvs, find_system_environments, \
get_default_environment, InvalidPythonEnvironment, create_environment, \
get_system_environment
from jedi.api.exceptions import InternalError
| mit | 4,224,056,168,648,134,000 | 37.93617 | 79 | 0.737705 | false |