repo_name (string, len 5-92) | path (string, len 4-232) | copies (string, 19 classes) | size (string, len 4-7) | content (string, len 721-1.04M) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
evilkost/cdic | src/cdic_project/cdic/logic/copr_logic.py | 1 | 1290 | # coding: utf-8
from flask import abort
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.orm.query import Query
from .. import db
from ..logic.event_logic import create_project_event
from ..logic.project_logic import ProjectLogic
from ..models import Project, User, LinkedCopr
def check_link_exists(project: Project, username: str, coprname: str) -> bool:
query = (
LinkedCopr.query
.filter(LinkedCopr.project_id == project.id)
.filter(LinkedCopr.username == username)
.filter(LinkedCopr.coprname == coprname)
)
if len(query.all()) > 0:
return True
else:
return False
def create_link(project: Project, username: str, coprname: str,) -> LinkedCopr:
link = LinkedCopr(
project=project,
username=username,
coprname=coprname,
)
event = create_project_event(
project, "Linked copr: {}/{}".format(username, coprname),
event_type="created_link")
ProjectLogic.update_patched_dockerfile(project)
db.session.add_all([link, event, project])
return link
def get_link_by_id(link_id: int) -> LinkedCopr:
return LinkedCopr.query.get(link_id)
# def unlink_copr(link_id: int):
# link = get_link_by_id(link_id)
# if link:
# db.session.
| gpl-3.0 | 4,390,492,934,067,052,000 | 25.326531 | 79 | 0.66124 | false |
MilchReis/PicSort | src/gui/components.py | 1 | 4356 | # -*- coding: utf-8 -*-
'''
@author: nick
'''
import pygame
def isIn(objPos, objBounds, point):
if (point[0] > objPos[0] and point[0] < objPos[0] + objBounds[0]) and (point[1] > objPos[1] and point[1] < objPos[1] + objBounds[1]):
return True
else:
return False
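# For example (illustrative values), isIn((10, 10), (100, 30), (50, 20)) returns
# True: the point (50, 20) lies inside the 100x30 box whose top-left corner is
# at (10, 10).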
class Component():
def __init__(self):
self.position = (None, None)
self.bounds = (None, None)
self.action = None
self.colorBg = (200, 200, 200)
self.colorBorder = (10, 10, 10)
def update(self, event):
pass
def refresh(self):
pass
def render(self, surface):
pass
class InfoField(Component):
def __init__(self):
Component.__init__(self)
self.text = ""
self.alpha = 128
self.colorBg = (0, 0, 0)
self.colorFont = (255, 255, 255)
self.fontSize = 14
self.font = pygame.font.SysFont("sans", self.fontSize)
self.font.set_bold(True)
self.event = None
def refresh(self):
if not self.event == None:
self.event()
def update(self, event):
pass
def render(self, surface):
if not(self.text == None) and not(self.text == ""):
text_width, text_height = self.font.size(self.text)
s = pygame.Surface((text_width + 20, text_height + 10))
s.set_alpha(self.alpha)
s.fill(self.colorBg)
surface.blit(s, self.position)
txt = self.font.render(self.text, 1, self.colorFont)
surface.blit(txt, (self.position[0] + 10, self.position[1] + 5))
class InputField(Component):
def __init__(self):
Component.__init__(self)
self.cursorTime = 500
self.cursorHide = False
self.text = ""
self.active = False
def update(self, event):
mouse = pygame.mouse.get_pos()
if event.type == pygame.KEYUP and self.active:
if event.key == pygame.K_BACKSPACE:
self.text = self.text[:-1]
elif event.key == pygame.K_KP_ENTER and not self.action == None:
self.action()
else:
if event.key in range(256):
self.text += chr(event.key)
if event.type == pygame.MOUSEBUTTONDOWN and isIn(self.position, self.bounds, mouse):
self.active = True
def render(self, pygamesurface):
# bg
pygame.draw.rect(pygamesurface, self.colorBg, (self.position[0], self.position[1], self.bounds[0], self.bounds[1]), 0)
# border
pygame.draw.rect(pygamesurface, self.colorBorder, (self.position[0], self.position[1], self.bounds[0], self.bounds[1]), 1)
# text
if len(self.text) > 0:
myfont = pygame.font.SysFont("sans", 12 )
txt = myfont.render( self.text, 1, self.colorBorder)
pygamesurface.blit( txt, (self.position[0] +10 , self.position[1] + 5) )
class Button(Component):
def __init__(self):
Component.__init__(self)
self.text = None
self.colorBg = (30, 30, 30)
self.buttonColorHover = (40, 40, 40)
self.buttonColorText = (255, 255, 255)
self.colorBorder = (10, 10, 10)
def update(self, event):
mouse = pygame.mouse.get_pos()
if isIn(self.position, self.bounds, mouse):
if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1 and not(self.action == None):
self.action();
def render(self, pygamesurface):
mouse = pygame.mouse.get_pos()
color = self.colorBg
if isIn(self.position, self.bounds, mouse):
color = self.buttonColorHover
# bg
pygame.draw.rect( pygamesurface, color, (self.position[0], self.position[1], self.bounds[0], self.bounds[1]), 0 )
# border
pygame.draw.rect( pygamesurface, self.colorBorder, (self.position[0], self.position[1], self.bounds[0], self.bounds[1]), 1 )
# text
myfont = pygame.font.SysFont("sans", 12 )
txt = myfont.render( self.text, 1, self.buttonColorText)
pygamesurface.blit( txt, (self.position[0] +10 , self.position[1] + 5) )
| gpl-2.0 | 8,014,038,098,838,877,000 | 28.632653 | 137 | 0.541093 | false |
csangani/ReproducingSprout | create_trace.py | 1 | 1774 | ## Create a network trace using specified distribution for packet intervals
import numpy
import os
import random
import sys
UPLINK_TRACE_SIZE = 30000
DOWNLINK_TRACE_SIZE = 350000
TRACES_PATH = 'cleaned_traces'
def create_trace(d_name, d_function, mode):
intervals = [int(round(abs(d_function()))) for _ in range(UPLINK_TRACE_SIZE if mode == 'uplink' else DOWNLINK_TRACE_SIZE)]
values = []
current_value = 0
for i in intervals:
values += [current_value]
current_value += i
with open('%s/%s/%s.pps' % (TRACES_PATH, d_name, mode), 'w+') as f:
for v in values:
f.write('%s\n' % v)
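# Worked example: intervals [3, 1, 4] are written out as the running totals
# [0, 3, 4] -- each line of the generated .pps file is the cumulative sum of
# the intervals that precede it.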
if __name__ == '__main__':
if len(sys.argv) != 2:
print 'Usage: python create_trace.py <distribution>'
sys.exit(1)
d_name = sys.argv[1]
if not os.path.exists('%s/%s' % (TRACES_PATH, d_name)):
os.makedirs('%s/%s' % (TRACES_PATH, d_name))
if d_name == 'gauss':
uplink_function = lambda: random.gauss(14.383, 298.962)
downlink_function = lambda: random.gauss(2.320, 11.526)
elif d_name == 'expovariate':
uplink_function = lambda: random.expovariate(1 / 14.383)
downlink_function = lambda: random.expovariate(1 / 2.320)
elif d_name == 'poisson':
uplink_function = lambda: numpy.random.poisson(14.383)
downlink_function = lambda: numpy.random.poisson(2.320)
elif d_name == 'uniform':
uplink_function = lambda: random.uniform(0,30)
downlink_function = lambda: random.uniform(0,10)
else:
print "Unrecognized distribution"
sys.exit(1)
create_trace(d_name, uplink_function, 'uplink')
create_trace(d_name, downlink_function, 'downlink')
| mit | -5,686,957,577,726,436,000 | 30.678571 | 126 | 0.603157 | false |
fle-internal/content-pack-maker | minimize-content-pack.py | 1 | 1088 | """
minimize-content-pack
Remove assessment items, subtitles and po files from a content pack.
Usage:
minimize-content-pack.py <old-content-pack-path> <out-path>
"""
import zipfile
from pathlib import Path
from docopt import docopt
ITEMS_TO_TRANSFER = [
"metadata.json",
"content.db",
"backend.mo",
"frontend.mo",
]
def minimize_content_pack(oldpackpath: Path, outpath: Path):
with zipfile.ZipFile(str(oldpackpath)) as oldzf,\
zipfile.ZipFile(str(outpath), "w") as newzf:
items = list(i for i in oldzf.namelist()
for will_be_transferred in ITEMS_TO_TRANSFER
if will_be_transferred in i)
for item in items:
bytes = oldzf.read(item)
newzf.writestr(item, bytes)
def main():
args = docopt(__doc__)
contentpackpath = Path(args["<old-content-pack-path>"])
outpath = Path(args["<out-path>"] or
"out/minimal.zip")
outpath = outpath.expanduser()
minimize_content_pack(contentpackpath, outpath)
if __name__ == "__main__":
main()
| bsd-2-clause | 1,715,905,646,846,452,200 | 22.652174 | 68 | 0.620404 | false |
openunix/fuse-cygwin | test/test_ctests.py | 1 | 3270 | #!/usr/bin/env python3
if __name__ == '__main__':
import pytest
import sys
sys.exit(pytest.main([__file__] + sys.argv[1:]))
import subprocess
import pytest
import platform
import sys
from distutils.version import LooseVersion
from util import (wait_for_mount, umount, cleanup, base_cmdline,
safe_sleep, basename, fuse_test_marker, fuse_caps,
fuse_proto)
from os.path import join as pjoin
import os.path
pytestmark = fuse_test_marker()
@pytest.mark.skipif('FUSE_CAP_WRITEBACK_CACHE' not in fuse_caps,
reason='not supported by running kernel')
@pytest.mark.parametrize("writeback", (False, True))
def test_write_cache(tmpdir, writeback):
if writeback and LooseVersion(platform.release()) < '3.14':
pytest.skip('Requires kernel 3.14 or newer')
# This test hangs under Valgrind when running close(fd)
# test_write_cache.c:test_fs(). Most likely this is because of an internal
# deadlock in valgrind, it probably assumes that until close() returns,
# control does not come to the program.
mnt_dir = str(tmpdir)
cmdline = [ pjoin(basename, 'test', 'test_write_cache'),
mnt_dir ]
if writeback:
cmdline.append('-owriteback_cache')
subprocess.check_call(cmdline)
names = [ 'notify_inval_inode', 'invalidate_path' ]
if fuse_proto >= (7,15):
names.append('notify_store_retrieve')
@pytest.mark.skipif(fuse_proto < (7,12),
reason='not supported by running kernel')
@pytest.mark.parametrize("name", names)
@pytest.mark.parametrize("notify", (True, False))
def test_notify1(tmpdir, name, notify):
mnt_dir = str(tmpdir)
cmdline = base_cmdline + \
[ pjoin(basename, 'example', name),
'-f', '--update-interval=1', mnt_dir ]
if not notify:
cmdline.append('--no-notify')
mount_process = subprocess.Popen(cmdline)
try:
wait_for_mount(mount_process, mnt_dir)
filename = pjoin(mnt_dir, 'current_time')
with open(filename, 'r') as fh:
read1 = fh.read()
safe_sleep(2)
with open(filename, 'r') as fh:
read2 = fh.read()
if notify:
assert read1 != read2
else:
assert read1 == read2
except:
cleanup(mnt_dir)
raise
else:
umount(mount_process, mnt_dir)
@pytest.mark.skipif(fuse_proto < (7,12),
reason='not supported by running kernel')
@pytest.mark.parametrize("notify", (True, False))
def test_notify_file_size(tmpdir, notify):
mnt_dir = str(tmpdir)
cmdline = base_cmdline + \
[ pjoin(basename, 'example', 'invalidate_path'),
'-f', '--update-interval=1', mnt_dir ]
if not notify:
cmdline.append('--no-notify')
mount_process = subprocess.Popen(cmdline)
try:
wait_for_mount(mount_process, mnt_dir)
filename = pjoin(mnt_dir, 'growing')
size = os.path.getsize(filename)
safe_sleep(2)
new_size = os.path.getsize(filename)
if notify:
assert new_size > size
else:
assert new_size == size
except:
cleanup(mnt_dir)
raise
else:
umount(mount_process, mnt_dir)
| gpl-2.0 | -747,641,563,466,654,500 | 32.71134 | 78 | 0.607645 | false |
mouton5000/DiscreteEventApplicationEditor | test/testsArithmeticExpressions/MathFunctions/testAbs.py | 1 | 2775 | __author__ = 'mouton'
from triggerExpressions import Evaluation
from unittest import TestCase
from math import pi, sqrt
from arithmeticExpressions import ALitteral, Func, UndefinedLitteral, SelfLitteral
from database import Variable
class TestAbs(TestCase):
@classmethod
def setUpClass(cls):
import grammar.grammars
grammar.grammars.compileGrammars()
def setUp(self):
self.eval1 = Evaluation()
self.eval2 = Evaluation()
self.eval2[Variable('X')] = pi
self.eval2[Variable('T')] = 'abc'
self.eval2[Variable('Z')] = 12.0
def test_integer_abs_with_empty_evaluation(self):
a1 = ALitteral(1)
absr = Func(a1, abs)
self.assertEqual(absr.value(self.eval1), abs(1))
def test_integer_abs_with_non_empty_evaluation(self):
a1 = ALitteral(1)
absr = Func(a1, abs)
self.assertEqual(absr.value(self.eval2), abs(1))
def test_float_abs_with_empty_evaluation(self):
a1 = ALitteral(pi)
absr = Func(a1, abs)
self.assertEqual(absr.value(self.eval1), abs(pi))
def test_float_abs_with_non_empty_evaluation(self):
a1 = ALitteral(pi)
absr = Func(a1, abs)
self.assertEqual(absr.value(self.eval2), abs(pi))
def test_string_abs_with_empty_evaluation(self):
a1 = ALitteral('abc')
absr = Func(a1, abs)
with self.assertRaises(TypeError):
absr.value(self.eval1)
def test_string_abs_with_non_empty_evaluation(self):
a1 = ALitteral('abc')
absr = Func(a1, abs)
with self.assertRaises(TypeError):
absr.value(self.eval2)
def test_undefined_abs_with_empty_evaluation(self):
a1 = UndefinedLitteral()
absr = Func(a1, abs)
with self.assertRaises(TypeError):
absr.value(self.eval1)
def test_undefined_abs_with_non_empty_evaluation(self):
a1 = UndefinedLitteral()
absr = Func(a1, abs)
with self.assertRaises(TypeError):
absr.value(self.eval2)
def test_evaluated_variable_abs(self):
a1 = ALitteral(Variable('X'))
absr = Func(a1, abs)
self.assertEqual(absr.value(self.eval2), abs(pi))
def test_unevaluated_variable_abs(self):
a1 = ALitteral(Variable('Y'))
absr = Func(a1, abs)
with self.assertRaises(ValueError):
absr.value(self.eval2)
def test_self_litteral_abs_with_empty_evaluation(self):
a1 = SelfLitteral()
absr = Func(a1, abs)
self.assertEqual(absr.value(self.eval1, pi), abs(pi))
def test_self_litteral_abs_with_non_empty_evaluation(self):
a1 = SelfLitteral()
absr = Func(a1, abs)
self.assertEqual(absr.value(self.eval2, pi), abs(pi)) | mit | 7,408,478,610,740,688,000 | 30.908046 | 82 | 0.625586 | false |
emptyewer/DEEPN | functions/stat_graph.py | 1 | 3795 | import math
import re
import matplotlib as mpl
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
import matplotlib.colors as colors
import matplotlib.cm as cm
import numpy as np
from matplotlib.figure import Figure
from PyQt4 import QtGui
class GraphFrame(QtGui.QFrame):
def __init__(self, filename, parent=None):
super(GraphFrame, self).__init__(parent)
self.setFrameShape(QtGui.QFrame.NoFrame)
self.parent = parent
self.graph_view = GraphView(filename, self)
def resizeEvent(self, event):
self.graph_view.setGeometry(self.rect())
class GraphView(QtGui.QWidget):
def __init__(self, filename, parent=None):
super(GraphView, self).__init__(parent)
self.dpi = 300
self.filename = filename
self.data = None
self.fig = Figure((4.5, 4.5), dpi=self.dpi)
self.axes = self.fig.add_subplot(111)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self)
self.canvas.mpl_connect('button_press_event', self._onpick)
self.layout = QtGui.QVBoxLayout()
self.layout.addWidget(self.canvas)
self.layout.setStretchFactor(self.canvas, 1)
self.setLayout(self.layout)
self.x = []
self.y = []
self.read_data()
self.load_data()
self.canvas.show()
self.set_parameters()
def read_data(self):
fh = open(self.filename)
for line in fh.readlines():
if not re.match(r'[A-Za-z]', line):
values = line.strip().split(',')
try:
self.x.append(float(values[0]))
self.y.append(float(values[2]))
except:
pass
self.x = np.array(self.x)
self.y = np.array(self.y)
def load_data(self):
self.bar = self.axes.plot(self.x, self.y, linewidth=1.0)
def _get_clicked_residues(self, event):
xmin, xmax = self.axes.get_xlim()
return(int(math.ceil(event.xdata - xmin))-1)
def _onpick(self, event):
pass
def set_parameters(self):
self.axes.tick_params(axis=u'both', which=u'both', length=0)
self.axes.set_xlim(min(self.x), max(self.x))
self.axes.set_ylim(0, max(self.y) + 1)
self.axes.set_xlabel('Threshold')
self.axes.set_ylabel('Overdispersion')
# fractions = self.y / max(self.y)
# normalized_colors = colors.Normalize(fractions.min(), fractions.max())
# count = 0
# for rect in self.bar:
# c = cm.jet(normalized_colors(fractions[count]))
# rect.set_facecolor(c)
# count += 1
# self.fig.patch.set_facecolor((0.886, 0.886, 0.886))
ticks_font = mpl.font_manager.FontProperties(family='times new roman', style='normal', size=12,
weight='normal', stretch='normal')
labels = [self.axes.title, self.axes.xaxis.label, self.axes.yaxis.label]
labels += self.axes.get_xticklabels() + self.axes.get_yticklabels()
for item in labels:
item.set_fontproperties(ticks_font)
item.set_fontsize(4)
self.fig.set_size_inches(30, self.fig.get_figheight(), forward=True)
#
# def update_graph(self):
# self.axes.clear()
# if self.pmap.use_ca:
# self.xcoor = self.pmap.residue_numbers_ca[self.pmap.parent.current_model]
# else:
# self.xcoor = self.pmap.residue_numbers_cb[self.pmap.parent.current_model]
# self.ycoor = self.pmap.histogram_maps[self.pmap.parent.current_model]
# self.bar = self.axes.bar(self.xcoor, self.ycoor, width=1.0, linewidth=0)
# self.set_parameters()
# self.canvas.draw()
| mit | -6,688,862,044,473,764,000 | 37.72449 | 103 | 0.594466 | false |
rookies/dmx2serial | DMX.py | 1 | 4980 | #!/usr/bin/python3
from enum import IntEnum
import struct
class Flag(IntEnum):
Payload = 0b10000000
Success = 0b01000000
Resend = 0b00100000
Configurate = 0b00000100
Hello = 0b00000010
Parity = 0b00000001
class FlagSet(object):
def __init__(self, flags=0x00):
flags = int(flags)
if flags < 0 or flags > 255:
raise ValueError("Invalid flags.")
self.flags = flags
def __str__(self):
return "{}({})".format(self.__class__.__name__, ",".join(['%s=%s' % (k, v) for (k, v) in self.asDict().items()]))
def asDict(self):
res = {}
for f in Flag:
if self.isSet(f):
res[f.name] = 1
else:
res[f.name] = 0
return res
def getBitfield(self):
return self.flags
def set(self, flag):
if not isinstance(flag, Flag):
raise ValueError("Please use instance of Flag.")
self.flags |= flag
def unset(self, flag):
if not isinstance(flag, Flag):
raise ValueError("Please use instance of Flag.")
self.flags &= ~flag
def toggle(self, flag):
if not isinstance(flag, Flag):
raise ValueError("Please use instance of Flag.")
self.flags ^= flag
def isSet(self, flag):
if not isinstance(flag, Flag):
raise ValueError("Please use instance of Flag.")
		return (self.flags & flag) != 0
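# Illustrative use of FlagSet (values follow from the Flag enum above):
#   fs = FlagSet()
#   fs.set(Flag.Hello)        # bitfield becomes 0b00000010
#   fs.isSet(Flag.Hello)      # True
#   fs.isSet(Flag.Parity)     # False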
class Packet(object):
checksum = 0x0
def __init__(self, version=0x00, flags=0x00, universe=0x00, channel=0x0000, value=0x00):
self.setVersion(version)
self.flags = FlagSet(flags)
self.setUniverse(universe)
self.setChannel(channel)
self.setValue(value)
def __str__(self):
return "{}(version={},flags={},universe={},channel={},value={},checksum={})".format(self.__class__.__name__, self.version, str(self.flags), self.universe, self.channel, self.value, self.checksum)
def getVersion(self): return self.version
def getFlags(self): return self.flags
def getUniverse(self): return self.universe
def getChannel(self): return self.channel
def getValue(self): return self.value
def setVersion(self, version):
version = int(version)
if version < 0 or version > 255:
raise ValueError("Invalid version.")
self.version = version
def setUniverse(self, universe):
universe = int(universe)
if universe < 0 or universe > 255:
raise ValueError("Invalid universe.")
self.universe = universe
def setChannel(self, channel):
channel = int(channel)
if channel < 0 or channel > 65535:
raise ValueError("Invalid channel.")
self.channel = channel
def setValue(self, value):
value = int(value)
if value < 0 or value > 255:
raise ValueError("Invalid value.")
self.value = value
def calculateParity(self):
self.flags.unset(Flag.Parity)
odd = (bin(self.version).count("1") + bin(self.flags.getBitfield()).count("1")) % 2
		if odd == 1:
self.flags.set(Flag.Parity)
def checkParity(self):
odd = (bin(self.version).count("1") + bin(self.flags.getBitfield()).count("1")) % 2
		return odd == 0
def calculateChecksum(self):
pass #TODO#
def checkChecksum(self):
pass #TODO#
def serialize(self):
if self.flags.isSet(Flag.Payload):
return struct.pack(
"<BBBHB",
self.version,
self.flags.getBitfield(),
self.universe,
self.channel,
self.value
)
else:
return struct.pack(
"<BB",
self.version,
self.flags.getBitfield()
)
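	# Wire layout produced by serialize() (little-endian, per the struct format
	# strings above): with the Payload flag set, a packet is 6 bytes --
	# version (1B), flags (1B), universe (1B), channel (2B) and value (1B);
	# without it, only version and flags are sent (2 bytes).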
def deserialize(self, data):
pass #TODO#
class PacketFactory(object):
@staticmethod
def createHsAsk():
return Packet(flags=(Flag.Hello | Flag.Parity))
@staticmethod
def createHsAnswer(success, resend):
p = Packet(version=1, flags=Flag.Hello)
if success:
p.flags.set(Flag.Success)
if resend:
p.flags.set(Flag.Resend)
p.calculateParity()
return p
@staticmethod
def createChSet(universe, channel, value):
p = Packet(version=1, flags=Flag.Payload, universe=universe, channel=channel, value=value)
p.calculateChecksum()
return p
@staticmethod
def createChAnswer(success, resend):
p = Packet(version=1)
if success:
p.flags.set(Flag.Success)
if resend:
p.flags.set(Flag.Resend)
p.calculateParity()
return p
@staticmethod
def createCfgAnswer(success, resend):
p = Packet(version=1, flags=Flag.Configurate)
if success:
p.flags.set(Flag.Success)
if resend:
p.flags.set(Flag.Resend)
p.calculateParity()
return p
if __name__ == "__main__":
#p = Packet(version=1, flags=(Flag.Payload | Flag.Hello))
#print(p)
#print(p.checkParity())
#p.calculateParity()
#print(p)
#print(p.checkParity())
print(" HsAsk():", PacketFactory.createHsAsk())
print(" HsAnswer(1):", PacketFactory.createHsAnswer(True))
print(" HsAnswer(0):", PacketFactory.createHsAnswer(False))
print(" ChSet(...):", PacketFactory.createChSet(7, 10, 255))
print(" ChAnswer(1):", PacketFactory.createChAnswer(True))
print(" ChAnswer(0):", PacketFactory.createChAnswer(False))
print("CfgAnswer(1):", PacketFactory.createCfgAnswer(True))
print("CfgAnswer(0):", PacketFactory.createCfgAnswer(False))
| mit | -8,310,246,874,338,883,000 | 25.349206 | 197 | 0.675301 | false |
philipn/i-am-cc | cc/cc/settings.py | 1 | 5647 | # Django settings for cc project.
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': '', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(os.path.split(os.path.abspath(__file__))[0], '..', '..', 'static')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(os.path.split(os.path.abspath(__file__))[0], 'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'cc.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'cc.wsgi.application'
TEMPLATE_DIRS = (
os.path.join(os.path.split(os.path.abspath(__file__))[0], '..', '..', 'templates'),
)
LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/setup/'
AUTHENTICATION_BACKENDS = (
'profiles.auth.InstagramBackend',
'django.contrib.auth.backends.ModelBackend',
)
from django.template.defaultfilters import slugify
SOCIAL_AUTH_COMPLETE_URL_NAME = 'socialauth_complete'
SOCIAL_AUTH_ASSOCIATE_URL_NAME = 'associate_complete'
SOCIAL_AUTH_DEFAULT_USERNAME = lambda u: slugify(u)
SOCIAL_AUTH_EXTRA_DATA = True
SOCIAL_AUTH_CHANGE_SIGNAL_ONLY = True
SOCIAL_AUTH_ASSOCIATE_BY_MAIL = True
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
'social_auth',
'tastypie',
# our apps
'profiles',
'external_apis',
'auth',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
try:
from localsettings import *
except:
pass
| mit | 999,494,496,442,481,700 | 31.085227 | 100 | 0.680538 | false |
distributed-system-analysis/pbench | lib/pbench/test/unit/server/conftest.py | 1 | 7414 | import datetime
from http import HTTPStatus
import os
import pytest
import shutil
import tempfile
from pathlib import Path
from posix import stat_result
from stat import ST_MTIME
from pbench.server.api import create_app, get_server_config
from pbench.test.unit.server.test_user_auth import login_user, register_user
from pbench.server.api.auth import Auth
from pbench.server.database.database import Database
from pbench.server.database.models.template import Template
server_cfg_tmpl = """[DEFAULT]
install-dir = {TMP}/opt/pbench-server
default-host = pbench.example.com
[pbench-server]
pbench-top-dir = {TMP}/srv/pbench
[Postgres]
db_uri = sqlite:///:memory:
[elasticsearch]
host = elasticsearch.example.com
port = 7080
[graphql]
host = graphql.example.com
port = 7081
[logging]
logger_type = file
# We run with DEBUG level logging during the server unit tests to help
# verify we are not emitting too many logs.
logging_level = DEBUG
[Indexing]
index_prefix = unit-test
###########################################################################
# The rest will come from the default config file.
[config]
path = %(install-dir)s/lib/config
files = pbench-server-default.cfg
"""
@pytest.fixture(scope="session", autouse=True)
def setup(request, pytestconfig):
"""Test package setup for pbench-server"""
# Create a single temporary directory for the "/srv/pbench" and
# "/opt/pbench-server" directories.
TMP = tempfile.TemporaryDirectory(suffix=".d", prefix="pbench-server-unit-tests.")
tmp_d = Path(TMP.name)
srv_pbench = tmp_d / "srv" / "pbench"
pbench_tmp = srv_pbench / "tmp"
pbench_tmp.mkdir(parents=True, exist_ok=True)
pbench_logs = srv_pbench / "logs"
pbench_logs.mkdir(parents=True, exist_ok=True)
pbench_recv = srv_pbench / "pbench-move-results-receive" / "fs-version-002"
pbench_recv.mkdir(parents=True, exist_ok=True)
opt_pbench = tmp_d / "opt" / "pbench-server"
pbench_bin = opt_pbench / "bin"
pbench_bin.mkdir(parents=True, exist_ok=True)
pbench_cfg = opt_pbench / "lib" / "config"
pbench_cfg.mkdir(parents=True, exist_ok=True)
pbench_archive = srv_pbench / "archive" / "fs-version-001"
pbench_archive.mkdir(parents=True, exist_ok=True)
# "Install" the default server configuration file.
shutil.copyfile(
"./server/lib/config/pbench-server-default.cfg",
str(pbench_cfg / "pbench-server-default.cfg"),
)
cfg_file = pbench_cfg / "pbench-server.cfg"
with cfg_file.open(mode="w") as fp:
fp.write(server_cfg_tmpl.format(TMP=TMP.name))
pytestconfig.cache.set("TMP", TMP.name)
pytestconfig.cache.set("_PBENCH_SERVER_CONFIG", str(cfg_file))
def teardown():
"""Test package teardown for pbench-server"""
TMP.cleanup()
request.addfinalizer(teardown)
@pytest.fixture
def server_config(pytestconfig, monkeypatch):
"""
Mock a pbench-server.cfg configuration as defined above.
Args:
pytestconfig: pytest environmental configuration fixture
monkeypatch: testing environment patch fixture
Returns:
a PbenchServerConfig object the test case can use
"""
cfg_file = pytestconfig.cache.get("_PBENCH_SERVER_CONFIG", None)
monkeypatch.setenv("_PBENCH_SERVER_CONFIG", cfg_file)
server_config = get_server_config()
return server_config
@pytest.fixture
def client(server_config):
"""A test client for the app.
NOTE: the db_session fixture does something similar, but with implicit
cleanup after the test, and without the Flask app setup DB tests don't
require.
"""
app = create_app(server_config)
app_client = app.test_client()
app_client.logger = app.logger
app_client.config = app.config
app_client.debug = True
app_client.testing = True
return app_client
@pytest.fixture
def db_session(server_config):
"""
Construct a temporary DB session for the test case that will reset on
completion.
NOTE: the client fixture does something similar, but without the implicit
cleanup, and with the addition of a Flask context that non-API tests don't
require.
Args:
server_config: pbench-server.cfg fixture
"""
Database.init_db(server_config, None)
yield
Database.db_session.remove()
@pytest.fixture
def user_ok(monkeypatch):
"""
Override the Auth.validate_user method to pass without checking the
database.
"""
def ok(user: str) -> str:
return user
monkeypatch.setattr(Auth, "validate_user", ok)
@pytest.fixture()
def fake_mtime(monkeypatch):
"""
Template's init event listener provides the file's modification date to
support template version control. For unit testing, mock the stat results
to appear at a fixed time.
Args:
monkeypatch: patch fixture
"""
def fake_stat(file: str):
"""
Create a real stat_result using an actual file, but change the st_mtime
to a known value before returning it.
Args:
file: filename (not used)
Returns:
mocked stat_results
"""
s = os.stat(".")
t = int(datetime.datetime(2021, 1, 29, 0, 0, 0).timestamp())
f = list(s)
f[ST_MTIME] = t
return stat_result(f)
with monkeypatch.context() as m:
m.setattr(Path, "stat", fake_stat)
yield
@pytest.fixture()
def find_template(monkeypatch, fake_mtime):
"""
Mock a Template class find call to return an object without requiring a DB
query.
Args:
monkeypatch: patching fixture
fake_mtime: fake file modification time on init
"""
def fake_find(name: str) -> Template:
return Template(
name="run",
idxname="run-data",
template_name="unit-test.v6.run-data",
file="run.json",
template_pattern="unit-test.v6.run-data.*",
index_template="unit-test.v6.run-data.{year}-{month}",
settings={"none": False},
mappings={"properties": None},
version=5,
)
with monkeypatch.context() as m:
m.setattr(Template, "find", fake_find)
yield
@pytest.fixture
def pbench_token(client, server_config):
# First create a user
response = register_user(
client,
server_config,
username="drb",
firstname="firstname",
lastname="lastName",
email="[email protected]",
password="12345",
)
assert response.status_code == HTTPStatus.CREATED
# Login user to get valid pbench token
response = login_user(client, server_config, "drb", "12345")
assert response.status_code == HTTPStatus.OK
data = response.json
assert data["auth_token"]
return data["auth_token"]
@pytest.fixture(params=("valid", "invalid", "empty"))
def build_auth_header(request, server_config, pbench_token, client):
header = (
{} if request.param == "empty" else {"Authorization": "Bearer " + pbench_token}
)
if request.param == "invalid":
# Create an invalid token by logging the user out
response = client.post(
f"{server_config.rest_uri}/logout",
headers=dict(Authorization="Bearer " + pbench_token),
)
assert response.status_code == HTTPStatus.OK
return {"header": header, "header_param": request.param}
| gpl-3.0 | -7,350,132,253,102,956,000 | 27.083333 | 87 | 0.65174 | false |
equella/Equella | Source/Tools/ImportLibraries/Python/odbclient.py | 1 | 1697 | # Copyright Dytech Solutions, 2005.
# This module is provided 'commercial-in-confidence' and may not be reproduced nor redistributed without
# express written permission from the copyright holder.
# Author: Adam Eijdenberg, Dytech Solutions <[email protected]>
# Note: This is a very basic ODBC database access wrapper. It requires Python and the Win32 extensions to be installed.
import dbi, odbc
import re
DATE_MATCHER = re.compile ('[^ ]+ ([^ ]+) ([0-9]+) ([0-9]+):([0-9]+):([0-9]+) ([0-9]+)')
MONTHS = {
'Jan': 1,
'Feb': 2,
'Mar': 3,
'Apr': 4,
'May': 5,
'Jun': 6,
'Jul': 7,
'Aug': 8,
'Sep': 9,
'Oct': 10,
'Nov': 11,
'Dec': 12,
}
def clean_field (field):
if hasattr (field, '__class__') and field.__class__ == str:
return unicode (field, 'cp1252')
else:
return field
def zp (s, i):
while len (s) < i:
s = '0' + s
return s
class ODBCClient:
# Create an ODBC client given the datasource name
def __init__ (self, odbcSourceName):
self.dbc = odbc.odbc (odbcSourceName)
# Given a SQL statement, return a two dimensional array of unicode strings as a result set
def fetch (self, q):
cursor = self.dbc.cursor ()
cursor.execute (q)
res = [[clean_field (field) for field in row] for row in cursor.fetchall ()]
cursor.close ()
return res
def date_to_iso (self, date):
month, date, hour, minute, second, year = DATE_MATCHER.match (str (date)).groups ()
return '%s-%s-%sT%s:%s:%s' % (zp (year, 4), zp (str (MONTHS [month]), 2), zp (date, 2), zp (hour, 2), zp (minute, 2), zp (second, 2))
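	# Example (illustrative): for a dbi date whose str() form is
	# 'Tue Jul 16 11:41:07 2013', date_to_iso returns '2013-07-16T11:41:07'.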
| apache-2.0 | -7,283,380,422,882,922,000 | 29.854545 | 149 | 0.582793 | false |
erigones/esdc-ce | bin/eslib/filelock.py | 1 | 2314 | import os
import time
from functools import wraps
class FileLockTimeout(Exception):
pass
class FileLockError(Exception):
pass
class FileLock(object):
"""
Simple file lock.
"""
def __init__(self, lockfile):
self._lockfile = lockfile
self._lockfile_fd = None
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self._lockfile)
def __nonzero__(self):
return self.is_locked()
def _write_file(self):
self._lockfile_fd = os.open(self._lockfile, os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC, 0o644)
def _remove_file(self):
os.close(self._lockfile_fd)
self._lockfile_fd = None
os.remove(self._lockfile)
def exists(self):
return os.path.exists(self._lockfile)
def is_locked(self):
return self._lockfile_fd is not None
def acquire(self, timeout=30, sleep_interval=1):
start_time = time.time()
while not self.is_locked():
if not self.exists():
try:
self._write_file()
except (OSError, IOError):
pass # Failed to create lock file
else:
break # Created lock file
if timeout is not None and (time.time() - start_time) > timeout:
raise FileLockTimeout('Could not acquire lock within %d seconds' % timeout)
else:
time.sleep(sleep_interval)
def release(self):
if self.exists():
if self.is_locked():
self._remove_file()
else:
raise FileLockError('Lock was never acquired')
else:
raise FileLockError('Not locked')
def filelock(lockfile, **acquire_kwargs):
"""Simple file lock decorator"""
def filelock_decorator(fun):
@wraps(fun)
def wrap(*args, **kwargs):
if hasattr(lockfile, '__call__'):
filepath = lockfile(*args, **kwargs)
else:
filepath = lockfile
flock = FileLock(filepath)
flock.acquire(**acquire_kwargs)
try:
return fun(*args, **kwargs)
finally:
flock.release()
return wrap
return filelock_decorator
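# Illustrative usage of the decorator above (names are hypothetical):
#
#     @filelock('/tmp/example-task.lock', timeout=10)
#     def run_task():
#         ...  # only one process at a time gets past the lock file
#
# The same effect can be achieved manually with FileLock('/tmp/example-task.lock')
# by calling acquire()/release() around the critical section.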
| apache-2.0 | -2,884,021,981,432,574,500 | 25.295455 | 109 | 0.537165 | false |
Berserker66/FactorioManager | tests.py | 1 | 2223 | __author__ = 'Fabian'
import unittest
broken_mods = {"5dim mod", "Air Filtering", "canInsert"}
class TestRemoteAPI(unittest.TestCase):
indextestfields = ["title", "contact", "name", "homepage", "author"]
def test_index(self):
from FactorioManager import remoteapi
index = remoteapi.ModIndex
for mod in index.index:
self.assertTrue(type(mod) is str)
self.assertTrue(len(index.index)> 1)
self.index = index
def test_mod_download(self):
from FactorioManager import remoteapi
mod = remoteapi.ModIndex.list[0]
loc = remoteapi.download(mod)
from FactorioManager.ModFile import ModFile
ModFile.checkfile(loc)
def test_all_mods_integrity(self):
from FactorioManager import remoteapi
from FactorioManager.ModFile import ModFile
for i,mod in enumerate(remoteapi.ModIndex.list):
modname = mod["title"]
with self.subTest(modname):
print("Testing mod {} of {}.".format(i + 1, len(remoteapi.ModIndex.list)))
loc = remoteapi.download(mod)
modfile = ModFile(loc)
ret = modfile.check()
if ret != True:
if modname in broken_mods:
self.skipTest("Mod {} is expected to fail: {}".format(modname,ret))
raise ret
elif modname in broken_mods:
self.fail("Mod {} is repaired, but still listed as broken.".format(modname))
else:
with self.subTest(modname + " Sanity Check"):
info = modfile.get_info()
for field in self.indextestfields:
if field in info and field not in mod:
self.fail("Infofield {} is in info.json but not in index.".format(field))
elif field not in info and field in mod:
self.fail("Infofield {} is in index but not in info.json.".format(field))
elif field in info and field in mod:
self.assertEqual(info[field], mod[field])
| mit | -479,495,605,471,145,800 | 42.588235 | 105 | 0.547458 | false |
HybridF5/jacket | jacket/tests/storage/unit/keymgr/test_mock_key_mgr.py | 1 | 3677 | # Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test cases for the mock key manager.
"""
import array
from jacket import context
from jacket.storage import exception
from jacket.storage.keymgr import key as keymgr_key
from jacket.tests.storage.unit.keymgr import mock_key_mgr
from jacket.tests.storage.unit.keymgr import test_key_mgr
class MockKeyManagerTestCase(test_key_mgr.KeyManagerTestCase):
def _create_key_manager(self):
return mock_key_mgr.MockKeyManager()
def setUp(self):
super(MockKeyManagerTestCase, self).setUp()
self.ctxt = context.RequestContext('fake', 'fake')
def test_create_key(self):
key_id_1 = self.key_mgr.create_key(self.ctxt)
key_id_2 = self.key_mgr.create_key(self.ctxt)
# ensure that the UUIDs are unique
self.assertNotEqual(key_id_1, key_id_2)
def test_create_key_with_length(self):
for length in [64, 128, 256]:
key_id = self.key_mgr.create_key(self.ctxt, key_length=length)
key = self.key_mgr.get_key(self.ctxt, key_id)
self.assertEqual(length // 8, len(key.get_encoded()))
def test_create_null_context(self):
self.assertRaises(exception.NotAuthorized,
self.key_mgr.create_key, None)
def test_store_key(self):
secret_key = array.array('B', b'\x00' * 32).tolist()
_key = keymgr_key.SymmetricKey('AES', secret_key)
key_id = self.key_mgr.store_key(self.ctxt, _key)
actual_key = self.key_mgr.get_key(self.ctxt, key_id)
self.assertEqual(_key, actual_key)
def test_store_null_context(self):
self.assertRaises(exception.NotAuthorized,
self.key_mgr.store_key, None, None)
def test_copy_key(self):
key_id = self.key_mgr.create_key(self.ctxt)
key = self.key_mgr.get_key(self.ctxt, key_id)
copied_key_id = self.key_mgr.copy_key(self.ctxt, key_id)
copied_key = self.key_mgr.get_key(self.ctxt, copied_key_id)
self.assertNotEqual(key_id, copied_key_id)
self.assertEqual(key, copied_key)
def test_copy_null_context(self):
self.assertRaises(exception.NotAuthorized,
self.key_mgr.copy_key, None, None)
def test_get_key(self):
pass
def test_get_null_context(self):
self.assertRaises(exception.NotAuthorized,
self.key_mgr.get_key, None, None)
def test_get_unknown_key(self):
self.assertRaises(KeyError, self.key_mgr.get_key, self.ctxt, None)
def test_delete_key(self):
key_id = self.key_mgr.create_key(self.ctxt)
self.key_mgr.delete_key(self.ctxt, key_id)
self.assertRaises(KeyError, self.key_mgr.get_key, self.ctxt, key_id)
def test_delete_null_context(self):
self.assertRaises(exception.NotAuthorized,
self.key_mgr.delete_key, None, None)
def test_delete_unknown_key(self):
self.assertRaises(KeyError, self.key_mgr.delete_key, self.ctxt, None)
| apache-2.0 | 2,562,251,845,402,491,400 | 35.04902 | 78 | 0.655698 | false |
GoogleCloudPlatform/PerfKitBenchmarker | perfkitbenchmarker/stages.py | 1 | 3495 | # Copyright 2016 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Variables and classes related to the different stages of a PKB run."""
import itertools
from absl import flags
PROVISION = 'provision'
PREPARE = 'prepare'
RUN = 'run'
CLEANUP = 'cleanup'
TEARDOWN = 'teardown'
STAGES = [PROVISION, PREPARE, RUN, CLEANUP, TEARDOWN]
_NEXT_STAGE = {PROVISION: PREPARE, PREPARE: RUN, RUN: CLEANUP,
CLEANUP: TEARDOWN}
_ALL = 'all'
_VALID_FLAG_VALUES = PROVISION, PREPARE, RUN, CLEANUP, TEARDOWN, _ALL
_SYNTACTIC_HELP = (
"A complete benchmark execution consists of {0} stages: {1}. Possible flag "
"values include an individual stage, a comma-separated list of stages, or "
"'all'. If a list of stages is provided, they must be in order without "
"skipping any stage.".format(len(STAGES), ', '.join(STAGES)))
class RunStageParser(flags.ListParser):
"""Parse a string containing PKB run stages.
See _SYNTACTIC_HELP for more information.
"""
def __init__(self, *args, **kwargs):
super(RunStageParser, self).__init__(*args, **kwargs)
self.syntactic_help = _SYNTACTIC_HELP
def parse(self, argument):
"""Parses a list of stages.
Args:
argument: string or list of strings.
Returns:
list of strings whose elements are chosen from STAGES.
Raises:
ValueError: If argument does not conform to the guidelines explained in
syntactic_help.
"""
stage_list = super(RunStageParser, self).parse(argument)
if not stage_list:
raise ValueError('Unable to parse {0}. Stage list cannot be '
'empty.'.format(repr(argument)))
invalid_items = set(stage_list).difference(_VALID_FLAG_VALUES)
if invalid_items:
raise ValueError(
'Unable to parse {0}. Unrecognized stages were found: {1}'.format(
repr(argument), ', '.join(sorted(invalid_items))))
if _ALL in stage_list:
if len(stage_list) > 1:
raise ValueError(
"Unable to parse {0}. If 'all' stages are specified, individual "
"stages cannot also be specified.".format(repr(argument)))
return list(STAGES)
previous_stage = stage_list[0]
for stage in itertools.islice(stage_list, 1, None):
expected_stage = _NEXT_STAGE.get(previous_stage)
if not expected_stage:
raise ValueError("Unable to parse {0}. '{1}' should be the last "
"stage.".format(repr(argument), previous_stage))
if stage != expected_stage:
raise ValueError(
"Unable to parse {0}. The stage after '{1}' should be '{2}', not "
"'{3}'.".format(repr(argument), previous_stage, expected_stage,
stage))
previous_stage = stage
return stage_list
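# For illustration (behaviour follows from the parser above): --run_stage=all and
# --run_stage=provision,prepare are accepted, while --run_stage=provision,run is
# rejected because the "prepare" stage would be skipped.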
flags.DEFINE(
RunStageParser(), 'run_stage', STAGES,
"The stage or stages of perfkitbenchmarker to run.",
flags.FLAGS, flags.ListSerializer(','))
| apache-2.0 | 4,238,887,930,728,280,600 | 32.932039 | 80 | 0.660658 | false |
googledatalab/pydatalab | solutionbox/ml_workbench/tensorflow/transform.py | 1 | 18304 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Flake8 cannot disable a warning for the file. Flake8 does not like beam code
# and reports many 'W503 line break before binary operator' errors. So turn off
# flake8 for this file.
# flake8: noqa
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import datetime
import json
import logging
import os
import sys
import apache_beam as beam
import textwrap
def parse_arguments(argv):
"""Parse command line arguments.
Args:
argv: list of command line arguments including program name.
Returns:
The parsed arguments as returned by argparse.ArgumentParser.
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent("""\
Runs preprocessing on raw data for TensorFlow training.
This script applies some transformations to raw data to improve
training performance. Some data transformations can be expensive
such as the tf-idf text column transformation. During training, the
          same raw data row might be used multiple times to train a model. This
means the same transformations are applied to the same data row
multiple times. This can be very inefficient, so this script applies
partial transformations to the raw data and writes an intermediate
preprocessed datasource to disk for training.
Running this transformation step is required for two usage paths:
1) If the img_url_to_vec transform is used. This is because
          preprocessing an image is expensive and TensorFlow cannot easily
read raw image files during training.
2) If the raw data is in BigQuery. TensorFlow cannot read from a
BigQuery source.
Running this transformation step is recommended if a text transform is
used (like tf-idf or bag-of-words), and the text value for each row
is very long.
Running this transformation step may not have an interesting training
performance impact if the transforms are all simple like scaling
numerical values."""))
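  # Example invocation (bucket paths and names below are placeholders, not part
  # of this repository):
  #   python transform.py --csv gs://my-bucket/raw-*.csv \
  #     --analysis gs://my-bucket/analysis_output --prefix features_train \
  #     --output gs://my-bucket/transformed --shuffle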
source_group = parser.add_mutually_exclusive_group(required=True)
source_group.add_argument(
'--csv',
metavar='FILE',
required=False,
action='append',
help='CSV data to transform.')
source_group.add_argument(
'--bigquery',
metavar='PROJECT_ID.DATASET.TABLE_NAME',
type=str,
required=False,
help=('Must be in the form `project.dataset.table_name`. BigQuery '
'data to transform'))
parser.add_argument(
'--analysis',
metavar='ANALYSIS_OUTPUT_DIR',
required=True,
help='The output folder of analyze')
parser.add_argument(
'--prefix',
metavar='OUTPUT_FILENAME_PREFIX',
required=True,
type=str)
parser.add_argument(
'--output',
metavar='DIR',
default=None,
required=True,
help=('Google Cloud Storage or Local directory in which '
'to place outputs.'))
parser.add_argument(
'--shuffle',
action='store_true',
default=False,
help='If used, data source is shuffled. This is recommended for training data.')
parser.add_argument(
'--batch-size',
metavar='N',
type=int,
default=100,
help='Larger values increase performance and peak memory usage.')
cloud_group = parser.add_argument_group(
title='Cloud Parameters',
description='These parameters are only used if --cloud is used.')
cloud_group.add_argument(
'--cloud',
action='store_true',
help='Run preprocessing on the cloud.')
cloud_group.add_argument(
'--job-name',
type=str,
help='Unique dataflow job name.')
cloud_group.add_argument(
'--project-id',
help='The project to which the job will be submitted.')
cloud_group.add_argument(
'--num-workers',
metavar='N',
type=int,
default=0,
help='Set to 0 to use the default size determined by the Dataflow service.')
cloud_group.add_argument(
'--worker-machine-type',
metavar='NAME',
type=str,
help='A machine name from https://cloud.google.com/compute/docs/machine-types. '
' If not given, the service uses the default machine type.')
cloud_group.add_argument(
'--async',
action='store_true',
help='If used, this script returns before the dataflow job is completed.')
args = parser.parse_args(args=argv[1:])
if args.cloud and not args.project_id:
raise ValueError('--project-id is needed for --cloud')
if args.async and not args.cloud:
raise ValueError('--async should only be used with --cloud')
if not args.job_name:
args.job_name = ('dataflow-job-{}'.format(
datetime.datetime.now().strftime('%Y%m%d%H%M%S')))
return args
@beam.ptransform_fn
def shuffle(pcoll): # pylint: disable=invalid-name
import random
return (pcoll
| 'PairWithRandom' >> beam.Map(lambda x: (random.random(), x))
| 'GroupByRandom' >> beam.GroupByKey()
| 'DropRandom' >> beam.FlatMap(lambda (k, vs): vs))
def image_transform_columns(features):
"""Returns a list of columns that prepare_image_transforms() should run on.
Because of beam + pickle, IMAGE_URL_TO_VEC_TRANSFORM cannot be used inside of
a beam function, so we extract the columns prepare_image_transforms() should
run on outside of beam.
"""
import six
from trainer import feature_transforms
img_cols = []
for name, transform in six.iteritems(features):
if transform['transform'] == feature_transforms.IMAGE_TRANSFORM:
img_cols.append(name)
return img_cols
def prepare_image_transforms(element, image_columns):
"""Replace an images url with its jpeg bytes.
Args:
element: one input row, as a dict
image_columns: list of columns that are image paths
Return:
element, where each image file path has been replaced by a base64 image.
"""
import base64
import cStringIO
from PIL import Image
from tensorflow.python.lib.io import file_io as tf_file_io
from apache_beam.metrics import Metrics
img_error_count = Metrics.counter('main', 'ImgErrorCount')
img_missing_count = Metrics.counter('main', 'ImgMissingCount')
for name in image_columns:
uri = element[name]
if not uri:
img_missing_count.inc()
continue
try:
with tf_file_io.FileIO(uri, 'r') as f:
img = Image.open(f).convert('RGB')
# A variety of different calling libraries throw different exceptions here.
# They all correspond to an unreadable file so we treat them equivalently.
# pylint: disable broad-except
except Exception as e:
logging.exception('Error processing image %s: %s', uri, str(e))
img_error_count.inc()
return
# Convert to desired format and output.
output = cStringIO.StringIO()
img.save(output, 'jpeg')
element[name] = base64.urlsafe_b64encode(output.getvalue())
return element
class EmitAsBatchDoFn(beam.DoFn):
"""A DoFn that buffers the records and emits them batch by batch."""
def __init__(self, batch_size):
"""Constructor of EmitAsBatchDoFn beam.DoFn class.
Args:
batch_size: the max size we want to buffer the records before emitting.
"""
self._batch_size = batch_size
self._cached = []
def process(self, element):
self._cached.append(element)
if len(self._cached) >= self._batch_size:
emit = self._cached
self._cached = []
yield emit
def finish_bundle(self, element=None):
from apache_beam.transforms import window
from apache_beam.utils.windowed_value import WindowedValue
if len(self._cached) > 0: # pylint: disable=g-explicit-length-test
yield WindowedValue(self._cached, -1, [window.GlobalWindow()])
class TransformFeaturesDoFn(beam.DoFn):
"""Converts raw data into transformed data."""
def __init__(self, analysis_output_dir, features, schema, stats):
self._analysis_output_dir = analysis_output_dir
self._features = features
self._schema = schema
self._stats = stats
self._session = None
def start_bundle(self, element=None):
"""Build the transfromation graph once."""
import tensorflow as tf
from trainer import feature_transforms
g = tf.Graph()
session = tf.Session(graph=g)
# Build the transformation graph
with g.as_default():
transformed_features, _, placeholders = (
feature_transforms.build_csv_serving_tensors_for_transform_step(
analysis_path=self._analysis_output_dir,
features=self._features,
schema=self._schema,
stats=self._stats,
keep_target=True))
session.run(tf.tables_initializer())
self._session = session
self._transformed_features = transformed_features
self._input_placeholder_tensor = placeholders['csv_example']
def finish_bundle(self, element=None):
self._session.close()
def process(self, element):
"""Run the transformation graph on batched input data
Args:
element: list of csv strings, representing one batch input to the TF graph.
Returns:
dict containing the transformed data. Results are un-batched. Sparse
tensors are converted to lists.
"""
import apache_beam as beam
import six
import tensorflow as tf
# This function is invoked by a separate sub-process so setting the logging level
# does not affect Datalab's kernel process.
tf.logging.set_verbosity(tf.logging.ERROR)
try:
clean_element = []
for line in element:
clean_element.append(line.rstrip())
# batch_result is list of numpy arrays with batch_size many rows.
batch_result = self._session.run(
fetches=self._transformed_features,
feed_dict={self._input_placeholder_tensor: clean_element})
# ex batch_result.
# Dense tensor: {'col1': array([[batch_1], [batch_2]])}
# Sparse tensor: {'col1': tf.SparseTensorValue(
# indices=array([[batch_1, 0], [batch_1, 1], ...,
# [batch_2, 0], [batch_2, 1], ...]],
# values=array[value, value, value, ...])}
# Unbatch the results.
for i in range(len(clean_element)):
transformed_features = {}
for name, value in six.iteritems(batch_result):
if isinstance(value, tf.SparseTensorValue):
batch_i_indices = value.indices[:, 0] == i
batch_i_values = value.values[batch_i_indices]
transformed_features[name] = batch_i_values.tolist()
else:
transformed_features[name] = value[i].tolist()
yield transformed_features
except Exception as e: # pylint: disable=broad-except
yield beam.pvalue.TaggedOutput('errors', (str(e), element))
def decode_csv(csv_string, column_names):
"""Parse a csv line into a dict.
Args:
csv_string: a csv string. May contain missing values "a,,c"
column_names: list of column names
Returns:
Dict of {column_name, value_from_csv}. If there are missing values,
value_from_csv will be ''.
"""
import csv
r = next(csv.reader([csv_string]))
if len(r) != len(column_names):
raise ValueError('csv line %s does not have %d columns' % (csv_string, len(column_names)))
return {k: v for k, v in zip(column_names, r)}
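# For example (illustrative values), decode_csv('a,,c', ['x', 'y', 'z']) returns
# {'x': 'a', 'y': '', 'z': 'c'}.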
def encode_csv(data_dict, column_names):
"""Builds a csv string.
Args:
data_dict: dict of {column_name: 1 value}
column_names: list of column names
Returns:
A csv string version of data_dict
"""
import csv
import six
values = [str(data_dict[x]) for x in column_names]
str_buff = six.StringIO()
writer = csv.writer(str_buff, lineterminator='')
writer.writerow(values)
return str_buff.getvalue()
def serialize_example(transformed_json_data, info_dict):
"""Makes a serialized tf.example.
Args:
transformed_json_data: dict of transformed data.
    info_dict: output of feature_transforms.get_transformed_feature_info()
Returns:
The serialized tf.example version of transformed_json_data.
"""
import six
import tensorflow as tf
def _make_int64_list(x):
return tf.train.Feature(int64_list=tf.train.Int64List(value=x))
def _make_bytes_list(x):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=x))
def _make_float_list(x):
return tf.train.Feature(float_list=tf.train.FloatList(value=x))
if sorted(six.iterkeys(transformed_json_data)) != sorted(six.iterkeys(info_dict)):
raise ValueError('Keys do not match %s, %s' % (list(six.iterkeys(transformed_json_data)),
list(six.iterkeys(info_dict))))
ex_dict = {}
for name, info in six.iteritems(info_dict):
if info['dtype'] == tf.int64:
ex_dict[name] = _make_int64_list(transformed_json_data[name])
elif info['dtype'] == tf.float32:
ex_dict[name] = _make_float_list(transformed_json_data[name])
elif info['dtype'] == tf.string:
ex_dict[name] = _make_bytes_list(transformed_json_data[name])
else:
raise ValueError('Unsupported data type %s' % info['dtype'])
ex = tf.train.Example(features=tf.train.Features(feature=ex_dict))
return ex.SerializeToString()
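# Shape expectations (assumed, for orientation only): every value in
# transformed_json_data is a list, e.g. {'target': [1], 'weight': [7.5]}, and
# info_dict maps the same keys to dicts whose 'dtype' is tf.int64, tf.float32 or
# tf.string, which selects the Int64List / FloatList / BytesList branch above.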
def preprocess(pipeline, args):
"""Transfrom csv data into transfromed tf.example files.
Outline:
1) read the input data (as csv or bigquery) into a dict format
2) replace image paths with base64 encoded image files
3) build a csv input string with images paths replaced with base64. This
matches the serving csv that a trained model would expect.
4) batch the csv strings
5) run the transformations
6) write the results to tf.example files and save any errors.
"""
from tensorflow.python.lib.io import file_io
from trainer import feature_transforms
schema = json.loads(file_io.read_file_to_string(
os.path.join(args.analysis, feature_transforms.SCHEMA_FILE)).decode())
features = json.loads(file_io.read_file_to_string(
os.path.join(args.analysis, feature_transforms.FEATURES_FILE)).decode())
stats = json.loads(file_io.read_file_to_string(
os.path.join(args.analysis, feature_transforms.STATS_FILE)).decode())
column_names = [col['name'] for col in schema]
if args.csv:
all_files = []
for i, file_pattern in enumerate(args.csv):
all_files.append(pipeline | ('ReadCSVFile%d' % i) >> beam.io.ReadFromText(file_pattern))
raw_data = (
all_files
| 'MergeCSVFiles' >> beam.Flatten()
| 'ParseCSVData' >> beam.Map(decode_csv, column_names))
else:
columns = ', '.join(column_names)
query = 'SELECT {columns} FROM `{table}`'.format(columns=columns,
table=args.bigquery)
raw_data = (
pipeline
        | 'ReadBigQueryData'
>> beam.io.Read(beam.io.BigQuerySource(query=query,
use_standard_sql=True)))
  # Note that prepare_image_transforms does not make embeddings, it just reads
  # the image files and converts them to byte strings. TransformFeaturesDoFn()
# will make the image embeddings.
image_columns = image_transform_columns(features)
clean_csv_data = (
raw_data
| 'PreprocessTransferredLearningTransformations'
>> beam.Map(prepare_image_transforms, image_columns)
| 'BuildCSVString'
>> beam.Map(encode_csv, column_names))
if args.shuffle:
clean_csv_data = clean_csv_data | 'ShuffleData' >> shuffle()
transform_dofn = TransformFeaturesDoFn(args.analysis, features, schema, stats)
(transformed_data, errors) = (
clean_csv_data
| 'Batch Input'
>> beam.ParDo(EmitAsBatchDoFn(args.batch_size))
| 'Run TF Graph on Batches'
>> beam.ParDo(transform_dofn).with_outputs('errors', main='main'))
_ = (transformed_data
| 'SerializeExamples' >> beam.Map(serialize_example, feature_transforms.get_transformed_feature_info(features, schema))
| 'WriteExamples'
>> beam.io.WriteToTFRecord(
os.path.join(args.output, args.prefix),
file_name_suffix='.tfrecord.gz'))
_ = (errors
| 'WriteErrors'
>> beam.io.WriteToText(
os.path.join(args.output, 'errors_' + args.prefix),
file_name_suffix='.txt'))
def main(argv=None):
"""Run Preprocessing as a Dataflow."""
args = parse_arguments(sys.argv if argv is None else argv)
temp_dir = os.path.join(args.output, 'tmp')
if args.cloud:
pipeline_name = 'DataflowRunner'
else:
pipeline_name = 'DirectRunner'
# Suppress TF warnings.
os.environ['TF_CPP_MIN_LOG_LEVEL']='3'
options = {
'job_name': args.job_name,
'temp_location': temp_dir,
'project': args.project_id,
'setup_file':
os.path.abspath(os.path.join(
os.path.dirname(__file__),
'setup.py')),
}
if args.num_workers:
options['num_workers'] = args.num_workers
if args.worker_machine_type:
options['worker_machine_type'] = args.worker_machine_type
pipeline_options = beam.pipeline.PipelineOptions(flags=[], **options)
p = beam.Pipeline(pipeline_name, options=pipeline_options)
preprocess(pipeline=p, args=args)
pipeline_result = p.run()
if not args.async:
pipeline_result.wait_until_finish()
if args.async and args.cloud:
print('View job at https://console.developers.google.com/dataflow/job/%s?project=%s' %
(pipeline_result.job_id(), args.project_id))
if __name__ == '__main__':
main()
| apache-2.0 | 2,258,065,721,742,861,000 | 32.340619 | 127 | 0.661385 | false |
weblyzard/inscriptis | tests/test_block.py | 1 | 1720 | """
Test cases for the Block class.
"""
from inscriptis.model.canvas.block import Block
from inscriptis.model.canvas.prefix import Prefix
def test_merge_normal_text_collapsable_whitespaces():
"""
test cases where the block has collapsable whitespaces
"""
b = Block(0, Prefix())
b.merge_normal_text("Hallo")
assert b._content == 'Hallo'
assert not b.collapsable_whitespace
b = Block(0, Prefix())
b.merge_normal_text(" Hallo ")
assert b._content == 'Hallo '
assert b.collapsable_whitespace
b = Block(0, Prefix())
b.merge_normal_text('')
assert b._content == ''
assert b.collapsable_whitespace
b.merge_normal_text(' ')
assert b._content == ''
assert b.collapsable_whitespace
b.merge_normal_text(' ')
assert b._content == ''
assert b.collapsable_whitespace
def test_merge_normal_non_collapsable_whitespaces():
b = Block(0, Prefix())
b.collapsable_whitespace = False
b.merge_normal_text("Hallo")
assert b._content == 'Hallo'
assert not b.collapsable_whitespace
b = Block(0, Prefix())
b.collapsable_whitespace = False
b.merge_normal_text(" Hallo ")
assert b._content == ' Hallo '
assert b.collapsable_whitespace
b = Block(0, Prefix())
b.collapsable_whitespace = False
b.merge_normal_text('')
assert b._content == ''
assert not b.collapsable_whitespace
b = Block(0, Prefix())
b.collapsable_whitespace = False
b.merge_normal_text(' ')
assert b._content == ' '
assert b.collapsable_whitespace
b = Block(0, Prefix())
b.collapsable_whitespace = False
b.merge_normal_text(' ')
assert b._content == ' '
assert b.collapsable_whitespace
| gpl-2.0 | -7,282,760,581,919,414,000 | 25.461538 | 58 | 0.64593 | false |
maschwanden/boxsimu | boxsimu/solver.py | 1 | 31181 | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 23 2016 at 10:37UTC
@author: Mathias Aschwanden ([email protected])
"""
import os
import pdb
import copy
import time as time_module
import datetime
import numpy as np
import dill as pickle
import matplotlib.pyplot as plt
from attrdict import AttrDict
import math
from . import solution as bs_solution
from . import utils as bs_utils
from . import ur
def save_simulation_state(system, timestep):
filename = '{:%Y%m%d}_{}_TS{}.pickle'.format(
datetime.date.today(), system.name, timestep)
with open(filename, 'wb') as f:
pickle.dump(system, f)
def load_simulation_state(system):
pass
def solve(system, total_integration_time, dt, save_frequency=100, debug=False):
"""Simulate the time evolution of all variables within the system.
Collect all information about the system, create differential
    equations from this information and integrate them (numerically)
into the future.
Args:
system (System): The system that is simulated.
total_integration_time (pint.Quantity [T]): The time span which
the system should be solved into the "future". The system will
be simulated for the time period zero to approximately
total_integration_time (depending whether
total_integration_time is a multiple of dt; if not the real
            integration horizon will be bigger than
[0, total_integration_time]).
dt (pint.Quantity [T]): Size of the timestep for the simulation.
The bigger the timestep the faster the simulation will be
calculated, however, if the timestep is chosen too high
there can arise numerical instabilites!
save_frequency (int): Number of timesteps after which the solve
progress is saved to a pickle file. If the solver
is interupted after the state was saved to a pickle file,
the solve function will automatically progress at the latest
saved state.
debug (bool): Activates debugging mode (pdb.set_trace()).
Defaults to False.
"""
# Start time of function
func_start_time = time_module.time()
# Saves the time since the start of the simulate at which the last
# save (pickling) was conducted
last_save_timedelta = 0
if debug:
pdb.set_trace()
# Get number of time steps - round up if there is a remainder
N_timesteps = math.ceil(total_integration_time / dt)
# Recalculate total integration time based on the number of timesteps
total_integration_time = N_timesteps * dt
print('Start solving the BoxModelSystem...')
print('- total integration time: {}'.format(total_integration_time))
print('- dt (time step): {}'.format(dt))
print('- number of time steps: {}'.format(N_timesteps))
time = total_integration_time * 0
sol = bs_solution.Solution(system, N_timesteps, dt)
# Save initial state to solution
for box in system.box_list:
# sol.df.loc[0] = np.nan
sol.df.loc[0, (box.name,'mass')] = box.fluid.mass.magnitude
sol.df.loc[0, (box.name,'volume')] = \
system.get_box_volume(box).magnitude
for variable in system.variable_list:
var_name = variable.name
sol.df.loc[0, (box.name,var_name)] = \
box.variables[var_name].mass.magnitude
timetesteps_since_last_save = 0
progress = 0
for timestep in range(N_timesteps):
# Calculate progress in percentage of processed timesteps
progress_old = progress
progress = int(float(timestep) / float(N_timesteps)*10) * 10.0
if progress != progress_old:
print("{}%".format(progress))
        # Check if enough timesteps have passed to save the simulation state
        if timetesteps_since_last_save >= save_frequency:
            save_simulation_state(system, timestep)
            timetesteps_since_last_save = 1
        else:
            timetesteps_since_last_save += 1
time += dt
##################################################
# Calculate Mass fluxes
##################################################
dm, f_flow = _calculate_mass_flows(system, time, dt)
##################################################
# Calculate Variable changes due to PROCESSES,
        # REACTIONS, FLUXES and FLOWS
##################################################
dvar = _calculate_changes_of_all_variables(
system, time, dt, f_flow)
##################################################
# Apply changes to Boxes and save values to
# Solution instance
##################################################
for box in system.box_list:
# Write changes to box objects
box.fluid.mass += dm[box.id]
# Save mass to Solution instance
sol.df.loc[timestep, (box.name, 'mass')] = \
box.fluid.mass.magnitude
sol.df.loc[timestep, (box.name, 'volume')] = \
system.get_box_volume(box).magnitude
for variable in system.variable_list:
var_name = variable.name
system.boxes[box.name].variables[var_name].mass += \
dvar[box.id, variable.id]
sol.df.loc[timestep, (box.name,variable.name)] = \
box.variables[variable.name].mass.magnitude
# End Time of Function
func_end_time = time_module.time()
print(
'Function "solve(...)" used {:3.3f}s'.format(
func_end_time - func_start_time))
return sol
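# Minimal usage sketch (illustrative; assumes `system` is a configured
# BoxModelSystem and `ur` is the pint unit registry imported above):
#   sol = solve(system, total_integration_time=10 * ur.year, dt=0.1 * ur.year)
#   sol.df  # per-timestep masses and variable masses, one column group per box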
def _calculate_mass_flows(system, time, dt):
"""Calculate mass changes of every box.
Args:
time (pint.Quantity [T]): Current time (age) of the system.
dt (pint.Quantity [T]): Timestep used.
Returns:
dm (numpy 1D array of pint.Quantities): Mass changes of every box.
f_flow (numpy 1D array): Reduction coefficient of the mass
flows (due to becoming-empty boxes -> box mass cannot
            decrease below 0kg).
"""
    # f_flow is the reduction coefficient of the "sink-flows" of each box
# scaling factor for sinks of each box
f_flow = np.ones(system.N_boxes)
v1 = np.ones(system.N_boxes)
m_ini = system.get_fluid_mass_1Darray()
    # get internal flow matrix and calculate the internal source and sink
# vectors. Also get the external sink and source vector
A = system.get_fluid_mass_internal_flow_2Darray(time)
# internal
s_i = bs_utils.dot(A, v1)
q_i = bs_utils.dot(A.T, v1)
s_e = system.get_fluid_mass_flow_sink_1Darray(time)
q_e = system.get_fluid_mass_flow_source_1Darray(time)
# calculate first estimate of mass change vector
dm = (q_e + q_i - s_e - s_i) * dt
# calculate first estimate of mass after timestep
m = m_ini + dm
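    # Explanatory note (added): the loop below keeps every box mass non-negative.
    # Whenever the first-guess mass of some box would drop below zero, the outgoing
    # ("sink") flows of that box are scaled down via f_flow, roughly by
    # (available mass + sources) / sinks, and dm is re-evaluated until no box mass
    # is negative.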
while np.any(m.magnitude < 0):
argmin = np.argmin(m)
# Calculate net sink and source and mass of the 'empty' box.
net_source = (q_e[argmin] + q_i[argmin])*dt
net_sink = (s_e[argmin] + s_i[argmin])*dt
available_mass = m_ini[argmin]
total_mass = (net_source + available_mass).to_base_units()
if total_mass.magnitude > 0:
f_new = (total_mass / net_sink).to_base_units().magnitude
f_flow[argmin] = min(f_new, f_flow[argmin] * 0.98)
else:
f_flow[argmin] = 0
# Apply reduction of sinks of the box
A = (A.T * f_flow).T
s_i = bs_utils.dot(A, v1)
q_i = bs_utils.dot(A.T, v1)
s_e = f_flow * s_e
dm = (q_e + q_i - s_e - s_i) * dt
m = m_ini + dm
return dm, f_flow
def _calculate_changes_of_all_variables(system, time, dt, f_flow):
""" Calculates the changes of all variable in every box.
Args:
time (pint.Quantity [T]): Current time (age) of the system.
dt (pint.Quantity [T]): Timestep used.
f_flow (numpy 1D array): Reduction coefficient of the mass flows
due to empty boxes.
Returns:
dvar (numpy 2D array of pint.Quantities): Variables changes of
every box. First dimension are the boxes, second dimension
are the variables.
"""
    # reduction coefficient of the "variable-sinks" of each box for the
# treated variable
# scaling factor for sinks of each box
f_var = np.ones([system.N_boxes, system.N_variables])
var_ini = bs_utils.stack([system.get_variable_mass_1Darray(
variable) for variable in system.variable_list], axis=-1)
while True:
dvar_list, net_sink_list, net_source_list = zip(*[_get_dvar(
system, variable, time, dt, f_var, f_flow)
for variable in system.variable_list])
dvar = bs_utils.stack(dvar_list, axis=-1)
net_sink = bs_utils.stack(net_sink_list, axis=-1)
net_source = bs_utils.stack(net_source_list, axis=-1)
var = (var_ini + dvar).to_base_units()
        net_sink[net_sink.magnitude == 0] = np.nan  # to avoid division by zero
f_var_tmp = ((var_ini + net_source) / net_sink).magnitude
f_var_tmp[np.isnan(f_var_tmp)] = 1
f_var_tmp[f_var_tmp > 1] = 1
# If any element of f_var_tmp is smaller than one this means that
# for at least one variable in one box the sinks are bigger than
# the sum of the source and the already present variable mass.
# Thus: The mass of this variable would fall below zero!
# Reduce the sinks proportional to the ratio of the sources and
# the already present variable mass to the sinks.
if np.any(f_var_tmp < 1):
# To be sure that the sinks are reduced enough and to
            # avoid any rounding errors the reduction ratio of the sinks
# (f_var_tmp) is further decreased by a very small number.
f_var_tmp[f_var_tmp < 1] -= 1e-15 # np.nextafter(0, 1)
f_var *= f_var_tmp
else:
break
return dvar
def _get_sink_source_flow(system, variable, time, dt, f_var, f_flow):
v1 = np.ones(system.N_boxes)
flows = system.flows
A_flow = system.get_variable_internal_flow_2Darray(variable,
time, f_flow, flows)
A_flow = (A_flow.T * f_var[:, variable.id]).T
s_flow_i = bs_utils.dot(A_flow, v1)
q_flow_i = bs_utils.dot(A_flow.T, v1)
    s_flow_e = system.get_variable_flow_sink_1Darray(variable,
            time, f_flow, flows) * f_var[:, variable.id]
q_flow_e = system.get_variable_flow_source_1Darray(variable,
time, flows)
sink_flow = ((s_flow_i + s_flow_e) * dt).to_base_units()
source_flow = ((q_flow_i + q_flow_e) * dt).to_base_units()
return sink_flow, source_flow
def _get_sink_source_flux(system, variable, time, dt, f_var):
v1 = np.ones(system.N_boxes)
fluxes = system.fluxes
A_flux = system.get_variable_internal_flux_2Darray(variable,
time, fluxes)
A_flux = (A_flux.T * f_var[:, variable.id]).T
s_flux_i = bs_utils.dot(A_flux, v1)
q_flux_i = bs_utils.dot(A_flux.T, v1)
    s_flux_e = system.get_variable_flux_sink_1Darray(variable,
            time, fluxes) * f_var[:, variable.id]
q_flux_e = system.get_variable_flux_source_1Darray(variable,
time, fluxes)
sink_flux = ((s_flux_i + s_flux_e) * dt).to_base_units()
source_flux = ((q_flux_i + q_flux_e) * dt).to_base_units()
return sink_flux, source_flux
def _get_sink_source_process(system, variable, time, dt, f_var):
processes = system.processes
    s_process = system.get_variable_process_sink_1Darray(variable,
            time, processes) * f_var[:, variable.id]
q_process = system.get_variable_process_source_1Darray(variable,
time, processes)
sink_process = (s_process * dt).to_base_units()
source_process = (q_process * dt).to_base_units()
return sink_process, source_process
def _get_sink_source_reaction(system, variable, time, dt, f_var):
reactions = system.reactions
rr_cube = system.get_reaction_rate_3Darray(time, reactions)
    # Apply the sink-reduction factors f_var to the reaction-rate cube
if np.any(f_var < 1):
f_rr_cube = np.ones_like(rr_cube)
for index in np.argwhere(f_var < 1):
reduction_factor = f_var[tuple(index)]
box = system.box_list[index[0]]
box_name = box.name
variable_name = system.variable_list[index[1]].name
sink_reaction_indecies = np.argwhere(rr_cube[index[0], index[1], :].magnitude < 0)
sink_reaction_indecies = list(sink_reaction_indecies.flatten())
for sink_reaction_index in sink_reaction_indecies:
if f_rr_cube[index[0], index[1], sink_reaction_index] > reduction_factor:
f_rr_cube[index[0], :, sink_reaction_index] = reduction_factor
rr_cube *= f_rr_cube
# Set all positive values to 0
sink_rr_cube = np.absolute(rr_cube.magnitude.clip(max=0)) * rr_cube.units
# Set all negative values to 0
source_rr_cube = rr_cube.magnitude.clip(min=0) * rr_cube.units
s_reaction = sink_rr_cube.sum(axis=2)[:, variable.id]
q_reaction = source_rr_cube.sum(axis=2)[:, variable.id]
sink_reaction = (s_reaction * dt).to_base_units()
source_reaction = (q_reaction * dt).to_base_units()
return sink_reaction, source_reaction
def _get_dvar(system, variable, time, dt, f_var, f_flow):
# Get variables sources (q) and sinks (s)
# i=internal, e=external
sink_flow, source_flow = _get_sink_source_flow(
system, variable, time, dt, f_var, f_flow)
sink_flux, source_flux = _get_sink_source_flux(
system, variable, time, dt, f_var)
sink_process, source_process = _get_sink_source_process(
system, variable, time, dt, f_var)
sink_reaction, source_reaction = _get_sink_source_reaction(
system, variable, time, dt, f_var)
net_sink = sink_flow + sink_flux + sink_process + sink_reaction
net_source = (source_flow + source_flux + source_process +
source_reaction)
net_sink = net_sink.to_base_units()
net_source = net_source.to_base_units()
dvar = (net_source - net_sink).to_base_units()
return dvar, net_sink, net_source
class Solver:
"""Class that simulates the evolution of a BoxModelSystem in time.
Functions:
solve: Solve the complete system. That means all Fluid mass flows
are calculated together with processes/reactions/fluxes/flows
of variables that are traced within the system. Returns a
Solution instance which contains the time series of all system
quantities and can also plot them.
Attributes:
system (System): System which is simulated.
"""
def __init__(self, system):
self.system_initial = system
def solve(self, total_integration_time, dt, debug=False):
"""Simulate the time evolution of all variables within the system.
Collect all information about the system, create differential
        equations from this information and integrate them (numerically)
into the future.
Args:
system (System): The system that is simulated.
total_integration_time (pint.Quantity [T]): The time span which
the system should be solved into the "future". The system will
be simulated for the time period zero to approximately
total_integration_time (depending whether
total_integration_time is a multiple of dt; if not the real
integration horizon will be be bigger than
[0, total_integration_time]).
dt (pint.Quantity [T]): Size of the timestep for the simulation.
The bigger the timestep the faster the simulation will be
calculated, however, if the timestep is chosen too high
there can arise numerical instabilites!
debug (bool): Activates debugging mode (pdb.set_trace()).
Defaults to False.
"""
# Start time of function
func_start_time = time_module.time()
# Saves the time since the start of the simulate at which the last
# save (pickling) was conducted
last_save_timedelta = 0
if debug:
pdb.set_trace()
# Get number of time steps - round up if there is a remainder
N_timesteps = math.ceil(total_integration_time / dt)
# Recalculate total integration time based on the number of timesteps
total_integration_time = N_timesteps * dt
print('Start solving the BoxModelSystem...')
print('- total integration time: {}'.format(total_integration_time))
print('- dt (time step): {}'.format(dt))
print('- number of time steps: {}'.format(N_timesteps))
time = total_integration_time * 0
self.system = copy.deepcopy(self.system_initial)
sol = bs_solution.Solution(self.system, N_timesteps, dt)
# Save initial state to solution
for box in self.system.box_list:
# sol.df.loc[0] = np.nan
sol.df.loc[0, (box.name,'mass')] = box.fluid.mass.magnitude
sol.df.loc[0, (box.name,'volume')] = \
self.system.get_box_volume(box).magnitude
for variable in self.system.variable_list:
var_name = variable.name
sol.df.loc[0, (box.name,var_name)] = \
box.variables[var_name].mass.magnitude
progress = 0
for timestep in range(N_timesteps):
# Calculate progress in percentage of processed timesteps
progress_old = progress
progress = int(float(timestep) / float(N_timesteps)*10) * 10.0
if progress != progress_old:
print("{}%".format(progress))
            # Check if enough time has passed since the last save
            # was conducted.
time_since_last_save = (time_module.time() - func_start_time
- last_save_timedelta)
if time_since_last_save > 6:
last_save_timedelta = time_module.time() - func_start_time
self.save('{}_TS{}.pickle'.format(self.system.name,
timestep))
time += dt
##################################################
# Calculate Mass fluxes
##################################################
dm, f_flow = self._calculate_mass_flows(time, dt)
##################################################
# Calculate Variable changes due to PROCESSES,
            # REACTIONS, FLUXES and FLOWS
##################################################
dvar = self._calculate_changes_of_all_variables(
time, dt, f_flow)
##################################################
# Apply changes to Boxes and save values to
# Solution instance
##################################################
for box in self.system.box_list:
# Write changes to box objects
box.fluid.mass += dm[box.id]
# Save mass to Solution instance
sol.df.loc[timestep, (box.name, 'mass')] = \
box.fluid.mass.magnitude
sol.df.loc[timestep, (box.name, 'volume')] = \
self.system.get_box_volume(box).magnitude
for variable in self.system.variable_list:
var_name = variable.name
self.system.boxes[box.name].variables[var_name].mass += \
dvar[box.id, variable.id]
sol.df.loc[timestep, (box.name,variable.name)] = \
box.variables[variable.name].mass.magnitude
# End Time of Function
func_end_time = time_module.time()
print(
'Function "solve(...)" used {:3.3f}s'.format(
func_end_time - func_start_time))
return sol
# PICKLING
def save(self, file_name):
"""Pickle instance and save to file_name."""
with open(file_name, 'wb') as f:
pickle.dump(self, f)
@classmethod
def load(self, file_name):
"""Load pickled instance from file_name."""
with open(file_name, 'rb') as f:
solution = pickle.load(f)
if not isinstance(solution, Solution):
raise ValueError(
'Loaded pickle object is not a Solution instance!')
return solution
# HELPER functions
def _calculate_mass_flows(self, time, dt):
"""Calculate mass changes of every box.
Args:
time (pint.Quantity [T]): Current time (age) of the system.
dt (pint.Quantity [T]): Timestep used.
Returns:
dm (numpy 1D array of pint.Quantities): Mass changes of every box.
f_flow (numpy 1D array): Reduction coefficient of the mass
flows (due to becoming-empty boxes -> box mass cannot
                decrease below 0kg).
"""
        # f_flow is the reduction coefficient of the "sink-flows" of each box
# scaling factor for sinks of each box
f_flow = np.ones(self.system.N_boxes)
v1 = np.ones(self.system.N_boxes)
m_ini = self.system.get_fluid_mass_1Darray()
        # get internal flow matrix and calculate the internal source and sink
# vectors. Also get the external sink and source vector
A = self.system.get_fluid_mass_internal_flow_2Darray(time)
# internal
s_i = bs_utils.dot(A, v1)
q_i = bs_utils.dot(A.T, v1)
s_e = self.system.get_fluid_mass_flow_sink_1Darray(time)
q_e = self.system.get_fluid_mass_flow_source_1Darray(time)
# calculate first estimate of mass change vector
dm = (q_e + q_i - s_e - s_i) * dt
# calculate first estimate of mass after timestep
m = m_ini + dm
while np.any(m.magnitude < 0):
argmin = np.argmin(m)
# Calculate net sink and source and mass of the 'empty' box.
net_source = (q_e[argmin] + q_i[argmin])*dt
net_sink = (s_e[argmin] + s_i[argmin])*dt
available_mass = m_ini[argmin]
total_mass = (net_source + available_mass).to_base_units()
if total_mass.magnitude > 0:
f_new = (total_mass / net_sink).to_base_units().magnitude
f_flow[argmin] = min(f_new, f_flow[argmin] * 0.98)
else:
f_flow[argmin] = 0
# Apply reduction of sinks of the box
A = (A.T * f_flow).T
s_i = bs_utils.dot(A, v1)
q_i = bs_utils.dot(A.T, v1)
s_e = f_flow * s_e
dm = (q_e + q_i - s_e - s_i) * dt
m = m_ini + dm
return dm, f_flow
def _calculate_changes_of_all_variables(self, time, dt, f_flow):
""" Calculates the changes of all variable in every box.
Args:
time (pint.Quantity [T]): Current time (age) of the system.
dt (pint.Quantity [T]): Timestep used.
f_flow (numpy 1D array): Reduction coefficient of the mass flows
due to empty boxes.
Returns:
dvar (numpy 2D array of pint.Quantities): Variables changes of
every box. First dimension are the boxes, second dimension
are the variables.
"""
        # reduction coefficient of the "variable-sinks" of each box for the
# treated variable
# scaling factor for sinks of each box
f_var = np.ones([self.system.N_boxes, self.system.N_variables])
var_ini = bs_utils.stack([self.system.get_variable_mass_1Darray(
variable) for variable in self.system.variable_list], axis=-1)
while True:
dvar_list, net_sink_list, net_source_list = zip(*[self._get_dvar(
variable, time, dt, f_var, f_flow)
for variable in self.system.variable_list])
dvar = bs_utils.stack(dvar_list, axis=-1)
net_sink = bs_utils.stack(net_sink_list, axis=-1)
net_source = bs_utils.stack(net_source_list, axis=-1)
var = (var_ini + dvar).to_base_units()
            net_sink[net_sink.magnitude == 0] = np.nan  # to avoid division by zero
f_var_tmp = ((var_ini + net_source) / net_sink).magnitude
f_var_tmp[np.isnan(f_var_tmp)] = 1
f_var_tmp[f_var_tmp > 1] = 1
# If any element of f_var_tmp is smaller than one this means that
# for at least one variable in one box the sinks are bigger than
# the sum of the source and the already present variable mass.
# Thus: The mass of this variable would fall below zero!
# Reduce the sinks proportional to the ratio of the sources and
# the already present variable mass to the sinks.
if np.any(f_var_tmp < 1):
# To be sure that the sinks are reduced enough and to
                # avoid any rounding errors the reduction ratio of the sinks
# (f_var_tmp) is further decreased by a very small number.
f_var_tmp[f_var_tmp < 1] -= 1e-15 # np.nextafter(0, 1)
f_var *= f_var_tmp
else:
break
return dvar
def _get_sink_source_flow(self, variable, time, dt, f_var, f_flow):
v1 = np.ones(self.system.N_boxes)
flows = self.system.flows
A_flow = self.system.get_variable_internal_flow_2Darray(variable,
time, f_flow, flows)
A_flow = (A_flow.T * f_var[:, variable.id]).T
s_flow_i = bs_utils.dot(A_flow, v1)
q_flow_i = bs_utils.dot(A_flow.T, v1)
        s_flow_e = self.system.get_variable_flow_sink_1Darray(variable,
                time, f_flow, flows) * f_var[:, variable.id]
q_flow_e = self.system.get_variable_flow_source_1Darray(variable,
time, flows)
sink_flow = ((s_flow_i + s_flow_e) * dt).to_base_units()
source_flow = ((q_flow_i + q_flow_e) * dt).to_base_units()
return sink_flow, source_flow
def _get_sink_source_flux(self, variable, time, dt, f_var):
v1 = np.ones(self.system.N_boxes)
fluxes = self.system.fluxes
A_flux = self.system.get_variable_internal_flux_2Darray(variable,
time, fluxes)
A_flux = (A_flux.T * f_var[:, variable.id]).T
s_flux_i = bs_utils.dot(A_flux, v1)
q_flux_i = bs_utils.dot(A_flux.T, v1)
        s_flux_e = self.system.get_variable_flux_sink_1Darray(variable,
                time, fluxes) * f_var[:, variable.id]
q_flux_e = self.system.get_variable_flux_source_1Darray(variable,
time, fluxes)
sink_flux = ((s_flux_i + s_flux_e) * dt).to_base_units()
source_flux = ((q_flux_i + q_flux_e) * dt).to_base_units()
return sink_flux, source_flux
def _get_sink_source_process(self, variable, time, dt, f_var):
processes = self.system.processes
        s_process = self.system.get_variable_process_sink_1Darray(variable,
                time, processes) * f_var[:, variable.id]
q_process = self.system.get_variable_process_source_1Darray(variable,
time, processes)
sink_process = (s_process * dt).to_base_units()
source_process = (q_process * dt).to_base_units()
return sink_process, source_process
def _get_sink_source_reaction(self, variable, time, dt, f_var):
reactions = self.system.reactions
rr_cube = self.system.get_reaction_rate_3Darray(time, reactions)
        # Apply the sink-reduction factors f_var to the reaction-rate cube
if np.any(f_var < 1):
f_rr_cube = np.ones_like(rr_cube)
for index in np.argwhere(f_var < 1):
reduction_factor = f_var[tuple(index)]
box = self.system.box_list[index[0]]
box_name = box.name
variable_name = self.system.variable_list[index[1]].name
sink_reaction_indecies = np.argwhere(rr_cube[index[0], index[1], :].magnitude < 0)
sink_reaction_indecies = list(sink_reaction_indecies.flatten())
for sink_reaction_index in sink_reaction_indecies:
if f_rr_cube[index[0], index[1], sink_reaction_index] > reduction_factor:
f_rr_cube[index[0], :, sink_reaction_index] = reduction_factor
rr_cube *= f_rr_cube
# Set all positive values to 0
sink_rr_cube = np.absolute(rr_cube.magnitude.clip(max=0)) * rr_cube.units
# Set all negative values to 0
source_rr_cube = rr_cube.magnitude.clip(min=0) * rr_cube.units
s_reaction = sink_rr_cube.sum(axis=2)[:, variable.id]
q_reaction = source_rr_cube.sum(axis=2)[:, variable.id]
sink_reaction = (s_reaction * dt).to_base_units()
source_reaction = (q_reaction * dt).to_base_units()
return sink_reaction, source_reaction
def _get_dvar(self, variable, time, dt, f_var, f_flow):
# Get variables sources (q) and sinks (s)
# i=internal, e=external
sink_flow, source_flow = self._get_sink_source_flow(variable,
time, dt, f_var, f_flow)
sink_flux, source_flux = self._get_sink_source_flux(variable,
time, dt, f_var)
sink_process, source_process = self._get_sink_source_process(
variable, time, dt, f_var)
sink_reaction, source_reaction = self._get_sink_source_reaction(
variable, time, dt, f_var)
net_sink = sink_flow + sink_flux + sink_process + sink_reaction
net_source = (source_flow + source_flux + source_process +
source_reaction)
net_sink = net_sink.to_base_units()
net_source = net_source.to_base_units()
dvar = (net_source - net_sink).to_base_units()
return dvar, net_sink, net_source
| mit | -289,562,725,548,882,940 | 37.305897 | 98 | 0.57612 | false |
CroatianMeteorNetwork/cmn_binviewer | old/CMN_binViewer_v1.40.py | 1 | 73760 | #!/usr/bin/python
# -*- coding: utf-8 -*-
""" CMN binViewer
Croatian Meteor Network
Author: Denis Vida, 2014.
"""
version = 1.4
import os
import errno
import glob
import time
import wx
import tkFileDialog
import tkMessageBox
import threading
from shutil import copy2
from Tkinter import Tk, W, E, IntVar, BooleanVar, StringVar, DoubleVar, Frame, ACTIVE, END, Listbox, Menu, PhotoImage, NORMAL, DISABLED, Entry, Scale, Button
from ttk import Label, Style, LabelFrame, Checkbutton, Radiobutton, Scrollbar
from PIL import Image as img
from PIL import ImageTk
from FF_bin_suite import readFF, buildFF, colorize_maxframe, max_nomean, load_dark, load_flat, process_array, saveImage, make_flat_frame, makeGIF, get_detection_only, adjust_levels
global_bg = "Black"
global_fg = "Gray"
config_file = 'config.ini'
def mkdir_p(path):
""" Makes a directory and handles all errors"""
try:
os.makedirs(path)
except OSError, exc:
if exc.errno == errno.EEXIST:
pass
else: raise
class StyledButton(Button):
""" Button with style """
def __init__(self, *args, **kwargs):
Button.__init__(self, *args, **kwargs)
self.configure(foreground = global_fg, background = global_bg, borderwidth = 3)
class StyledEntry(Entry):
""" Entry box with style """
def __init__(self, *args, **kwargs):
Entry.__init__(self, *args, **kwargs)
self.configure(foreground = global_fg, background = global_bg, insertbackground = global_fg, disabledbackground = global_bg, disabledforeground = "DimGray")
class ConstrainedEntry(StyledEntry):
""" Entry box with constrained values which can be input (0-255)"""
def __init__(self, *args, **kwargs):
StyledEntry.__init__(self, *args, **kwargs)
vcmd = (self.register(self.on_validate),"%P")
self.configure(validate="key", validatecommand=vcmd)
#self.configure(foreground = global_fg, background = global_bg, insertbackground = global_fg)
def disallow(self):
self.bell()
def on_validate(self, new_value):
try:
if new_value.strip() == "": return True
value = int(new_value)
if value < 0 or value > 255:
self.disallow()
return False
except ValueError:
self.disallow()
return False
return True
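# Note (added): on_validate above rejects any keystroke that would produce a value
# outside 0-255 (or a non-integer) and rings the bell instead, so the entry can
# only ever hold valid 8-bit level values.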
class Video(threading.Thread):
""" Class for handling video showing in another thread"""
def __init__(self, viewer_class, img_path):
super(Video, self).__init__()
self.viewer_class = viewer_class #Set main binViewer class to be callable inside Video class
self.img_path = img_path
#global readFF
#self.readFF_video = self.viewer_class.readFF_decorator(readFF) #Decorate readFF function by also passing datatype
def run(self):
temp_frame = self.viewer_class.temp_frame.get()
end_frame = self.viewer_class.end_frame.get()
start_frame = self.viewer_class.start_frame.get()
starting_image = self.viewer_class.current_image
#video_cache = [] #Storing the fist run of reading from file to an array
while self.viewer_class.stop_video.get() == False: #Repeat until video flag is set to stop
start_time = time.clock() #Time the script below to achieve correct FPS
if temp_frame>=end_frame:
self.viewer_class.temp_frame.set(start_frame)
temp_frame = start_frame
else:
temp_frame += 1
##Cache video files during first run
#if len(video_cache) < (end_frame - start_frame + 1):
img_array = buildFF(readFF(self.img_path, datatype = self.viewer_class.data_type.get()), temp_frame)
# video_cache.append(img_array)
#else:
#
# img_array = video_cache[temp_frame - start_frame] #Read cached video frames in consecutive runs
self.viewer_class.img_data = img_array
temp_image = ImageTk.PhotoImage(img.fromarray(img_array).convert("RGB")) #Prepare for showing
self.viewer_class.imagelabel.configure(image = temp_image) #Set image to image label
self.viewer_class.imagelabel.image = temp_image
#Set timestamp
self.viewer_class.set_timestamp(temp_frame)
#Sleep for 1/FPS with corrected time for script running time
end_time = time.clock()
script_time = float(end_time - start_time)
if not script_time > 1/self.viewer_class.fps.get(): #Don't run sleep if the script time is bigger than FPS
time.sleep(1/self.viewer_class.fps.get() - script_time)
class binViewer(Frame):
def __init__(self, parent):
""" Runs only when the viewer class is created (i.e. on the program startup only) """
#parent.geometry("1366x768")
Frame.__init__(self, parent, bg = global_bg)
        parent.configure(bg = global_bg) #Set background color
parent.grid_columnconfigure(0, weight=1)
parent.grid_rowconfigure(0, weight=1)
self.grid(sticky="NSEW") #Expand frame to all directions
#self.grid_propagate(0)
self.parent = parent
#Define variables
self.filter_no = 6 #Number of filters
self.dir_path = os.path.abspath(os.sep)
self.layout_vertical = BooleanVar() #Layout variable
#Read configuration file
config_content = self.read_config()
self.dir_path = config_content[2]
print self.dir_path
orientation = config_content[0]
if orientation == 0:
self.layout_vertical.set(False)
else:
self.layout_vertical.set(True)
self.mode = IntVar()
self.minimum_frames = IntVar()
self.minimum_frames.set(0)
self.detection_dict = {}
self.data_type_var = IntVar() #For GUI
self.data_type_var.set(0) #Set to Auto
self.data_type = IntVar() #For backend
self.data_type.set(1) #Set to CAMS
self.filter = IntVar()
self.old_filter = IntVar()
self.img_data = 0
self.current_image = ''
self.img_name_type = 'maxpixel'
self.dark_status = BooleanVar()
self.dark_status.set(False)
self.flat_status = BooleanVar()
self.flat_status.set(False)
self.dark_name = StringVar()
self.dark_name.set("dark.bmp")
self.flat_name = StringVar()
self.flat_name.set("flat.bmp")
self.deinterlace = BooleanVar()
self.deinterlace.set(False)
self.hold_levels = BooleanVar()
self.hold_levels.set(False)
self.sort_folder_path = StringVar()
self.sort_folder_path.set("sorted")
self.bin_list = StringVar()
self.print_name_status = BooleanVar()
self.print_name_status.set(False)
self.start_frame = IntVar()
self.start_frame.set(0)
self.end_frame = IntVar()
self.end_frame.set(255)
self.temp_frame = IntVar()
self.temp_frame.set(self.start_frame.get())
self.frame_slider_value = IntVar()
self.frame_slider_value.set(0)
self.starting_image = '' #Used for video
self.stop_video = BooleanVar()
self.stop_video.set(True)
#GIF
self.gif_embed = BooleanVar()
self.gif_embed.set(False)
self.repeat = BooleanVar()
self.repeat.set(True)
self.perfield_var = BooleanVar()
self.perfield_var.set(False)
self.fps = IntVar()
self.fps.set(config_content[1])
#Levels
self.gamma = DoubleVar()
self.gamma.set(1.0)
#Frames visibility
self.save_image_frame = BooleanVar()
self.save_image_frame.set(True)
self.image_levels_frame = BooleanVar()
self.image_levels_frame.set(True)
self.save_animation_frame = BooleanVar()
self.save_animation_frame.set(True)
self.frame_scale_frame = BooleanVar()
self.frame_scale_frame.set(False)
self.old_animation_frame = BooleanVar()
self.old_animation_frame.set(True)
# Misc
global readFF
readFF = self.readFF_decorator(readFF) #Decorate readFF function by also passing datatype, so that readFF doesn't have to be changed through the code
# Initilize GUI
self.initUI()
# Bind key presses, window changes, etc. (key bindings)
parent.bind("<Home>", self.move_top)
parent.bind("<End>", self.move_bottom)
parent.bind("<Up>", self.move_img_up)
parent.bind("<Down>", self.move_img_down)
parent.bind("<Prior>", self.captured_mode_set) #Page up
parent.bind("<Next>", self.detected_mode_set) #Page up
parent.bind("<Left>", self.filter_left)
parent.bind("<Right>", self.filter_right)
#parent.bind("<Key>", self.update_image)
#parent.bind("<Button-1>", self.update_image)
parent.bind("<F1>", self.maxframe_set)
parent.bind("<F2>", self.colorized_set)
parent.bind("<F3>", self.detection_only_set)
parent.bind("<F4>", self.avgframe_set)
parent.bind("<F5>", self.odd_set)
parent.bind("<F6>", self.even_set_toggle)
parent.bind("<F7>", self.frame_filter_set)
parent.bind("<F9>", self.video_set)
parent.bind("<Delete>", self.deinterlace_toggle)
parent.bind("<Insert>", self.hold_levels_toggle)
#parent.bind("<F2>", self.flat_toggle)
#parent.bind("<F1>", self.dark_toggle)
parent.bind("<Return>", self.copy_bin_to_sorted)
def readFF_decorator(self, func):
""" Decorator used to pass self.data_type to readFF without changing all readFF statements in the code """
def inner(*args, **kwargs):
if "datatype" in kwargs:
return func(*args, **kwargs)
else:
return func(*args, datatype = self.data_type.get())
return inner
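    # Note (added for clarity): after the decoration done in __init__, a call such
    # as readFF(img_path) behaves like readFF(img_path, datatype=self.data_type.get()),
    # so the rest of the code never needs to pass the data type explicitly.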
def correct_datafile_name(self, datafile):
""" Returns True if the given string is a proper FF*.bin or Skypatrol name (depending on data type), else it returns false"""
if self.data_type.get() == 1: #CAMS data type
if len(datafile) == 37: #e.g. FF451_20140819_003718_000_0397568.bin
if len([ch for ch in datafile if ch =="_"]) == 4:
if datafile.split('.')[-1] =='bin':
if datafile[0:2] =="FF":
return True
else: #Skypatrol data type
if len(datafile) == 12: #e.g. 00000171.bmp
if datafile.split('.')[-1] =='bmp':
return True
return False
def read_config(self):
""" Reads the configuration file """
orientation = 1
fps = 25
dir_path = self.dir_path
read_list = (orientation, fps)
try:
config_lines = open(config_file).readlines()
except:
tkMessageBox.showerror("Configuration file "+config_file+" not found! Program files are compromised!")
return read_list
for line in config_lines:
if line[0] == '#' or line == '':
continue
line = line.split('#')[0].split('=')
if 'orientation' in line[0]:
orientation = int(line[1])
if 'fps' in line[0]:
fps = int(line[1])
if 'dir_path' in line[0]:
dir_path = line[1].strip()
read_list = (orientation, fps, dir_path)
return read_list
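    # Illustrative config.ini content (values match the defaults written by
    # write_config below; the dir_path is a made-up example):
    #   orientation = 1  # 0 vertical, 1 horizontal
    #   fps = 25
    #   dir_path = C:\CAMS\CapturedFiles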
def write_config(self):
""" Writes the configuration file """
orientation = int(self.layout_vertical.get())
fps = int(self.fps.get())
if not fps in (25, 30):
fps = 25
try:
new_config = open(config_file, 'w')
except:
return False
new_config.write("#Configuration file\n#DO NOT CHANGE VALUES MANUALLY\n\n")
new_config.write("orientation = "+str(orientation)+" # 0 vertical, 1 horizontal\n")
new_config.write("fps = "+str(fps)+"\n")
if ('CAMS' in self.dir_path) or ('Captured' in self.dir_path):
temp_path = self.dir_path
new_path = []
for line in temp_path.split(os.sep):
if 'Captured' in line:
new_path.append(line)
break
new_path.append(line)
temp_path = (os.sep).join(new_path)
new_config.write("dir_path = "+temp_path.strip()+"\n")
return True
def update_data_type(self):
""" Updates the data_type variable to match data type of directory content. If there are CAMS files, it returns 1, if the Skypatrol files prevail, it returns 2"""
data_type_var = self.data_type_var.get()
if data_type_var == 0:
#Auto - determine data type
bin_count = len(glob.glob1(self.dir_path,"*.bin"))
bmp_count = len(glob.glob1(self.dir_path,"*.bmp"))
dir_contents = os.listdir(self.dir_path)
            if bin_count >= bmp_count or any(name.startswith("FTPdetectinfo_") for name in dir_contents):
self.data_type.set(1) #Set to CAMS if there are more bin files
self.end_frame.set(255)
else:
self.data_type.set(2) #Set to Skypatrol if there are more BMP files
self.end_frame.set(1500)
elif data_type_var == 1:
#CAMS
self.data_type.set(1)
self.end_frame.set(255)
elif data_type_var == 2:
#Skypatrol
self.data_type.set(2)
self.end_frame.set(1500)
self.update_listbox(self.get_bin_list()) #Update listbox
self.mode.set(1)
self.filter.set(1)
self.change_mode()
self.move_top(0) #Move listbox cursor to the top
self.update_image(0)
def update_layout(self):
""" Updates the layout (horizontal/vertical) """
self.menuBar.entryconfig("Window", state = "normal")
#List of adjustable frames
layout_frames = [self.save_image_frame, self.image_levels_frame, self.save_animation_frame, self.frame_scale_frame]
enabled_frames = 0
for frame in layout_frames:
if frame.get() == True:
enabled_frames -= 1
#First column of vertical layout
start_column = 3 + 3
if self.layout_vertical.get() == True:
#Vertical
self.ff_list.config(height = 37) #Scrollbar size
self.hold_levels_chk_horizontal.grid_forget()
self.hold_levels_chk.grid(row = 6, column = 1, sticky = "W", pady=5)
#Check if Save image frame is enabled in Windows menu, if not, hide it
if self.save_image_frame.get() == True:
self.save_panel.grid(row = 8, column = start_column+enabled_frames, rowspan = 2, sticky = "NS", padx=2, pady=5, ipadx=3, ipady=3)
enabled_frames += 1
self.print_name_btn.grid(row = 9, column = 6, rowspan = 2)
else:
self.save_panel.grid_forget()
#Check if Image levels frame is enabled in Windows menu, if not, hide it
if self.image_levels_frame.get() == True:
self.levels_label.grid(row = 8, column = start_column+enabled_frames, rowspan = 2, sticky = "NS", padx=2, pady=5, ipadx=3, ipady=3)
enabled_frames += 1
else:
self.levels_label.grid_forget()
#Check if Save animation frame is enabled in Windows menu, if not, hide it
if self.save_animation_frame.get() == True:
self.animation_panel.grid(row = 8, column = start_column+enabled_frames, rowspan = 2, columnspan = 1, sticky = "NS", padx=2, pady=5, ipadx=3, ipady=3)
enabled_frames += 1
self.gif_make_btn.grid(row = 9, column = 7, rowspan = 4, sticky = "NSEW")
else:
self.animation_panel.grid_forget()
#Frame scale if filter "Frames" is chosen
if self.frame_scale_frame.get() == True:
self.frames_slider_panel.grid(row = 8, column = start_column+enabled_frames, rowspan = 2, sticky = "NS", padx=2, pady=5, ipadx=3, ipady=3)
enabled_frames += 1
else:
self.frames_slider_panel.grid_forget()
else:
#Horizontal
self.menuBar.entryconfig("Window", state = "disabled")
self.ff_list.config(height = 30) #Scrollbar size
self.hold_levels_chk.grid_forget()
self.hold_levels_chk_horizontal.grid(row = 11, column = 4, columnspan = 2, sticky = "W")
self.save_panel.grid(row = 3, column = 6, rowspan = 1, sticky = "NEW", padx=2, pady=5, ipadx=3, ipady=3)
self.print_name_btn.grid(row = 11, column = 3, rowspan = 1)
self.animation_panel.grid(row = 4, column = 6, rowspan = 1, columnspan = 1, sticky = "NEW", padx=2, pady=5, ipadx=3, ipady=3)
self.levels_label.config(width = 10)
self.levels_label.grid(row = 5, column = 6, rowspan = 1, padx=2, pady=5, ipadx=3, ipady=3, sticky ="NEW")
self.gif_make_btn.grid(row = 13, column = 4, rowspan = 2, columnspan = 2, sticky = "EW", padx=2, pady=5)
self.frames_slider_panel.grid(row = 6, column = 6, rowspan = 1, padx=2, pady=5, ipadx=3, ipady=3, sticky ="NEW")
self.write_config()
def move_img_up(self, event):
""" Moves one list entry up if the focus is not on the list, when the key Up is pressed"""
if not self.ff_list is self.parent.focus_get():
self.ff_list.focus()
try:
cur_index = int(self.ff_list.curselection()[0])
except:
return None
next_index = cur_index - 1
if next_index < 0:
next_index = 0
self.ff_list.activate(next_index)
self.ff_list.selection_clear(0, END)
self.ff_list.selection_set(next_index)
self.ff_list.see(next_index)
self.update_image(1)
def move_img_down(self, event):
""" Moves one list entry down if the focus is not on the list, when the key Down is pressed"""
if not self.ff_list is self.parent.focus_get():
self.ff_list.focus()
try:
cur_index = int(self.ff_list.curselection()[0])
except:
return None
next_index = cur_index + 1
size = self.ff_list.size()-1
if next_index > size:
next_index = size
self.ff_list.activate(next_index)
self.ff_list.selection_clear(0, END)
self.ff_list.selection_set(next_index)
self.ff_list.see(next_index)
self.update_image(1)
def move_top(self, event):
""" Moves to the top entry when Home key is pressed"""
if not self.ff_list is self.parent.focus_get():
self.ff_list.focus()
self.ff_list.activate(0)
self.ff_list.selection_clear(0, END)
self.ff_list.selection_set(0)
self.ff_list.see(0)
self.update_image(0)
def move_bottom(self, event):
""" Moves to the last entry when End key is pressed"""
if not self.ff_list is self.parent.focus_get():
self.ff_list.focus()
self.ff_list.activate(END)
self.ff_list.selection_clear(0, END)
self.ff_list.selection_set(END)
self.ff_list.see(END)
self.update_image(0)
def move_index(self, index):
"""Moves the list cursor to given index"""
if not self.ff_list is self.parent.focus_get():
self.ff_list.focus()
self.ff_list.activate(index)
self.ff_list.selection_clear(0, END)
self.ff_list.selection_set(index)
self.ff_list.see(index)
self.update_image(0)
def captured_mode_set(self, event):
""" Change mode to captured"""
self.mode.set(1)
self.change_mode()
def detected_mode_set(self, event):
""" Change mode to detected"""
self.mode.set(2)
self.change_mode()
def maxframe_set(self, event):
""" Set maxframe filter by pressing F1"""
if self.mode.get() == 1: #Only in captured mode
self.filter.set(1)
self.update_image(0)
def colorized_set(self, event):
""" Set colored filter by pressing F2"""
if self.mode.get() == 1: #Only in captured mode
self.filter.set(2)
self.update_image(0)
def detection_only_set(self, event):
""" Set odd frame filter by pressing F4"""
self.filter.set(3)
self.update_image(0)
def avgframe_set(self, event):
""" Set odd frame filter by pressing F3"""
if self.mode.get() == 1: #Only in captured mode
self.filter.set(4)
self.update_image(0)
def odd_set(self, event):
""" Set odd frame filter by pressing F5"""
if self.mode.get() == 1: #Only in captured mode
self.filter.set(5)
self.update_image(0)
def even_set_toggle(self, event):
"""Set even frame filter by pressing F6, an toggle with odd frame by further pressing"""
if self.mode.get() == 1: #Only in captured mode
if self.filter.get() == 6:
self.filter.set(5)
else:
self.filter.set(6)
self.update_image(0)
def frame_filter_set(self, event):
""" Set Frame filter by pressing F7 """
self.filter.set(7)
self.update_image(0)
def video_set(self, event):
""" Sets VIDEO filter by pressing F9 """
self.filter.set(10)
self.update_image(0)
def filter_left(self, event):
""" Moves the filter field to the left"""
if self.mode.get() == 1: #Only in captured mode
next_filter = self.filter.get() - 1
if next_filter<1 or next_filter>self.filter_no:
next_filter = self.filter_no
self.filter.set(next_filter)
else: #In detected mode
self.filter.set(3)
self.update_image(0)
def filter_right(self, event):
""" Moves the filter field to the right"""
if self.mode.get() == 1: #Only in captured mode
next_filter = self.filter.get() + 1
if next_filter>self.filter_no:
next_filter = 1
self.filter.set(next_filter)
else: #In detected mode
self.filter.set(3)
self.update_image(0)
def deinterlace_toggle(self, event):
""" Turns the deinterlace on/off"""
if self.deinterlace.get() == True:
self.deinterlace.set(False)
else:
self.deinterlace.set(True)
self.update_image(0)
def hold_levels_toggle(self, event):
""" Toggle Hold levels button """
if self.hold_levels.get() == True:
self.hold_levels.set(False)
else:
self.hold_levels.set(True)
def dark_toggle(self, event):
"""Toggles the dark frame on/off"""
if self.dark_status.get() == True:
self.dark_status.set(False)
else:
self.dark_status.set(True)
self.update_image(0)
def open_dark_path(self):
""" Opens dark frame via file dialog"""
temp_dark = tkFileDialog.askopenfilename(initialdir = self.dir_path, parent = self.parent, title = "Choose dark frame file", initialfile = "dark.bmp", defaultextension = ".bmp", filetypes = [('BMP files', '.bmp')])
temp_dark = temp_dark.replace('/', os.sep)
if temp_dark != '':
self.dark_name.set(temp_dark)
def open_flat_path(self):
""" Opens flat frame via file dialog"""
temp_flat = tkFileDialog.askopenfilename(initialdir = self.dir_path, parent = self.parent, title = "Choose flat frame file", initialfile = "flat.bmp", defaultextension = ".bmp", filetypes = [('BMP files', '.bmp')])
temp_flat = temp_flat.replace('/', os.sep)
if temp_flat != '':
self.flat_name.set(temp_flat)
def flat_toggle(self, event):
"""Toggles the flat frame on/off"""
if self.flat_status.get() == True:
self.flat_status.set(False)
else:
self.flat_status.set(True)
self.update_image(0)
def update_image(self, event, update_levels = False):
""" Updates the current image on the screen"""
self.dir_path = self.dir_path.replace('/', os.sep)
self.status_bar.config(text = "View image") #Update status bar
try: #Check if the list is empty. If it is, do nothing.
self.current_image = self.ff_list.get(self.ff_list.curselection()[0])
except:
return 0
self.stop_video.set(True) #Stop video every image update
try:
self.video_thread.join() #Wait for the video thread to finish
del self.video_thread #Delete video thread
except:
pass
if self.mode.get() == 2: #Detection mode preparations, find the right image and set the start and end frames into entry fields
temp_img = self.detection_dict[self.current_image] #Get image data
self.current_image = temp_img[0]
start_frame = temp_img[1][0] #Set start frame
end_frame = temp_img[1][1] #Set end frame
start_temp = start_frame-5
end_temp = end_frame+5
start_temp = 0 if start_temp<0 else start_temp
if self.data_type.get() == 1: #CAMS data type
end_temp = 255 if end_temp>255 else end_temp
            else: #Skypatrol data type
end_temp = 1500 if end_temp>1500 else end_temp
#print start_temp, end_temp
self.start_frame.set(start_temp)
self.end_frame.set(end_temp)
else: #Prepare for Captured mode
if event == 1: #Set only when the image is changed
self.start_frame.set(0)
if self.data_type.get() == 1: #CAMS
self.end_frame.set(255)
self.frame_scale.config(to = 255)
else: #Skypatrol
self.end_frame.set(1500)
self.frame_scale.config(to = 1500)
img_path = self.dir_path+os.sep+self.current_image
if not os.path.isfile(img_path):
tkMessageBox.showerror("File error", "File not found:\n"+img_path)
return 0
dark_frame = None
flat_frame = None
flat_frame_scalar = None
# Do if the dark frame is on
if self.dark_status.get() == True:
if not os.sep in self.dark_name.get():
dark_path = self.dir_path+os.sep+self.dark_name.get()
else:
dark_path = self.dark_name.get()
try:
dark_frame = load_dark(dark_path)
except:
tkMessageBox.showerror("Dark frame file error", "Cannot find dark frame file: "+self.dark_name.get())
self.dark_status.set(False)
# Do if the flat frame is on
if self.flat_status.get() == True:
if not os.sep in self.flat_name.get():
flat_path = self.dir_path+os.sep+self.flat_name.get()
else:
flat_path = self.flat_name.get()
try:
flat_frame, flat_frame_scalar = load_flat(flat_path)
except:
tkMessageBox.showerror("Flat frame file error", "Cannot find flat frame file: "+self.flat_name.get())
self.flat_status.set(False)
# Make changes if the filter has changed
if self.old_filter.get() != self.filter.get():
            # Set all buttons to be active
self.dark_chk.config(state = NORMAL)
self.flat_chk.config(state = NORMAL)
self.deinterlace_chk.config(state = NORMAL)
self.hold_levels_chk.config(state = NORMAL)
self.max_lvl_scale.config(state = NORMAL)
self.min_lvl_scale.config(state = NORMAL)
self.gamma_scale.config(state = NORMAL)
self.windowMenu.entryconfig("Save animation", state = "normal")
self.frame_scale_frame.set(False)
self.save_animation_frame.set(self.old_animation_frame.get())
self.update_layout()
# Frames filter
if self.filter.get() == 7:
self.frame_scale.config(state = NORMAL)
self.old_animation_frame.set(self.save_animation_frame.get())
else:
self.frame_scale.config(state = DISABLED)
#Apply individual filters
if self.filter.get() == 1: #Maxpixel
img_array = process_array(readFF(img_path).maxpixel, flat_frame, flat_frame_scalar, dark_frame, self.deinterlace.get())
self.img_name_type = 'maxpixel'
self.old_filter.set(1)
elif self.filter.get() == 2: #colorized
if (update_levels == True) or (self.hold_levels.get() == True): #Adjust levels
minv_temp = self.min_lvl_scale.get()
gamma_temp = self.gamma.get()
maxv_temp = self.max_lvl_scale.get()
else:
maxv_temp = None
gamma_temp = None
minv_temp = None
#Disable check buttons, as these parameters are not used
self.dark_chk.config(state = DISABLED)
self.flat_chk.config(state = DISABLED)
self.deinterlace_chk.config(state = DISABLED)
img_array = colorize_maxframe(readFF(img_path), minv_temp, gamma_temp, maxv_temp)
self.img_name_type = 'colorized'
self.old_filter.set(2)
elif self.filter.get() == 3: #Max minus average (just detection)
            if self.mode.get() == 1: #Captured mode
self.dark_chk.config(state = DISABLED)
self.deinterlace_chk.config(state = DISABLED)
img_array = max_nomean(readFF(img_path), flat_frame, flat_frame_scalar)
self.img_name_type = 'max_nomean'
            elif self.mode.get() == 2: #Detected mode
self.dark_chk.config(state = NORMAL)
self.deinterlace_chk.config(state = NORMAL)
img_array = get_detection_only(readFF(img_path), start_frame, end_frame, flat_frame, flat_frame_scalar, dark_frame, self.deinterlace.get())
self.img_name_type = 'detected_only'
self.old_filter.set(3)
elif self.filter.get() == 4: #Average pixel
img_array = process_array(readFF(img_path).avepixel, flat_frame, flat_frame_scalar, dark_frame, self.deinterlace.get())
self.img_name_type = 'avepixel'
self.old_filter.set(4)
elif self.filter.get() == 5: #Show only odd frame
self.deinterlace_chk.config(state = DISABLED)
img_array = process_array(readFF(img_path).maxpixel, flat_frame, flat_frame_scalar, dark_frame, deinterlace = False, field = 1)
self.img_name_type = 'odd'
self.old_filter.set(5)
elif self.filter.get() == 6: #Show only even frame
self.deinterlace_chk.config(state = DISABLED)
img_array = process_array(readFF(img_path).maxpixel, flat_frame, flat_frame_scalar, dark_frame, deinterlace = False, field = 2)
self.img_name_type = 'even'
self.old_filter.set(6)
elif self.filter.get() == 7: #Show individual frames
if self.old_filter.get() != self.filter.get():
self.windowMenu.entryconfig("Save animation", state = "disabled")
self.save_animation_frame.set(False)
self.frame_scale_frame.set(True)
self.update_layout()
img_array = process_array(buildFF(readFF(img_path), self.frame_scale.get()), flat_frame, flat_frame_scalar, dark_frame, self.deinterlace.get())
self.set_timestamp(self.frame_scale.get())
            self.img_name_type = 'frame_'+str(self.frame_scale.get())
self.old_filter.set(7)
elif self.filter.get() == 10: #Show video
self.dark_chk.config(state = DISABLED)
self.flat_chk.config(state = DISABLED)
self.deinterlace_chk.config(state = DISABLED)
self.hold_levels_chk.config(state = DISABLED)
self.max_lvl_scale.config(state = DISABLED)
self.min_lvl_scale.config(state = DISABLED)
self.gamma_scale.config(state = DISABLED)
self.video_thread = Video(app, img_path) #Create video object, pass binViewer class (app) to video object
self.temp_frame.set(self.start_frame.get()) #Set temporary frame to start frame
self.stop_video.set(False) #Set "stop video" flag to False -> video will run
self.video_thread.start() #Start video thread
            self.starting_image = self.current_image #Remember the image on which the video playback was started
self.old_filter.set(10)
return 0
#Adjust levels
if (update_levels == True) or (self.hold_levels.get() == True):
if self.filter.get() != 2:
img_array = adjust_levels(img_array, self.min_lvl_scale.get(), self.gamma.get(), self.max_lvl_scale.get())
elif self.hold_levels.get() == True:
pass #Don't reset values if hold levels button is on
else:
self.min_lvl_scale.set(0)
self.max_lvl_scale.set(255)
self.gamma_scale.set(0)
self.gamma.set(1)
self.img_data = img_array #For reference, otherwise it doesn't work
temp_image = ImageTk.PhotoImage(img.fromarray(img_array).convert("RGB")) #Prepare for showing
self.imagelabel.configure(image = temp_image)
self.imagelabel.image = temp_image
#Generate timestamp
if self.filter.get() != 7:
self.set_timestamp()
def set_timestamp(self, fps = None):
""" Sets timestamp with given parameters """
if fps == None:
fps = " FFF"
else:
fps = str(fps).zfill(4)
if self.correct_datafile_name(self.current_image):
if self.data_type.get() == 1: #CAMS data type
x = self.current_image.split('_')
timestamp = x[1][0:4]+"-"+x[1][4:6]+"-"+x[1][6:8]+" "+x[2][0:2]+":"+x[2][2:4]+":"+x[2][4:6]+"."+x[3]+" "+fps
else: #Skypatrol data type
img_path = self.dir_path+os.sep+self.current_image
(mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime) = os.stat(img_path)
timestamp = time.strftime("%Y-%m-%d %H:%M:%S.000", time.gmtime(mtime))+" "+fps
else:
timestamp = "YYYY-MM-DD HH:MM.SS.mms FFF"
self.timestamp_label.config(text = timestamp) #Change the timestamp label
def wxDirchoose(self, initialdir, title, _selectedDir = '.'):
""" Opens a dialog for choosing a directory. """
_userCancel = ''
app = wx.App()
dialog = wx.DirDialog(None, title, style=1 ,defaultPath=initialdir, pos = (10,10))
if dialog.ShowModal() == wx.ID_OK:
_selectedDir = dialog.GetPath()
return _selectedDir
else:
dialog.Destroy()
return _userCancel
def askdirectory(self):
"""Returns a selected directoryname."""
self.filter.set(1)
self.stop_video.set(True) #Stop video every image update
try:
self.video_thread.join() #Wait for the video thread to finish
del self.video_thread #Delete video thread
except:
pass
self.status_bar.config(text = "Opening directory...")
old_dir_path = self.dir_path
#Opens the file dialog
self.dir_path = self.wxDirchoose(initialdir = self.dir_path, title = "Open the directory with FF*.bin files, then click OK")
if self.dir_path == '':
self.dir_path = old_dir_path
self.update_listbox(self.get_bin_list()) #Update listbox
self.update_data_type()
self.parent.wm_title("CMN_binViewer: "+self.dir_path) #Update dir label
self.mode.set(1)
self.filter.set(1)
self.change_mode()
self.move_top(0) #Move listbox cursor to the top
self.write_config()
def get_bin_list(self):
""" Get a list of FF*.bin files in a given directory"""
#bin_list = ["a", "b", "c", "d", "e", "f", "g"]
bin_list = [line for line in os.listdir(self.dir_path) if self.correct_datafile_name(line)]
return bin_list
def update_listbox(self, bin_list):
""" Updates the listbox with the current entries"""
self.ff_list.delete(0, END)
for line in bin_list:
self.ff_list.insert(END, line)
def save_image(self, extension, save_as):
""" Saves the current image with given extension and parameters"""
current_image = self.ff_list.get(ACTIVE)
if current_image == '':
tkMessageBox.showerror("Image error", "No image selected! Saving aborted.")
return 0
img_name = current_image+"_"+self.img_name_type+'.'+extension
img_path = self.dir_path+os.sep+img_name
if save_as == True:
img_path = tkFileDialog.asksaveasfilename(initialdir = self.dir_path, parent = self.parent, title = "Save as...", initialfile = img_name, defaultextension = "."+extension)
if img_path == '':
return 0
saveImage(self.img_data, img_path, self.print_name_status.get())
self.status_bar.config(text = "Image saved: "+img_name)
def copy_bin_to_sorted(self, event):
""" Copies the current image FF*.bin file to the given directory"""
if self.current_image == '':
return 0
if (os.sep in self.sort_folder_path.get()) or ('/' in self.sort_folder_path.get()):
sorted_dir = self.sort_folder_path.get()
else:
sorted_dir = self.dir_path+os.sep+self.sort_folder_path.get()
try:
mkdir_p(sorted_dir)
except:
tkMessageBox.showerror("Path error", "The path does not exist or it is a root directory (e.g. C:\\): "+sorted_dir)
return 0
try:
copy2(self.dir_path+os.sep+self.current_image, sorted_dir+os.sep+self.current_image) #Copy the file
except:
tkMessageBox.showerror("Copy error", "Could not copy file: "+self.current_image)
return 0
self.status_bar.config(text = "Copied: "+self.current_image) #Change the status bar
def open_current_folder(self, event):
"""Opens current directory in windows explorer"""
sorted_directory = self.dir_path+os.sep+self.sort_folder_path.get()
try:
os.startfile(sorted_directory)
except:
try:
os.startfile(self.dir_path)
except:
tkMessageBox.showerror("Path not found", "Sorted folder is not created!")
return 1
return 0
def make_master_dark(self):
""" Makes the master dark frame"""
self.status_bar.config(text = "Making master dark frame, please wait...")
dark_dir = self.wxDirchoose(initialdir = self.dir_path, title = "Open the directory with dark frames, then click OK")
if dark_dir == '':
self.status_bar.config(text = "Master dark frame making aborted!")
return 0
dark_file = tkFileDialog.asksaveasfilename(initialdir = dark_dir, parent = self.parent, title = "Choose the master dark file name", initialfile = "dark.bmp", defaultextension = ".bmp", filetypes = [('BMP files', '.bmp')])
if dark_file == '':
self.status_bar.config(text = "Master dark frame making aborted!")
return 0
dark_dir = dark_dir.replace("/", os.sep)
dark_file = dark_file.replace("/", os.sep)
if (dark_file != '') and (dark_dir!=''):
if make_flat_frame(dark_dir, dark_file, col_corrected = False, dark_frame = False) == False:
tkMessageBox.showerror("Master dark frame", "The folder is empty!")
self.status_bar.config(text = "Master dark frame failed!")
return 0
else:
self.status_bar.config(text = "Files for master dark not chosen!")
self.status_bar.config(text = "Master dark frame done!")
tkMessageBox.showinfo("Master dark frame", "Master dark frame done!")
def make_master_flat(self):
""" Make master flat frame. A Directory which contains flat frames is chosen, file where flat frame will be saved, and an optional dark frame"""
self.status_bar.config(text = "Making master flat frame, please wait...")
flat_dir = self.wxDirchoose(initialdir = self.dir_path, title = "Open the directory with flat frames, then click OK")
if flat_dir == '':
self.status_bar.config(text = "Master flat frame making aborted!")
return 0
flat_file = tkFileDialog.asksaveasfilename(initialdir = flat_dir, parent = self.parent, title = "Choose the master flat file name", initialfile = "flat.bmp", defaultextension = ".bmp", filetypes = [('BMP files', '.bmp')])
if flat_file == '':
self.status_bar.config(text = "Master flat frame making aborted!")
return 0
flat_dir = flat_dir.replace("/", os.sep)
flat_file = flat_file.replace("/", os.sep)
dark_file = tkFileDialog.askopenfilename(initialdir = flat_dir, parent = self.parent, title = "OPTIONAL: Choose dark frame, if any. Click cancel for no dark frame.", initialfile = "dark.bmp", defaultextension = ".bmp", filetypes = [('BMP files', '.bmp')])
if dark_file != '':
dark_frame = load_dark(dark_file)
else:
dark_frame = False
if make_flat_frame(flat_dir, flat_file, col_corrected = False, dark_frame = dark_frame) == False:
tkMessageBox.showerror("Master flat frame", "The folder is empty!")
self.status_bar.config(text = "Master flat frame failed!")
return 0
self.status_bar.config(text = "Master flat frame done!")
tkMessageBox.showinfo("Master flat frame", "Master flat frame done!")
def make_gif(self):
""" Makes a GIF animation file with given options"""
current_image = self.current_image
if current_image == '':
tkMessageBox.showerror("Image error", "No image selected! Saving aborted.")
return 0
dark_frame = None
flat_frame = None
flat_frame_scalar = None
if self.dark_status.get() == True:
dark_path = self.dir_path+os.sep+self.dark_name.get()
try:
dark_frame = load_dark(dark_path)
except:
pass
if self.flat_status.get() == True:
flat_path = self.dir_path+os.sep+self.flat_name.get()
try:
flat_frame, flat_frame_scalar = load_flat(flat_path)
except:
pass
self.status_bar.config(text ="Making GIF, please wait... It can take up to 15 or more seconds, depending on the size and options")
gif_name = current_image.split('.')[0]+"fr_"+str(self.start_frame.get())+"-"+str(self.end_frame.get())+".gif"
gif_path = tkFileDialog.asksaveasfilename(initialdir = self.dir_path, parent = self.parent, title = "Save GIF animation", initialfile = gif_name, defaultextension = ".gif").replace("/", os.sep)
#gif_path = (os.sep).join(gif_path.split(os.sep)[:-2])
if gif_path == '': #Abort GIF making if no file is chosen
return 0
repeat_temp = self.repeat.get() #Get the repeat variable (the animation will loop if True)
if (repeat_temp == 0) or (repeat_temp == False):
repeat_temp = False
else:
repeat_temp = True
#Adjust levels
minv_temp = self.min_lvl_scale.get()
gamma_temp = self.gamma.get()
maxv_temp = self.max_lvl_scale.get()
makeGIF(FF_input = current_image, start_frame = self.start_frame.get(), end_frame = self.end_frame.get(), ff_dir=self.dir_path, deinterlace = self.deinterlace.get(), print_name = self.gif_embed.get(), Flat_frame = flat_frame, Flat_frame_scalar = flat_frame_scalar, dark_frame = dark_frame, gif_name_parse = gif_path, repeat = repeat_temp, fps = self.fps.get(), minv = minv_temp, gamma = gamma_temp, maxv = maxv_temp, perfield = self.perfield_var.get())
self.status_bar.config(text ="GIF done!")
tkMessageBox.showinfo("GIF progress", "GIF saved!")
self.write_config() #Write FPS to config file
def get_detected_list(self, minimum_frames = 0):
""" Gets a list of FF_bin files from the FTPdetectinfo with a list of frames. Used for composing the image while in DETECT mode
minimum_frames: the smallest number of detections for showing the meteor"""
minimum_frames = int(self.minimum_frames.get())
def get_frames(frame_list):
"""Gets frames for given FF*.bin file in FTPdetectinfo"""
if len(frame_list)<minimum_frames*2: #Times 2 because len(frames) actually contains every half-frame also
ff_bin_list.pop()
return None
min_frame = int(float(frame_list[0]))
max_frame = int(float(frame_list[-1]))
ff_bin_list[-1].append((min_frame, max_frame))
def convert2str(ff_bin_list):
""" Converts list format: [['FF*.bin', (start_frame, end_frame)], ... ] to string format ['FF*.bin Fr start_frame - end_frame'] """
str_ff_bin_list = []
for line in ff_bin_list:
str_ff_bin_list.append(line[0]+" Fr "+str(line[1][0]).zfill(3)+" - "+str(line[1][1]).zfill(3))
return str_ff_bin_list
ftpdetect_file = [line for line in os.listdir(self.dir_path) if ("FTPdetectinfo_" in line) and (".txt" in line) and (not "original" in line) and (len(line) == 33)]
if len(ftpdetect_file) == 0:
tkMessageBox.showerror("FTPdetectinfo error", "FTPdetectinfo file not found!")
return False
ftpdetect_file = ftpdetect_file[0]
try:
FTPdetect_file_content = open(self.dir_path+os.sep+ftpdetect_file).readlines()
except:
tkMessageBox.showerror("File error", "Could not open file: "+ftpdetect_file)
return False
if int(FTPdetect_file_content[0].split('=')[1]) == 0: #Solving issue when no meteors are in the file
return []
ff_bin_list = []
skip = 0
frame_list = []
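        #Parse the FTPdetectinfo body: each meteor entry begins after a long dashed separator
        #line, is followed by two header lines (skipped via skip = 2), and then lists one
        #detected frame number per line until the next separator.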
for line in FTPdetect_file_content[12:]:
#print line
if ("-------------------------------------------------------" in line):
get_frames(frame_list)
if skip>0:
skip -= 1
continue
line = line.replace('\n', '')
if ("FF in line") and (".bin" in line):
ff_bin_list.append([line.strip()])
skip = 2
del frame_list
frame_list = []
continue
frame_list.append(line.split()[0])
get_frames(frame_list) #Writing the last FF bin file frames in a list
return ff_bin_list, convert2str(ff_bin_list) #Converts list to a list of strings
def get_logsort_list(self, logsort_name = "LOG_SORT.INF", minimum_frames = 0):
""" Gets a list of BMP files from the LOG_SORT.INF with a list of frames. Used for composing the image while in DETECT mode
minimum_frames: the smallest number of detections for showing the meteor"""
minimum_frames = int(self.minimum_frames.get())
def get_frames(frame_list):
"""Gets frames for given BMP file in LOGSORT"""
if len(frame_list)<minimum_frames*2: #Times 2 because len(frames) actually contains every half-frame also
image_list.pop()
return None
min_frame = int(float(frame_list[0]))
max_frame = int(float(frame_list[-1]))
image_list[-1].append((min_frame, max_frame))
def convert2str(ff_bin_list):
""" Converts list format: [['FF*.bin', (start_frame, end_frame)], ... ] to string format ['FF*.bin Fr start_frame - end_frame'] """
str_ff_bin_list = []
for line in ff_bin_list:
str_ff_bin_list.append(line[0]+" Fr "+str(line[1][0]).zfill(4)+" - "+str(line[1][1]).zfill(4))
return str_ff_bin_list
logsort_path = self.dir_path+os.sep+logsort_name
if not os.path.isfile(logsort_path):
tkMessageBox.showerror("LOG_SORT.INF error", "LOG_SORT.INF file not found!")
try:
logsort_contents = open(logsort_path).readlines()
except:
tkMessageBox.showerror("File error", "Could not open file: "+logsort_path)
return False
if logsort_contents[5] == '999': #Return empty list if logsort is empty
return []
image_list = []
frame_list = []
met_no = 0
first = True
for line in logsort_contents[5:]:
if line == '999':
break
line = line.split()
img_name = line[4].split('_')[1]+'.bmp'
met_no = int(line[0])
if not img_name in [image[0] for image in image_list] or old_met != met_no:
if first != True:
get_frames(frame_list)
else:
first = False
image_list.append([img_name])
old_met = met_no
del frame_list
frame_list = []
continue
frame_list.append(line[1])
get_frames(frame_list)
return image_list, convert2str(image_list)
def update_scales(self, value):
""" Updates the size of levels scales, to make the appearence that there are 2 sliders on one scale """
size_var = 0.8
min_value = self.min_lvl_scale.get()
max_value = self.max_lvl_scale.get()
middle = (min_value+max_value)/2
min_size = middle * size_var
max_size = (255 - middle) * size_var
self.min_lvl_scale.config(from_ = 0, to = middle - 1, length = min_size)
self.max_lvl_scale.config(from_ = middle +1, to = 255, length = max_size)
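        #Convert the linear slider position into a multiplicative gamma value
        #(slider position 0 maps to gamma = 1, i.e. no gamma correction).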
self.gamma.set(1/10**(self.gamma_scale.get()))
self.gamma_scale.config(label = "Gamma: "+"{0:.2f}".format(round(self.gamma.get(), 2)))
self.update_image(0, update_levels = True)
def change_mode(self):
""" Changes the current mode"""
if self.mode.get()==1: #Captured mode
#Enable all filters
self.maxpixel_btn.config(state = NORMAL)
self.colored_btn.config(state = NORMAL)
self.avgpixel_btn.config(state = NORMAL)
self.odd_btn.config(state = NORMAL)
self.even_btn.config(state = NORMAL)
self.min_frames_entry.config(state = DISABLED) #Disable the entry of minimum frame number
self.filter.set(1) #Set filter to maxframe
old_image = self.current_image #Preserve the image position
temp_bin_list = self.get_bin_list()
self.update_listbox(temp_bin_list) #Update listbox
if old_image in temp_bin_list:
temp_index = temp_bin_list.index(old_image)
self.move_index(temp_index) #Move to old image position
else:
self.move_top(0) #Move listbox cursor to the top
self.start_frame.set(0)
if self.data_type.get() == 1: #CAMS data type
self.end_frame.set(255)
self.frame_scale.config(to = 255)
else: #Skypatrol data type
self.end_frame.set(1500)
self.frame_scale.config(to = 1500)
elif self.mode.get() == 2: #Detected mode
if self.data_type.get() == 1: #CAMS data type
detected_list = self.get_detected_list() #Get a list of FF*.bin files from FTPdetectinfo
else: #Skypatrol data type
detected_list = self.get_logsort_list()
if detected_list == False:
self.mode.set(1)
return 0
elif detected_list[0] == []:
tkMessageBox.showinfo("FTPdetectinfo info", "No detections in the FTPdetectinfo file!")
self.mode.set(1)
return 0
self.min_frames_entry.config(state = NORMAL) #Enable the entry of minimum frame number
ff_bin_list, str_ff_bin_list = detected_list
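            #Map the human-readable listbox entries ("FF*.bin Fr start - end") back to the
            #underlying [filename, (start_frame, end_frame)] records for later lookups.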
self.detection_dict = dict(zip(str_ff_bin_list, ff_bin_list))
            if self.filter.get() != 10: #Don't change if the video filter was set
self.filter.set(3) #Set filter to Detection only
#Disable all other filters
self.maxpixel_btn.config(state = DISABLED)
self.colored_btn.config(state = DISABLED)
self.avgpixel_btn.config(state = DISABLED)
self.odd_btn.config(state = DISABLED)
self.even_btn.config(state = DISABLED)
old_image = self.current_image #Get old image name
self.update_listbox(str_ff_bin_list)
try:
temp_index = str_ff_bin_list.index([bin for bin in str_ff_bin_list if old_image in bin][0])
self.move_index(temp_index) #Move to old image position
except:
self.move_top(0) #Move listbox cursor to the top
def show_about(self):
tkMessageBox.showinfo("About",
"""CMN_binViewer version: """+str(version)+"""\n
Croatian Meteor Network\n
http://cmn.rgn.hr/\n
Copyright © 2014 Denis Vida
E-mail: [email protected]\n
Reading FF*.bin files: based on Matlab scripts by Peter S. Gural
images2gif: Copyright © 2012, Almar Klein, Ant1, Marius van Voorden
gifsicle: Copyright © 1997-2013 Eddie Kohler""")
def show_key_bindings(self):
tkMessageBox.showinfo("Key bindings",
"""Key Bindings:
Changing images:
- Arrow Down - move down by one image
- Arrow Up - move up by one image
- Home - jump to first image
- End - jump to last image
Changing mode:
- Page Up - captured mode
- Page Down - detected mode
Changing filters:
- Arrow Right - move right by one filter
- Arrow Left - move left by one filter
- F1 - maxframe
- F2 - colorized
- F3 - detection only
- F4 - avgframe
- F5 - odd filter set
- F6 - even filter set and toggle with odd frame
- F7 - show individual frames (use slider)
- F9 - show video
Sorting files:
- Enter - copy FF*.bin to sorted folder
Other:
- Delete - toggle Deinterlace
- Insert - toggle Hold levels""")
def onExit(self):
self.quit()
def initUI(self):
""" Initialize GUI elements"""
self.parent.title("CMN_binViewer")
#Configure the style of each element
s = Style()
s.configure("TButton", padding=(0, 5, 0, 5), font='serif 10', background = global_bg)
s.configure('TLabelframe.Label', foreground =global_fg, background=global_bg)
s.configure('TLabelframe', foreground =global_fg, background=global_bg, padding=(3, 3, 3, 3))
s.configure("TRadiobutton", foreground = global_fg, background = global_bg)
s.configure("TLabel", foreground = global_fg, background = global_bg)
s.configure("TCheckbutton", foreground = global_fg, background = global_bg)
s.configure("Vertical.TScrollbar", background=global_bg, troughcolor = global_bg)
#s.configure('TScale', sliderthickness = 1)
self.columnconfigure(0, pad=3)
self.columnconfigure(1, pad=3)
self.columnconfigure(2, pad=3)
self.columnconfigure(3, pad=3)
self.columnconfigure(4, pad=3)
self.columnconfigure(5, pad=3)
self.columnconfigure(6, pad=3)
self.columnconfigure(7, pad=3)
self.columnconfigure(8, pad=3)
self.rowconfigure(0, pad=3)
self.rowconfigure(1, pad=3)
self.rowconfigure(2, pad=3)
self.rowconfigure(3, pad=3)
self.rowconfigure(4, pad=3)
self.rowconfigure(5, pad=3)
self.rowconfigure(6, pad=3)
self.rowconfigure(7, pad=3)
self.rowconfigure(8, pad=3)
self.rowconfigure(9, pad=3)
self.rowconfigure(10, pad=3)
#Make menu
self.menuBar = Menu(self.parent)
self.parent.config(menu=self.menuBar)
#File menu
fileMenu = Menu(self.menuBar, tearoff=0)
fileMenu.add_command(label = "Open FF*.bin folder", command = self.askdirectory)
fileMenu.add_separator()
fileMenu.add_command(label="Exit", underline=0, command=self.onExit)
self.menuBar.add_cascade(label="File", underline=0, menu=fileMenu)
#Data type menu
datatypeMenu = Menu(self.menuBar, tearoff = 0)
datatypeMenu.add_checkbutton(label = "Auto", onvalue = 0, variable = self.data_type_var, command = self.update_data_type)
datatypeMenu.add_separator()
datatypeMenu.add_checkbutton(label = "CAMS", onvalue = 1, variable = self.data_type_var, command = self.update_data_type)
datatypeMenu.add_checkbutton(label = "Skypatrol", onvalue = 2, variable = self.data_type_var, command = self.update_data_type)
self.menuBar.add_cascade(label = "Data type", underline = 0, menu = datatypeMenu)
#Process Menu
processMenu = Menu(self.menuBar, tearoff=0)
processMenu.add_command(label = "Make master dark frame", command = self.make_master_dark)
processMenu.add_command(label = "Make master flat frame", command = self.make_master_flat)
self.menuBar.add_cascade(label="Process", underline=0, menu=processMenu)
#Layout menu
layoutMenu = Menu(self.menuBar, tearoff = 0)
layoutMenu.add_checkbutton(label = "Vertical layout", onvalue = True, offvalue = False, variable = self.layout_vertical, command = self.update_layout)
layoutMenu.add_checkbutton(label = "Horizontal layout", onvalue = False, offvalue = True, variable = self.layout_vertical, command = self.update_layout)
self.menuBar.add_cascade(label = "Layout", menu = layoutMenu)
#Window menu
self.windowMenu = Menu(self.menuBar, tearoff = 0)
self.windowMenu.add_checkbutton(label = "Save image", onvalue = True, offvalue = False, variable = self.save_image_frame, command = self.update_layout)
self.windowMenu.add_checkbutton(label = "Image levels", onvalue = True, offvalue = False, variable = self.image_levels_frame, command = self.update_layout)
self.windowMenu.add_checkbutton(label = "Save animation", onvalue = True, offvalue = False, variable = self.save_animation_frame, command = self.update_layout)
self.menuBar.add_cascade(label = "Window", menu = self.windowMenu)
#Help Menu
helpMenu = Menu(self.menuBar, tearoff=0)
helpMenu.add_command(label = "Key bindings", command = self.show_key_bindings)
helpMenu.add_command(label = "About", command = self.show_about)
self.menuBar.add_cascade(label = "Help", underline=0, menu=helpMenu)
#Panel for mode
mode_panel = LabelFrame(self, text=' Mode ')
mode_panel.grid(row = 1, columnspan = 2, sticky='WE')
captured_btn = Radiobutton(mode_panel, text="Captured", variable = self.mode, value = 1, command = self.change_mode)
self.mode.set(1)
detected_btn = Radiobutton(mode_panel, text="Detected", variable = self.mode, value = 2, command = self.change_mode)
captured_btn.grid(row = 2, column = 0, padx=5, pady=2)
detected_btn.grid(row = 2, column = 1, padx=5, pady=2)
min_frames_label = Label(mode_panel, text = "Min. frames (0 - 255): ")
min_frames_label.grid(row = 3, column = 0)
self.min_frames_entry = ConstrainedEntry(mode_panel, textvariable = self.minimum_frames, width = 5)
self.min_frames_entry.grid(row = 3, column = 1, sticky = "W")
self.min_frames_entry.config(state = DISABLED)
#Calibration & image features
calib_panel = LabelFrame(self, text=' Calibration & image features ')
calib_panel.grid(row = 3, column = 0, columnspan = 2, rowspan = 1, sticky = "NWE")
self.dark_chk = Checkbutton(calib_panel, text = "Dark frame", variable = self.dark_status, command = lambda: self.update_image(0))
self.dark_chk.grid(row = 4, column = 0, sticky = "W")
dark_entry = StyledEntry(calib_panel, textvariable = self.dark_name, width = 25)
dark_entry.grid(row = 4, column = 1, sticky = "W")
dark_button = StyledButton(calib_panel, text = "Open", command = self.open_dark_path, width = 5)
dark_button.grid(row =4, column = 2, sticky ="W")
self.flat_chk = Checkbutton(calib_panel, text = "Flat frame", variable = self.flat_status, command = lambda: self.update_image(0))
self.flat_chk.grid(row = 5, column = 0, sticky = "W")
flat_entry = StyledEntry(calib_panel, textvariable = self.flat_name, width = 25)
flat_entry.grid(row = 5, column = 1, sticky = "W")
flat_button = StyledButton(calib_panel, text = "Open", command = self.open_flat_path, width = 5)
flat_button.grid(row = 5, column = 2, sticky ="W")
self.deinterlace_chk = Checkbutton(calib_panel, text = "Deinterlace", variable = self.deinterlace, command = lambda: self.update_image(0))
self.deinterlace_chk.grid(row = 6, column = 0, sticky = "W")
self.hold_levels_chk = Checkbutton(calib_panel, text = 'Hold levels', variable = self.hold_levels)
self.hold_levels_chk.grid(row = 7, column = 0, sticky = "W")
#Listbox
scrollbar = Scrollbar(self)
scrollbar.grid(row = 4, column = 2, rowspan = 7, sticky = "NS")
self.ff_list = Listbox(self, width = 47, yscrollcommand=scrollbar.set, exportselection=0, activestyle = "none", bg = global_bg, fg = global_fg)
self.ff_list.grid(row = 4, column = 0, rowspan = 7, columnspan = 2, sticky = "NS")
self.ff_list.bind('<<ListboxSelect>>', self.update_image)
scrollbar.config(command = self.ff_list.yview)
#Filters panel
filter_panel = LabelFrame(self, text=' Filters ')
filter_panel.grid(row = 1, column = 3, sticky = "W", padx=5, pady=5, ipadx=5, ipady=5, columnspan = 2)
self.maxpixel_btn = Radiobutton(filter_panel, text = "Maxpixel", variable = self.filter, value = 1, command = lambda: self.update_image(0))
self.maxpixel_btn.grid(row = 2, column = 3)
self.filter.set(1)
self.colored_btn = Radiobutton(filter_panel, text = "Colorized", variable = self.filter, value = 2, command = lambda: self.update_image(0))
self.colored_btn.grid(row = 2, column = 4)
self.detection_btn = Radiobutton(filter_panel, text = "Detection", variable = self.filter, value = 3, command = lambda: self.update_image(0))
self.detection_btn.grid(row = 2, column = 5)
self.avgpixel_btn = Radiobutton(filter_panel, text = "Avgpixel", variable = self.filter, value = 4, command = lambda: self.update_image(0))
self.avgpixel_btn.grid(row = 2, column = 6)
self.odd_btn = Radiobutton(filter_panel, text = "Odd", variable = self.filter, value = 5, command = lambda: self.update_image(0))
self.odd_btn.grid(row = 2, column = 7)
self.even_btn = Radiobutton(filter_panel, text = "Even", variable = self.filter, value = 6, command = lambda: self.update_image(0))
self.even_btn.grid(row = 2, column = 8)
#Frames
self.frames_btn = Radiobutton(filter_panel, text = "Frames", variable = self.filter, value = 7, command = lambda: self.update_image(0))
self.frames_btn.grid(row = 2, column = 9)
#Video
self.video_btn = Radiobutton(filter_panel, text = "VIDEO", variable = self.filter, value = 10, command = lambda: self.update_image(0))
self.video_btn.grid(row = 2, column = 10)
#Sort panel
sort_panel = LabelFrame(self, text=' Sort FF*.bins ')
sort_panel.grid(row = 1, column = 5, sticky = "W", padx=2, pady=5, ipadx=5, ipady=5)
sort_folder_label = Label(sort_panel, text = "Folder:")
sort_folder_label.grid(row = 2, column = 4, sticky = "W")
sort_folder_entry = StyledEntry(sort_panel, textvariable = self.sort_folder_path, width = 15)
sort_folder_entry.grid(row = 3, column = 4)
#previous_button = StyledButton(sort_panel, text ="<", width=3, command = lambda: self.move_img_up(0))
#previous_button.grid(row = 2, column = 6, rowspan = 2)
copy_button = StyledButton(sort_panel, text ="Copy", width=5, command = lambda: self.copy_bin_to_sorted(0))
copy_button.grid(row = 2, column = 7, rowspan = 2)
open_button = StyledButton(sort_panel, text ="Show folder", command = lambda: self.open_current_folder(0))
open_button.grid(row = 2, column = 8, rowspan = 2)
#next_button = StyledButton(sort_panel, text =">", width=3, command = lambda: self.move_img_down(0))
#next_button.grid(row = 2, column = 9, rowspan = 2)
#Image
try: #Show the TV test card image on open
noimage_data = open('noimage.bin', 'rb').read()
noimage = PhotoImage(data = noimage_data)
except:
noimage = None
self.imagelabel = Label(self, image = noimage)
self.imagelabel.image = noimage
self.imagelabel.grid(row=3, column=3, rowspan = 4, columnspan = 3)
#Timestamp label
self.timestamp_label = Label(self, text = "YYYY-MM-DD HH:MM.SS.mms FFF", font=("Courier", 12))
self.timestamp_label.grid(row = 7, column = 5, sticky = "E")
#self.timestamp_label.grid(row = 2, column = 3, sticky = "WNS")
#Save buttons
self.save_panel = LabelFrame(self, text=' Save image ') #Position set in update layout
save_label = Label(self.save_panel, text = "Save")
save_label.grid(row = 9, column = 3, sticky = "W")
save_label = Label(self.save_panel, text = "Save as...")
save_label.grid(row = 10, column = 3, sticky = "W")
save_bmp = StyledButton(self.save_panel, text="BMP", width = 5, command = lambda: self.save_image(extension = 'bmp', save_as = False))
save_bmp.grid(row = 9, column = 4)
save_jpg = StyledButton(self.save_panel, text="JPG", width = 5, command = lambda: self.save_image(extension = 'jpg', save_as = False))
save_jpg.grid(row = 9, column = 5)
save_as_bmp = StyledButton(self.save_panel, text="BMP", width = 5, command = lambda: self.save_image(extension = 'bmp', save_as = True))
save_as_bmp.grid(row = 10, column = 4)
save_as_jpg = StyledButton(self.save_panel, text="JPG", width = 5, command = lambda: self.save_image(extension = 'jpg', save_as = True))
save_as_jpg.grid(row = 10, column = 5)
self.print_name_btn = Checkbutton(self.save_panel, text = "Embed name", variable = self.print_name_status) #Position set in update_label
#Levels
self.levels_label = LabelFrame(self, text =" Image levels ") #position set in update_layout
self.min_lvl_scale = Scale(self.levels_label, orient = "horizontal", width = 12, borderwidth = 0, background = global_bg, foreground = global_fg, highlightthickness = 0, sliderlength = 10, resolution = 2)
self.min_lvl_scale.grid(row = 9, column = 4, sticky = "W")
self.max_lvl_scale = Scale(self.levels_label, orient = "horizontal", width = 12, borderwidth = 0, background = global_bg, foreground = global_fg, highlightthickness = 0, to = 255, sliderlength = 10, resolution = 2)
self.max_lvl_scale.grid(row = 9, column = 5, sticky = "W")
self.gamma_scale = Scale(self.levels_label, orient = "horizontal", width = 12, borderwidth = 0, background = global_bg, foreground = global_fg, highlightthickness = 0, sliderlength = 20, from_ = -1.0, to = 1.0, resolution = 0.01, length = 100, showvalue = 0)
self.gamma_scale.grid(row = 10, column = 4, columnspan = 2, sticky ="WE")
self.gamma_scale.set(0)
self.min_lvl_scale.set(0)
self.min_lvl_scale.config(command = self.update_scales)
self.max_lvl_scale.set(255)
self.max_lvl_scale.config(command = self.update_scales)
self.gamma_scale.config(command = self.update_scales)
self.hold_levels_chk_horizontal = Checkbutton(self.levels_label, text = 'Hold levels', variable = self.hold_levels) #Position set in update_layout
#Animation
self.animation_panel = LabelFrame(self, text=' Save animation ') #Position set in update_layout
start_frame_label = Label(self.animation_panel, text = "Start Frame: ")
start_frame_label.grid(row = 9, column = 4, sticky = "W")
start_frame_entry = ConstrainedEntry(self.animation_panel, textvariable = self.start_frame, width = 5)
start_frame_entry.grid(row = 9, column = 5)
end_frame_label = Label(self.animation_panel, text = "End Frame: ")
end_frame_label.grid(row = 10, column = 4, sticky = "W")
end_frame_entry = ConstrainedEntry(self.animation_panel, textvariable = self.end_frame, width = 5)
end_frame_entry.grid(row = 10, column = 5)
fps_label = Label(self.animation_panel, text ="FPS: ")
fps_label.grid(row = 11, column = 4, rowspan = 2, sticky = "WE")
fps_entry = ConstrainedEntry(self.animation_panel, textvariable = self.fps, width = 4)
fps_entry.grid(row = 11, column = 5, rowspan = 2, sticky = "WE")
gif_embed_btn = Checkbutton(self.animation_panel, text = "Embed name", variable = self.gif_embed)
gif_embed_btn.grid(row = 9, column = 6, sticky = "W")
repeatbtn = Checkbutton(self.animation_panel, text = "Repeat", variable = self.repeat)
repeatbtn.grid(row = 10, column = 6, sticky = "W")
perfield_btn = Checkbutton(self.animation_panel, text = "Per field", variable = self.perfield_var)
perfield_btn.grid(row = 11, column = 6, sticky = "W")
self.gif_make_btn = StyledButton(self.animation_panel, text ="GIF", command = self.make_gif, width = 10) #Position set in update_layout
#Frame slider
self.frames_slider_panel = LabelFrame(self, text=' Frame ') #Position set in update_layout
self.frame_scale = Scale(self.frames_slider_panel, orient = "horizontal", width = 12, borderwidth = 0, background = global_bg, foreground = global_fg, highlightthickness = 0, sliderlength = 20, from_ = 0, to = 255, resolution = 1, length = 100)
self.frame_scale.grid(row = 1, column = 1, columnspan = 4, sticky ="WE")
self.frame_scale.config(command = self.update_image)
frame_start_frame_label = Label(self.frames_slider_panel, text = "Start Frame: ")
frame_start_frame_label.grid(row = 2, column = 1, sticky = "W")
frame_start_frame_entry = ConstrainedEntry(self.frames_slider_panel, textvariable = self.start_frame, width = 5)
frame_start_frame_entry.grid(row = 2, column = 2)
frame_end_frame_label = Label(self.frames_slider_panel, text = "End Frame: ")
frame_end_frame_label.grid(row = 2, column = 3, sticky = "W")
frame_end_frame_entry = ConstrainedEntry(self.frames_slider_panel, textvariable = self.end_frame, width = 5)
frame_end_frame_entry.grid(row = 2, column = 4)
#Status bar
self.status_bar = Label(self, text="Start", relief="sunken", anchor="w")
self.status_bar.grid(row = 11, column = 0, columnspan = 15, sticky = "WE")
self.update_layout()
if __name__ == '__main__':
root = Tk()
try:
root.iconbitmap(r'.'+os.sep+'icon.ico')
except:
pass
app = binViewer(root)
root.mainloop() | bsd-3-clause | -6,645,203,243,643,184,000 | 39.4375 | 460 | 0.580718 | false |
Delosari/dazer | bin/lib/Math_Libraries/linfit_script.py | 1 | 13123 | from numpy import asarray, array, sqrt
from uncertainties import unumpy, ufloat
def linfit(x_true, y, sigmay=None, relsigma=True, cov=False, chisq=False, residuals=False):
"""
Least squares linear fit.
    Fit a straight line `f(x_true) = a*x_true + b` to points `(x_true, y)`. Returns
    the slope `a` and the intercept `b` that minimize the squared error.
Parameters
----------
x_true : array_like
one dimensional array of `x_true` data with `n`>2 data points.
y : array_like
one dimensional array of `y` data with `n`>2 data points.
sigmay : NoneType or float or array_like, optional
one dimensional array of uncertainties (errors) in `y` data or a single
positive number if all uncertainties are the same. `sigmay` determines
the weighting in the least squares minimization. Leaving `sigmay=None`
uses no weighting and is equivalent to `sigmay=1`.
relsigma : bool, optional
If `relsigma` is True, the residuals are used to scale the covariance
matrix. Use this option if you do not know the absolute uncertainties
(`sigmay`) in the data but still want a covariance matrix whose entries
give meaningful estimates of the uncertainties in the fitting parameters
    `a` and `b` (from `f = a*x_true + b`). If `relsigma` is False, the covariance
matrix is calculated (provided `cov` = True) using sigmay assuming
    sigmay represents absolute uncertainties.
cov : bool, optional
    If True, calculate and return the 2x2 covariance matrix of the fitting
parameters.
chisq : bool, optional
If True, calculate and return redchisq.
residuals : bool, optional
If True, calculate and return residuals.
Returns
-------
fit : array([a,b]) ndarray of floats
The best fit model parameters `a` (the slope) and `b` (the
`y`-intercept) for the input data arrays `x_true` and `y`
cvm : array, shape (2,2) : returned only if cov=True
    Covariance matrix of the fitting parameters. Diagonal elements are
estimated variances of the fitting parameters a and b; square roots of
the diagonal elements thus provide estimates of the uncertainties in the
fitting parameters `a` and `b`. Off diagonal elements (equal to each
    other) are the covariance between the fitting parameters `a` and `b`.
redchisq : float : returned only if chisq=True
Reduced chi-squared goodness of fit parameter.
residuals : ndarray of floats : returned only if residuals=True
Length n array of the differences `y-(ax+b)` between `y`-data and the
fitted data `ax + b`.
Raises
------
TypeError : if `x_true` and `y` have different lengths
TypeError : If `x_true` and `y` have 2 or fewer elements
TypeError : If `sigmay` length is not 1 or the same as `y`
See Also
--------
polyfit : Least squares fit to polynomial.
linalg.lstsq : Least-squares solution to a linear matrix equation.
Notes
-----
By default, ``linfit`` returns optimal fitting parameters `a` and `b` without
weighting of the data. In that case, linfit minimizes the squared error
.. math ::
E = \\sum_{i=0}^n [y_i - (a x_i + b)]^2
If `sigmay` is set equal to the uncertainties in the `y` data points, then
linfit minimizes the `chi-squared` sum
.. math ::
\chi^2 = \\sum_{i=0}^n \\left[ \\frac{y_i-(a x_i + b)}{\\sigma_i} \\right]^2
where :math:`\sigma_i` is given by `sigmay`, the "error" or standard
deviation of :math:`y_i`. `sigmay` can be either a single number that gives the
uncertainty for all elements of `y`, or it can be an array of the same
length as `y` that gives the "error" for each element of `y`.
`redchisq` is :math:`\chi^2/(n-2)` where :math:`n` is the number of data
points (the length of `x_true` or `y`).
If `relsigma` is False, then the uncertainties `sigmay` in `y` are
assumed to be the absolute one-standard-deviation uncertainties in `y`.
In this case, the reduced chi-squared value :math:`\chi^2/(n-2)` provides a
measure of the goodness of the fit. If it is near 1, then the linear
fitting model is considered to be good and the values of the covariance
matrix are appropriately scaled. In particular, the square root of the
diagonal elements of the covariance matrix give the estimated uncertainty
    in the fitting parameters `a` and `b`. See Reference [2] below for more
information.
If `relsigma` is True, then the uncertainties `sigmay` in `y` are
considered to be only relative uncertainties. They are used to weight
the data for the fit, but in this case, the covariance matrix is rescaled
using the residuals between the fit and the data. In this case, the reduced
chi-squared value :math:`\chi^2/(n-2)` does not provide a measure of the
goodness of the fit. Nevertheless, the diagonal elements of the rescaled
covariance matrix (returned by linfit) give the estimated uncertainty in the
fitting parameters `a` and `b`.
The covariance matrix is a 2x2 symmetric matrix where the diagonal elements
are the variance of the fitting parameters. Their square roots provide
estimates of the uncertainties in the fitting parameters. The off-diagonal
elements are equal and give the cross correlation between the two fitting
parameters `a` and `b`.
linfit runs faster, by a factor of 2 to 3, if calculation of the residuals
is suppressed letting `cov`, `chisq`, and `residuals` remain False (the
default setting).
Fitting a straight line to a single set of `(x_true, y)` data using ``linfit`` is
typically 2 to 10 times faster than using either ``polyfit`` or
``linalg.lstsq``, especially when weighting is used and for very large data
sets.
References
----------
.. [1] An Introduction to Error Analysis, 2nd Ed. by John R. Taylor
(University Science Books, 1997)
.. [2] Numerical Recipes, The Art of Scientific Computing, 3rd Edition
by W.H. Press, S. A. Teukolsky, W. T. Vetterling, & B. P. Flannery
(Cambridge University Press, 2007)
Examples
--------
Fit a line, `y = ax + b`, through some noisy `(x_true, y)` data-points without
any weighting (`sigmay` = None) to obtain fitting parameters `a` and `b`:
>>> x_true = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
>>> fit = linfit(x_true, y)
>>> print("a = {0:0.2f}, b = {1:0.2f}".format(fit[0], fit[1]))
a = 1.00, b = -0.95
Setting `cov` = True in the input, returns the covariance matrix `cvm`.
When uncertainties `sigmay` are left unspecified, meaningful estimates of
the uncertainties `da` and `db` in the fitting parameters `a` and `b`
are given by the square roots of the diagonals of the covariance matrix
`cvm`, provided `relsigma` = True (the default state).
>>> fit, cvm = linfit(x_true, y, cov=True)
>>> dfit = [np.sqrt(cvm[i,i]) for i in range(2)]
>>> print("da = {0:0.2f}, db = {1:0.2f}".format(dfit[0], dfit[1]))
da = 0.07, db = 0.13
A better practice is to supply estimates of the uncertainties in the
input argument `sigmay`. `sigmay` can be a single float, if the
uncertainties are the same for all data points, or it can be an array, if
the uncertainties for different data points are different. Here we
enter sigmay as an array.
>>> dy = np.array([0.18, 0.13, 0.15, 0.17])
>>> fit, cvm, redchisq, resids = linfit(x_true, y, cov=True, sigmay=dy, relsigma=False, chisq=True, residuals=True)
>>> print("a = {0:0.2f}, b = {1:0.2f}".format(fit[0], fit[1]))
a = 0.98, b = -0.91
>>> dfit = [np.sqrt(cvm[i,i]) for i in range(2)]
>>> print("da = {0:0.2f}, db = {1:0.2f}".format(dfit[0], dfit[1]))
da = 0.08, db = 0.14
>>> print("reduced chi-squared = {0:0.2f}".format(redchisq))
reduced chi-squared = 1.21
>>> print(resids)
[-0.08856653 0.12781099 -0.1558115 0.06056602]
The value of reduced chi-squared `redchisq` is 1.21 indicating that a
linear model is valid for these data. The residuals :math:`y_i - (a+bx_i)`
are given by the output `resids`.
If absolute estimates of the uncertainties are not available, but relative
estimates of the uncertainties are known, a fit can be obtained with
reasonable estimates of the uncertainties in the fitting parameters by
setting `relsigma` = True.
>>> dy = np.array([1.0, 0.75, 0.75, 1.25])
>>> fit, cvm, redchisq = linfit(x_true, y, cov=True, sigmay=dy, relsigma=True, chisq=True)
>>> print("a = {0:0.2f}, b = {1:0.2f}".format(fit[0], fit[1]))
a = 0.97, b = -0.91
>>> dfit = [np.sqrt(cvm[i,i]) for i in range(2)]
>>> print("da = {0:0.2f}, db = {1:0.2f}".format(dfit[0], dfit[1]))
da = 0.09, db = 0.16
>>> print("reduced chi-squared = {0:0.2f}".format(redchisq))
reduced chi-squared = 0.04
In this case, the value `redchisq` is meaningless, because only the
relative, rather than the absolute uncertainties are known. Nevertheless,
by setting `relsigma` = True, reasonable estimates for the uncertainties
in the fitting parameters are obtained.
Illustration:
.. image:: example.png
:scale: 75 %
"""
x_true = asarray(x_true)
y = asarray(y)
if x_true.size != y.size:
raise TypeError('Expected x_true and y to have same length')
if x_true.size <= 2:
raise TypeError('Expected x_true and y length > 2')
if sigmay is None: sigmay = 1.0
sigmay = asarray(sigmay)
if sigmay.size == 1:
sigy = float(sigmay) # convert 0-d array to a float
wt = 1./(sigy*sigy)
s = wt * y.size
sx = wt * x_true.sum()
sy = wt * y.sum()
t = x_true-sx/s
stt = wt * (t*t).sum()
slope = wt * (t*y).sum()/stt
yint = (sy - sx * slope)/s
else:
if sigmay.size != y.size:
raise TypeError('Expected sigmay size to be 1 or same as y')
wt = 1./(sigmay*sigmay)
s = wt.sum()
sx = (x_true*wt).sum()
sy = (y*wt).sum()
t = (x_true-sx/s)/sigmay
stt = (t*t).sum()
slope = (t*y/sigmay).sum()/stt
yint = (sy - sx * slope)/s
returns = array([slope, yint])
if cov is True:
cvm00 = 1./stt
cvm01 = -sx/(s*stt)
cvm11 = (1.0-sx*cvm01)/s
if relsigma is True:
redchisq, resids = _resids(x_true, y, sigmay, slope, yint)
cvm00 *= redchisq
cvm01 *= redchisq
cvm11 *= redchisq
returns = [returns] + [array([[cvm00, cvm01],
[cvm01, cvm11]])]
if residuals or chisq is True:
if relsigma is False:
redchisq, resids = _resids(x_true, y, sigmay, slope, yint)
if type(returns) is not list:
returns = [returns]
if chisq is True:
returns += [redchisq]
if residuals is True:
returns += [resids]
return returns
def _resids(x_true, y, sigmay, slope, yint):
resids = y - (yint + slope*x_true)
redchisq = ((resids/sigmay)**2).sum()/(x_true.size-2)
return redchisq, resids
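# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original module).
# The data values simply mirror the docstring example above and show how the
# covariance matrix is turned into one-sigma uncertainties on the fit parameters.
def _linfit_usage_example():
    x_demo = array([0.0, 1.0, 2.0, 3.0])
    y_demo = array([-1.0, 0.2, 0.9, 2.1])
    dy_demo = array([0.18, 0.13, 0.15, 0.17])
    # Weighted fit with absolute y-uncertainties; also request covariance,
    # reduced chi-squared and residuals.
    fit, cvm, redchisq, resids = linfit(x_demo, y_demo, sigmay=dy_demo,
                                        relsigma=False, cov=True,
                                        chisq=True, residuals=True)
    slope, intercept = fit
    slope_err, intercept_err = sqrt(cvm[0, 0]), sqrt(cvm[1, 1])
    return slope, intercept, slope_err, intercept_err, redchisq, resids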
def LinfitLinearRegression(x_true, y):
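    # Expects x_true and y as arrays of `uncertainties` ufloats (unumpy arrays); performs a
    # fit weighted by the y standard deviations and returns the gradient and intercept as
    # ufloat objects carrying the fitted uncertainties.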
if (x_true != None) and (y != None):
if len(x_true) > 2:
x_mag = unumpy.nominal_values(x_true)
y_mag = unumpy.nominal_values(y)
y_err = unumpy.std_devs(y)
Regression_Fit, Uncertainty_Matrix = linfit(x_mag, y_mag, y_err, cov=True, relsigma=False)
m_n_error = [sqrt(Uncertainty_Matrix[t,t]) for t in range(2)]
gradient, gradient_error = Regression_Fit[0], m_n_error[0]
n, n_error = Regression_Fit[1], m_n_error[1]
Gradient_MagErr = ufloat(gradient, gradient_error)
n_MagError = ufloat(n, n_error)
elif len(x_true) == 2:
x_mag = unumpy.nominal_values(x_true)
y_mag = unumpy.nominal_values(y)
m = (y_mag[1] - y_mag[0]) / (x_mag[1] - x_mag[0])
n = y_mag[0] - m * x_mag[0]
Gradient_MagErr = ufloat(m, 1e-4)
n_MagError = ufloat(n, 1e-4)
else:
print 'WARNING: Only one point to do a linear regression'
else:
Gradient_MagErr, n_MagError = None, None
return Gradient_MagErr, n_MagError | mit | -1,549,250,426,603,208,400 | 42.456954 | 119 | 0.590338 | false |
hirunatan/estelcon_web | activities/services.py | 1 | 11340 | from django.core.mail import send_mail, mail_managers
from django.conf import settings
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from django.db.models import Count
from datetime import datetime, timedelta
from collections import namedtuple
import locale
import math
from .models import Activity
from functools import reduce
Day = namedtuple('Day', ['name', 'blocks'])
Block = namedtuple('Block', ['hour', 'columns'])
Column = namedtuple('Column', ['rowspan', 'colspan', 'activities'])
PendingColumn = namedtuple('PendingColumn', ['current_row', 'column'])
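# The schedule is returned as nested namedtuples: each Day holds Blocks (one per half hour),
# each Block holds Columns, and each Column holds the activities displayed in that cell.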
def get_schedule():
# Obtain the list of all activities (they are already ordered by start date) and put them in
# a table divided in days, and then in blocks of half hour, from 8:30h to 05:00h next day.
# Each block contains columns, and in each column fit one or more activities. Columns
# may also span more than one block.
# Set the language for day names
locale.setlocale(locale.LC_ALL, 'es_ES.UTF-8')
# Get the complete list of activities, and split into those with hour and those without
activities = Activity.objects.all()
activ_without_hour = [a for a in activities if a.start is None]
activ_with_hour = [a for a in activities if a.start is not None]
# Create the list of days
days = []
if len(activ_with_hour) > 0:
first_day = activ_with_hour[0].start.replace(hour=0, minute=0, second=0, microsecond=0)
last_day = activ_with_hour[-1].start.replace(hour=0, minute=0, second=0, microsecond=0)
day = first_day
while day <= last_day:
day_blocks = _build_day_blocks(activ_with_hour, day)
days.append(day_blocks)
day = day + timedelta(days=1)
return (activ_without_hour, days)
def _build_day_blocks(activ_with_hour, day):
    first_block_hour = day.replace(hour=8, minute=30) # from 08:30h
last_block_hour = first_block_hour + timedelta(hours=20, minutes=30) # until 05:00h next day
pending_cols = [
PendingColumn(0, Column(1, 2, [])),
PendingColumn(0, Column(1, 1, [])),
PendingColumn(0, Column(1, 1, []))
]
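    # pending_cols tracks, for each of the three columns, how far we are (current_row) inside
    # a Column that spans several half-hour blocks (rowspan), so long activities keep their
    # merged cell across consecutive blocks.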
# Create a list of 30min blocks
blocks = []
block_hour = first_block_hour
while block_hour <= last_block_hour:
block = _build_block(activ_with_hour, block_hour, pending_cols)
if block:
blocks.append(block)
block_hour = block_hour + timedelta(minutes=30)
# Remove all empty blocks at the beginning and the end of the day
for i in [0, -1]:
while len(blocks) > 0:
block = blocks[i]
if not block.columns:
del blocks[i]
else:
break
return Day(day.strftime('%A %d').upper(), blocks)
def _build_block(activ_with_hour, block_hour, pending_cols):
for ncol in range(3):
rowspan, activities = _get_block_activities(activ_with_hour, block_hour, ncol)
current_row, column = pending_cols[ncol]
column.activities.extend(activities)
if rowspan > column.rowspan - current_row:
column = Column(rowspan + current_row, column.colspan, column.activities)
pending_cols[ncol] = PendingColumn(current_row, column)
if pending_cols[0].column.activities:
if pending_cols[0].current_row == 0:
columns = [pending_cols[0].column]
else:
columns = []
if pending_cols[1].column.activities and columns:
columns[0].activities.extend(pending_cols[1].column.activities)
if pending_cols[2].column.activities and columns:
columns[0].activities.extend(pending_cols[2].column.activities)
else:
columns = []
if pending_cols[1].current_row == 0 and pending_cols[1].column.activities:
columns.append(pending_cols[1].column)
if pending_cols[2].current_row == 0 and pending_cols[2].column.activities:
columns.append(pending_cols[2].column)
for ncol in range(3):
current_row, column = pending_cols[ncol]
current_row += 1
if current_row >= column.rowspan:
current_row = 0
column = Column(1, column.colspan, [])
pending_cols[ncol] = PendingColumn(current_row, column)
return Block(block_hour.strftime('%H:%M'), columns)
def _get_block_activities(activ_with_hour, block_hour, ncol):
activities = []
rowspan = 1
for activity in activ_with_hour:
if (activity.start >= block_hour) and \
(activity.start < (block_hour + timedelta(minutes=30))) and \
(activity.column == ncol):
activities.append(activity)
if activity.end is None:
duration = 0
else:
duration = math.ceil((activity.end - activity.start).seconds / 60)
activ_span = math.ceil(duration / 30)
if activ_span > rowspan:
rowspan = activ_span
return (rowspan, activities)
def get_activity_and_status(activity_id, user):
try:
activity = Activity.objects.get(pk = activity_id)
except Activity.DoesNotExist:
return (None, {})
is_owner = False
is_organizer = False
is_participant = False
is_admin = False
if user.is_authenticated():
if user in activity.owners.all():
is_owner = True
if user in activity.organizers.all():
is_organizer = True
if user in activity.participants.all():
is_participant = True
if user.is_staff:
is_admin = True
user_status = {
'is_owner': is_owner,
'is_organizer': is_organizer,
'is_participant': is_participant,
'is_admin': is_admin
}
return (activity, user_status)
def subscribe_to_activity(user, activity_id):
#TODO: refactor to receive an actual activity object instead of an id
try:
activity = Activity.objects.get(pk = activity_id)
except Activity.DoesNotExist:
return
# User is always added, even if the limit is reached
activity.participants.add(user)
activity.save()
# Subscription limit control
maxplacesreached = False
if len(activity.participants.all()) > activity.max_places:
maxplacesreached = True
mail_managers(
subject = '[Estelcon Admin] Inscripción en actividad %s' % (activity.title),
message =
'''
El usuario %s (%s) se ha inscrito en la actividad %s.
'''
% (user.username, user.get_full_name(), activity.title),
)
for owner in activity.owners.all():
send_mail(
subject = '[Estelcon] Inscripción en actividad de la Estelcon que tú organizas',
message =
'''
El usuario %s (%s, %s) se ha inscrito en la actividad %s.
'''
% (user.username, user.get_full_name(), user.email, activity.title),
from_email = settings.MAIL_FROM,
recipient_list = [owner.email],
fail_silently = False
)
if maxplacesreached:
send_mail(
subject = '[Estelcon] ATENCION: Tu actividad ha superado el máximo de plazas.',
message =
'''
Ponte en contacto con la organización, por favor, ya que tu actividad '%s' ya ha sobrepasado el máximo de plazas.
Actualmente tienes %d inscritos en una actividad con un máximo establecido por ti de %d.
'''
% (activity.title, len(activity.participants.all()), activity.max_places),
from_email = settings.MAIL_FROM,
recipient_list = [owner.email],
fail_silently = False
)
if maxplacesreached:
message_participants_maxplaces = \
'''
ATENCION, tu inscripción ha superado el número máximo de plazas disponibles. Los responsables
ya han sido notificados de este hecho y tomarán una decisión en breve. Si no recibes
contestación en pocos días no dudes en escribir directamente a la organización.
'''
else:
message_participants_maxplaces = 'Te encuentras dentro del número máximo de plazas.'
send_mail(
subject = '[Estelcon] Inscripción en actividad de la Estelcon',
message =
'''
Se ha registrado tu inscripción en la actividad con título '%s'.
Si en el futuro deseas cancelarla, escribe a la organización.
%s
'''
% (activity.title, message_participants_maxplaces),
from_email = settings.MAIL_FROM,
recipient_list = [user.email],
fail_silently = True
)
def change_activity(user, activity, home_url):
mail_managers(
subject = '[Estelcon Admin] Modificación de actividad "%s"' % (activity.title),
message =
'''
El usuario %s (%s) ha modificado una actividad
Título: %s
Subtítulo: %s
Duración: %s
Nº máximo de plazas: %d
Mostrar responsables: %s
Texto:
%s
Necesidades logísticas:
%s
Notas para la organización:
%s'''
% (
user.username, user.get_full_name(), activity.title, activity.subtitle,
activity.duration, activity.max_places or 0, activity.show_owners,
activity.text, activity.logistics, activity.notes_organization),
)
send_mail(
subject = '[Estelcon] Se ha modificado la actividad "%s"' % (activity.title),
message =
'''
Se ha modificado correctamente la actividad con título '%s'.
¡Muchas gracias por participar! Entre todos haremos una gran Mereth Aderthad.
El equipo organizador.
%s
'''
% (activity.title, home_url),
from_email = settings.MAIL_FROM,
recipient_list = [user.email],
fail_silently = True
)
def send_proposal(user, data, home_url):
mail_managers(
subject = '[Estelcon Admin] Actividad propuesta: %s' % (data['title']),
message =
'''
El usuario %s (%s) ha propuesto una actividad.
Título: %s
Subtítulo: %s
Duración: %s
Nº máximo de plazas: %d
Mostrar responsables: %s
Requiere inscripción: %s
Responsables:
%s
Organizadores:
%s
Texto:
%s
Necesidades logísticas:
%s
Notas para la organización:
%s'''
% (
user.username, user.get_full_name(), data['title'], data['subtitle'],
data['duration'], data['max_places'] or 0, data['show_owners'],
data['requires_inscription'], data['owners'], data['organizers'],
data['text'], data['logistics'], data['notes_organization']),
)
send_mail(
subject = '[Estelcon] Actividad propuesta para la Estelcon',
message =
'''
Se ha enviado a los organizadores tu propuesta de actividad con título
'%s'.
Estudiaremos la actividad que propones y le buscaremos un hueco en la Estelcon. En cuanto
lo hagamos, podrás ver cómo aparece en el Programa de actividades, incluyendo una ficha
rellena con los datos que nos has enviado (al menos con la parte pública). Y si tú o
cualquiera de las personas designadas como responsables accedéis a la web con vuestro
usuario y contraseña, podréis consultar y modificar todos los datos.
Si tenemos alguna duda o consulta que hacerte, contactaremos contigo a través del correo
electrónico o el teléfono que indicaste al registrarte.
¡Muchas gracias por participar! Entre todos haremos una gran Mereth Aderthad.
El equipo organizador.
%s
'''
% (data['title'], home_url),
from_email = settings.MAIL_FROM,
recipient_list = [user.email],
fail_silently = True
)
| agpl-3.0 | -6,948,931,239,060,189,000 | 29.928767 | 113 | 0.647002 | false |
ruohoruotsi/Wavelet-Tree-Synth | nnet/VAE-RyotaKatoh-chainer/VAE_YZ_X.py | 1 | 5152 | import os
import time
import numpy as np
from chainer import cuda, Variable, function, FunctionSet, optimizers
from chainer import functions as F
class VAE_YZ_X(FunctionSet):
def __init__(self, **layers):
super(VAE_YZ_X, self).__init__(**layers)
def softplus(self, x):
return F.log(F.exp(x) + 1)
def identity(self, x):
return x
def forward_one_step(self, x_data, y_data, n_layers_recog, n_layers_gen, nonlinear_q='softplus', nonlinear_p='softplus', output_f = 'sigmoid', type_qx='gaussian', type_px='gaussian', gpu=-1):
x = Variable(x_data)
y = Variable(y_data)
# set non-linear function
nonlinear = {'sigmoid': F.sigmoid, 'tanh': F.tanh, 'softplus': self.softplus, 'relu': F.relu}
nonlinear_f_q = nonlinear[nonlinear_q]
nonlinear_f_p = nonlinear[nonlinear_p]
output_activation = {'sigmoid': F.sigmoid, 'identity': self.identity, 'tanh': F.tanh}
output_a_f = output_activation[output_f]
hidden_q = [ nonlinear_f_q( self.recog_x( x ) + self.recog_y( y ) ) ]
# compute q(z|x, y)
for i in range(n_layers_recog-1):
hidden_q.append(nonlinear_f_q(getattr(self, 'recog_%i' % i)(hidden_q[-1])))
q_mean = getattr(self, 'recog_mean')(hidden_q[-1])
q_log_sigma = 0.5 * getattr(self, 'recog_log')(hidden_q[-1])
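        # Reparameterization trick: draw eps ~ N(0, I) and set z = mu + sigma * eps so the
        # sampling step stays differentiable with respect to q_mean and q_log_sigma.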
eps = np.random.normal(0, 1, (x.data.shape[0], q_log_sigma.data.shape[1])).astype('float32')
if gpu >= 0:
eps = cuda.to_gpu(eps)
eps = Variable(eps)
z = q_mean + F.exp(q_log_sigma) * eps
        # compute p(x | y, z)
hidden_p = [ nonlinear_f_p( self.gen_y( y ) + self.gen_z( z ) ) ]
for i in range(n_layers_gen-1):
hidden_p.append(nonlinear_f_p(getattr(self, 'gen_%i' % i)(hidden_p[-1])))
hidden_p.append(output_a_f(getattr(self, 'gen_out')(hidden_p[-1])))
output = hidden_p[-1]
rec_loss = F.mean_squared_error(output, x)
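        # Regularisation term pushing the approximate posterior q(z|x, y) towards the unit
        # Gaussian prior, averaged over the batch size and the input dimensionality.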
KLD = -0.5 * F.sum(1 + q_log_sigma - q_mean**2 - F.exp(q_log_sigma)) / (x_data.shape[0]*x_data.shape[1])
return rec_loss, KLD, output
def generate(self, sample_x, sample_y, n_layers_recog, n_layers_gen, nonlinear_q='relu', nonlinear_p='relu', output_f='sigmoid', gpu=-1):
x = Variable(sample_x)
y = Variable(sample_y)
# set non-linear function
nonlinear = {'sigmoid': F.sigmoid, 'tanh': F.tanh, 'softplus': self.softplus, 'relu': F.relu}
nonlinear_f_q = nonlinear[nonlinear_q]
nonlinear_f_p = nonlinear[nonlinear_p]
output_activation = {'sigmoid': F.sigmoid, 'identity': self.identity, 'tanh': F.tanh}
output_a_f = output_activation[output_f]
# compute q(z|x, y)
hidden_q = [ nonlinear_f_q( self.recog_x( x ) + self.recog_y( y ) ) ]
for i in range(n_layers_recog-1):
hidden_q.append(nonlinear_f_q(getattr(self, 'recog_%i' % i)(hidden_q[-1])))
q_mean = getattr(self, 'recog_mean')(hidden_q[-1])
q_log_sigma = 0.5 * getattr(self, 'recog_log')(hidden_q[-1])
eps = np.random.normal(0, 1, (x.data.shape[0], q_log_sigma.data.shape[1])).astype('float32')
if gpu >= 0:
eps = cuda.to_gpu(eps)
eps = Variable(eps)
z = q_mean + F.exp(q_log_sigma) * eps
outputs = np.zeros((sample_y.shape[1], sample_x.shape[1]), dtype=np.float32)
for label in range(sample_y.shape[1]):
sample_y = np.zeros((1, sample_y.shape[1]), dtype=np.float32)
sample_y[0][label] = 1.
            # compute p(x | y, z)
hidden_p = [ nonlinear_f_p( self.gen_y( Variable(sample_y) ) + self.gen_z( z ) ) ]
for i in range(n_layers_gen-1):
hidden_p.append(nonlinear_f_p(getattr(self, 'gen_%i' % i)(hidden_p[-1])))
hidden_p.append(output_a_f(getattr(self, 'gen_out')(hidden_p[-1])))
output = hidden_p[-1]
outputs[label] = output.data
return outputs
def generate_z_x(self, x_size, sample_z, sample_y, n_layers_recog, n_layers_gen, nonlinear_q='relu', nonlinear_p='relu', output_f='sigmoid', gpu=-1):
# set non-linear function
nonlinear = {'sigmoid': F.sigmoid, 'tanh': F.tanh, 'softplus': self.softplus, 'relu': F.relu}
nonlinear_f_q = nonlinear[nonlinear_q]
nonlinear_f_p = nonlinear[nonlinear_p]
output_activation = {'sigmoid': F.sigmoid, 'identity': self.identity, 'tanh': F.tanh}
output_a_f = output_activation[output_f]
# input variables
z = Variable(sample_z.reshape((1, sample_z.shape[0])))
y = Variable(sample_y.reshape((1, sample_y.shape[0])))
outputs = np.zeros((1, x_size), dtype=np.float32)
        # compute p(x | y, z)
hidden_p = [ nonlinear_f_p( self.gen_y( y ) + self.gen_z( z ) ) ]
for i in range(n_layers_gen-1):
hidden_p.append(nonlinear_f_p(getattr(self, 'gen_%i' % i)(hidden_p[-1])))
hidden_p.append(output_a_f(getattr(self, 'gen_out')(hidden_p[-1])))
output = hidden_p[-1]
outputs = output.data
return outputs
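# ---------------------------------------------------------------------------
# Minimal usage sketch (added for illustration; not part of the original
# training code). It only shows the layer names that forward_one_step() and
# generate() look up via getattr(); the sizes below (784-dim x, 10-dim y,
# 500 hidden units, 50 latent dimensions) are assumptions.
if __name__ == '__main__':
    model = VAE_YZ_X(
        recog_x=F.Linear(784, 500), recog_y=F.Linear(10, 500),
        recog_0=F.Linear(500, 500),
        recog_mean=F.Linear(500, 50), recog_log=F.Linear(500, 50),
        gen_y=F.Linear(10, 500), gen_z=F.Linear(50, 500),
        gen_0=F.Linear(500, 500), gen_out=F.Linear(500, 784),
    )
    x_data = np.random.rand(8, 784).astype('float32')
    y_data = np.zeros((8, 10), dtype=np.float32)
    y_data[:, 0] = 1.0
    rec_loss, KLD, output = model.forward_one_step(
        x_data, y_data, n_layers_recog=2, n_layers_gen=2, gpu=-1)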
| gpl-2.0 | -8,410,004,486,679,140,000 | 36.064748 | 195 | 0.572011 | false |
wangshunzi/Python_code | 02-Python面向对象代码/面向对象-三大特性/封装.py | 1 | 1583 | # _*_ encoding:utf-8 _*_
# import win32com.client  # needed on Windows for the speech output in __say(); uncomment to enable
#
#
class Caculator(object):
def __check_num_zsq(func):
def inner(self, n):
if not isinstance(n, int):
raise TypeError("当前这个数据的类型有问题, 应该是一个整型数据")
return func(self, n)
return inner
def __say(self, word):
		# 1. Create a text-to-speech (speaker) object
speaker = win32com.client.Dispatch("SAPI.SpVoice")
		# 2. Use this speaker object to speak the corresponding text string directly
speaker.Speak(word)
def __create_say_zsq(word=""):
def __say_zsq(func):
def inner(self, n):
self.__say(word + str(n))
return func(self, n)
return inner
return __say_zsq
@__check_num_zsq
@__create_say_zsq()
def __init__(self, num):
self.__result = num
@__check_num_zsq
@__create_say_zsq("加")
def jia(self, n):
self.__result += n
return self
@__check_num_zsq
@__create_say_zsq("减去")
def jian(self, n):
self.__result -= n
return self
@__check_num_zsq
@__create_say_zsq("乘以")
def cheng(self, n):
self.__result *= n
return self
def show(self):
self.__say("sz牌计算机计算的结果是:%d" % self.__result)
print("计算的结果是:%d" % self.__result)
return self
def clear(self):
self.__result = 0
return self
@property
def result(self):
return self.__result
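# Usage sketch (requires the win32com import above to be enabled, on Windows):
#   Caculator(10).jia(5).jian(3).cheng(2).show()
# speaks each step aloud and prints the final result (24).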
| mit | -5,992,346,275,371,996,000 | 22.459016 | 58 | 0.512229 | false |
flegoff/hcrendu | settings.py | 1 | 5119 | # Django settings for hcrendu project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '/home/dotcloud/rendus_proj', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Paris'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'fr-fr'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
SITE_URL = 'http://localhost:8000/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '%i0kgcf0pz9$twap*$qt*^qh#la7s7ulj(iq*khjdl5m=v^#$t'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'hcrendu.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'hcrendu.hcstudyprojects'
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
EMAIL_HOST = ''
EMAIL_PORT = 587
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
SENDER = ''
ADMIN_MEDIA_PREFIX = '/static/admin_media/'  # note: overrides the ADMIN_MEDIA_PREFIX set earlier in this file
| mit | 7,010,484,665,604,025,000 | 32.457516 | 122 | 0.683727 | false |
maxive/erp | addons/point_of_sale/tests/test_frontend.py | 1 | 12623 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.api import Environment
from odoo.tools import DEFAULT_SERVER_DATE_FORMAT
from datetime import date, timedelta
import odoo.tests
class TestUi(odoo.tests.HttpCase):
def test_01_pos_basic_order(self):
env = self.env
journal_obj = env['account.journal']
account_obj = env['account.account']
main_company = env.ref('base.main_company')
main_pos_config = env.ref('point_of_sale.pos_config_main')
account_receivable = account_obj.create({'code': 'X1012',
'name': 'Account Receivable - Test',
'user_type_id': env.ref('account.data_account_type_receivable').id,
'reconcile': True})
field = env['ir.model.fields']._get('res.partner', 'property_account_receivable_id')
env['ir.property'].create({'name': 'property_account_receivable_id',
'company_id': main_company.id,
'fields_id': field.id,
'value': 'account.account,' + str(account_receivable.id)})
# test an extra price on an attribute
pear = env.ref('point_of_sale.poire_conference')
attribute_value = env['product.attribute.value'].create({
'name': 'add 2',
'product_ids': [(6, 0, [pear.id])],
'attribute_id': env['product.attribute'].create({
'name': 'add 2',
}).id,
})
env['product.attribute.price'].create({
'product_tmpl_id': pear.product_tmpl_id.id,
'price_extra': 2,
'value_id': attribute_value.id,
})
fixed_pricelist = env['product.pricelist'].create({
'name': 'Fixed',
'item_ids': [(0, 0, {
'compute_price': 'fixed',
'fixed_price': 1,
}), (0, 0, {
'compute_price': 'fixed',
'fixed_price': 2,
'applied_on': '0_product_variant',
'product_id': env.ref('point_of_sale.boni_orange').id,
}), (0, 0, {
'compute_price': 'fixed',
'fixed_price': 13.95, # test for issues like in 7f260ab517ebde634fc274e928eb062463f0d88f
'applied_on': '0_product_variant',
'product_id': env.ref('point_of_sale.papillon_orange').id,
})],
})
env['product.pricelist'].create({
'name': 'Percentage',
'item_ids': [(0, 0, {
'compute_price': 'percentage',
'percent_price': 100,
'applied_on': '0_product_variant',
'product_id': env.ref('point_of_sale.boni_orange').id,
}), (0, 0, {
'compute_price': 'percentage',
'percent_price': 99,
'applied_on': '0_product_variant',
'product_id': env.ref('point_of_sale.papillon_orange').id,
}), (0, 0, {
'compute_price': 'percentage',
'percent_price': 0,
'applied_on': '0_product_variant',
'product_id': env.ref('point_of_sale.citron').id,
})],
})
env['product.pricelist'].create({
'name': 'Formula',
'item_ids': [(0, 0, {
'compute_price': 'formula',
'price_discount': 6,
'price_surcharge': 5,
'applied_on': '0_product_variant',
'product_id': env.ref('point_of_sale.boni_orange').id,
}), (0, 0, {
# .99 prices
'compute_price': 'formula',
'price_surcharge': -0.01,
'price_round': 1,
'applied_on': '0_product_variant',
'product_id': env.ref('point_of_sale.papillon_orange').id,
}), (0, 0, {
'compute_price': 'formula',
'price_min_margin': 10,
'price_max_margin': 100,
'applied_on': '0_product_variant',
'product_id': env.ref('point_of_sale.citron').id,
}), (0, 0, {
'compute_price': 'formula',
'price_surcharge': 10,
'price_max_margin': 5,
'applied_on': '0_product_variant',
'product_id': env.ref('point_of_sale.limon').id,
}), (0, 0, {
'compute_price': 'formula',
'price_discount': -100,
'price_min_margin': 5,
'price_max_margin': 20,
'applied_on': '0_product_variant',
'product_id': env.ref('point_of_sale.pamplemousse_rouge_pamplemousse').id,
})],
})
env['product.pricelist'].create({
'name': 'min_quantity ordering',
'item_ids': [(0, 0, {
'compute_price': 'fixed',
'fixed_price': 1,
'applied_on': '0_product_variant',
'min_quantity': 2,
'product_id': env.ref('point_of_sale.boni_orange').id,
}), (0, 0, {
'compute_price': 'fixed',
'fixed_price': 2,
'applied_on': '0_product_variant',
'min_quantity': 1,
'product_id': env.ref('point_of_sale.boni_orange').id,
}), (0, 0, {
'compute_price': 'fixed',
'fixed_price': 2,
'applied_on': '0_product_variant',
'min_quantity': 2,
'product_id': env.ref('point_of_sale.product_product_consumable').id,
})],
})
env['product.pricelist'].create({
'name': 'Product template',
'item_ids': [(0, 0, {
'compute_price': 'fixed',
'fixed_price': 1,
'applied_on': '1_product',
'product_tmpl_id': env.ref('point_of_sale.boni_orange_product_template').id,
}), (0, 0, {
'compute_price': 'fixed',
'fixed_price': 2,
})],
})
env['product.pricelist'].create({
# no category has precedence over category
'name': 'Category vs no category',
'item_ids': [(0, 0, {
'compute_price': 'fixed',
'fixed_price': 1,
'applied_on': '2_product_category',
'categ_id': env.ref('product.product_category_3').id, # All / Saleable / Services
}), (0, 0, {
'compute_price': 'fixed',
'fixed_price': 2,
})],
})
p = env['product.pricelist'].create({
'name': 'Category',
'item_ids': [(0, 0, {
'compute_price': 'fixed',
'fixed_price': 2,
'applied_on': '2_product_category',
'categ_id': env.ref('product.product_category_all').id,
}), (0, 0, {
'compute_price': 'fixed',
'fixed_price': 1,
'applied_on': '2_product_category',
'categ_id': env.ref('product.product_category_3').id, # All / Saleable / Services
})],
})
today = date.today()
one_week_ago = today - timedelta(weeks=1)
two_weeks_ago = today - timedelta(weeks=2)
one_week_from_now = today + timedelta(weeks=1)
two_weeks_from_now = today + timedelta(weeks=2)
env['product.pricelist'].create({
'name': 'Dates',
'item_ids': [(0, 0, {
'compute_price': 'fixed',
'fixed_price': 1,
'date_start': two_weeks_ago.strftime(DEFAULT_SERVER_DATE_FORMAT),
'date_end': one_week_ago.strftime(DEFAULT_SERVER_DATE_FORMAT),
}), (0, 0, {
'compute_price': 'fixed',
'fixed_price': 2,
'date_start': today.strftime(DEFAULT_SERVER_DATE_FORMAT),
'date_end': one_week_from_now.strftime(DEFAULT_SERVER_DATE_FORMAT),
}), (0, 0, {
'compute_price': 'fixed',
'fixed_price': 3,
'date_start': one_week_from_now.strftime(DEFAULT_SERVER_DATE_FORMAT),
'date_end': two_weeks_from_now.strftime(DEFAULT_SERVER_DATE_FORMAT),
})],
})
cost_base_pricelist = env['product.pricelist'].create({
'name': 'Cost base',
'item_ids': [(0, 0, {
'base': 'standard_price',
'compute_price': 'percentage',
'percent_price': 55,
})],
})
pricelist_base_pricelist = env['product.pricelist'].create({
'name': 'Pricelist base',
'item_ids': [(0, 0, {
'base': 'pricelist',
'base_pricelist_id': cost_base_pricelist.id,
'compute_price': 'percentage',
'percent_price': 15,
})],
})
env['product.pricelist'].create({
'name': 'Pricelist base 2',
'item_ids': [(0, 0, {
'base': 'pricelist',
'base_pricelist_id': pricelist_base_pricelist.id,
'compute_price': 'percentage',
'percent_price': 3,
})],
})
env['product.pricelist'].create({
'name': 'Pricelist base rounding',
'item_ids': [(0, 0, {
'base': 'pricelist',
'base_pricelist_id': fixed_pricelist.id,
'compute_price': 'percentage',
'percent_price': 0.01,
})],
})
excluded_pricelist = env['product.pricelist'].create({
'name': 'Not loaded'
})
env.ref('base.res_partner_18').property_product_pricelist = excluded_pricelist
        # set the company currency to USD, otherwise it will assume
        # euros. this would cause issues because the sales journal is in
        # USD, and all products would end up with a different
        # price
main_company.currency_id = env.ref('base.USD')
test_sale_journal = journal_obj.create({'name': 'Sales Journal - Test',
'code': 'TSJ',
'type': 'sale',
'company_id': main_company.id})
all_pricelists = env['product.pricelist'].search([('id', '!=', excluded_pricelist.id)])
all_pricelists.write(dict(currency_id=main_company.currency_id.id))
main_pos_config.write({
'journal_id': test_sale_journal.id,
'invoice_journal_id': test_sale_journal.id,
'journal_ids': [(0, 0, {'name': 'Cash Journal - Test',
'code': 'TSC',
'type': 'cash',
'company_id': main_company.id,
'journal_user': True})],
'available_pricelist_ids': [(4, pricelist.id) for pricelist in all_pricelists],
})
# open a session, the /pos/web controller will redirect to it
main_pos_config.open_session_cb()
# needed because tests are run before the module is marked as
# installed. In js web will only load qweb coming from modules
# that are returned by the backend in module_boot. Without
# this you end up with js, css but no qweb.
env['ir.module.module'].search([('name', '=', 'point_of_sale')], limit=1).state = 'installed'
self.phantom_js("/pos/web",
"odoo.__DEBUG__.services['web_tour.tour'].run('pos_pricelist')",
"odoo.__DEBUG__.services['web_tour.tour'].tours.pos_pricelist.ready",
login="admin")
self.phantom_js("/pos/web",
"odoo.__DEBUG__.services['web_tour.tour'].run('pos_basic_order')",
"odoo.__DEBUG__.services['web_tour.tour'].tours.pos_basic_order.ready",
login="admin")
for order in env['pos.order'].search([]):
self.assertEqual(order.state, 'paid', "Validated order has payment of " + str(order.amount_paid) + " and total of " + str(order.amount_total))
| agpl-3.0 | -4,505,519,686,318,333,000 | 41.076667 | 154 | 0.465816 | false |
NineWoranop/loadtesting-kpi | loadtesting/loadtesting-java/loadtesting-showcase-springmvc/src/test/grinder/Console_Joda/convert.py | 1 | 3901 | # Hello World
#
# A Grinder script that POSTs JSON requests to the Joda date-conversion endpoint defined below.
#
# This script shows the recommended style for scripts, with a
# TestRunner class. The script is executed just once by each worker
# process and defines the TestRunner class. The Grinder creates an
# instance of TestRunner for each worker thread, and repeatedly calls
# the instance for each run of that thread.
from net.grinder.script.Grinder import grinder
from net.grinder.script import Test
from net.grinder.plugin.http import HTTPPluginControl, HTTPRequest
from HTTPClient import NVPair
# A shorter alias for the grinder.logger.info() method.
log = grinder.logger.info
# Create a Test with a test number and a description. The test will be
# automatically registered with The Grinder console if you are using
# it.
test1 = Test(1, "ClientTime")
headers = []
headers.append(NVPair("Content-type", "application/json"))
headers.append(NVPair("BC_ENABLED", "TRUE"))
convertRequest = HTTPRequest(headers=headers)
url = "http://localhost:8080/loadtesting-showcase-springmvc/service/loadtest/joda/convert3"
#grinder.statistics.registerDataLogExpression("Start time at Server", "userLong0")
#grinder.statistics.registerDataLogExpression("Server time - Total", "userLong1")
#grinder.statistics.registerDataLogExpression("Server time - Web", "userLong2")
#grinder.statistics.registerDataLogExpression("Server time - Busi", "userLong3")
#grinder.statistics.registerDataLogExpression("Server time - External", "userLong4")
#grinder.statistics.registerSummaryExpression("Start time at Server", "userLong0")
#grinder.statistics.registerSummaryExpression("Server time - Total", "userLong1")
#grinder.statistics.registerSummaryExpression("Server time - Web", "userLong2")
#grinder.statistics.registerSummaryExpression("Server time - Busi", "userLong3")
#grinder.statistics.registerSummaryExpression("Server time - External", "userLong4")
# Instrument the info() method with our Test.
test1.record(convertRequest)
#test2 = Test(2, "ServerTime_Total")
class RequestData:
def __init__(self):
userFile = open("requests.txt", "r")
self.datas = []
try:
for line in userFile:
self.datas.append(line),
finally:
userFile.close()
# self.datas = ['{"fromTimeZone": "Asia/Bangkok", "toTimeZone": "GMT", "inputDate": "2015-12-30 18:30:33"}', '{"fromTimeZone": "Asia/Bangkok", "toTimeZone": "GMT", "inputDate": "2013-01-15 02:30:33"}']
def get(self, index):
i = index % len(self.datas)
return self.datas[i]
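# requests.txt is read once at module load time; each line is assumed to hold one JSON
# payload of the form shown in the commented example above.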
reqData = RequestData()
# A TestRunner instance is created for each thread. It can be used to
# store thread-specific data.
class TestRunner:
# This method is called for every run.
def __call__(self):
jsonData = reqData.get(grinder.runNumber * grinder.threadNumber)
response = convertRequest.POST(url, jsonData)
# for s in response.listHeaders():
# print s
# startTimeAtServer = response.getHeader("StartTime")
# if(startTimeAtServer is not None):
# grinder.statistics.forLastTest.setLong("userLong0", int(startTimeAtServer))
# totalTime = response.getHeader("TotalTime")
# if(totalTime is not None):
# grinder.statistics.forLastTest.setLong("userLong1", int(totalTime))
# webserviceTime = response.getHeader("WebserviceTime")
# if(webserviceTime is not None):
# grinder.statistics.forLastTest.setLong("userLong2", int(webserviceTime))
# businessTime = response.getHeader("Busi_InternalTime")
# if(businessTime is not None):
# grinder.statistics.forLastTest.setLong("userLong3", int(businessTime))
# externalTime = response.getHeader("Busi_ExternalTime")
# if(externalTime is not None):
# grinder.statistics.forLastTest.setLong("userLong4", int(externalTime))
| apache-2.0 | -1,304,590,826,982,140,700 | 44.360465 | 209 | 0.716996 | false |
wwgong/CVoltDB | tools/vis.py | 1 | 6217 | #!/usr/bin/env python
# This is a visualizer which pulls benchmark results from the stats server
# and visualizes them. For each application, latency and throughput graphs
# are generated, with one curve per node count.
#
# Run it without any arguments to see what arguments are needed.
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))) +
os.sep + 'tests/scripts/')
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from voltdbclient import *
STATS_SERVER = 'volt2'
def COLORS(k):
return (((k ** 3) % 255) / 255.0,
((k * 100) % 255) / 255.0,
((k * k) % 255) / 255.0)
MARKERS = ['+', '*', '<', '>', '^', '_',
'D', 'H', 'd', 'h', 'o', 'p']
def get_stats(hostname, port, days):
"""Get statistics of all runs
Example return value:
{ u'VoltKV': [ { 'lat95': 21,
'lat99': 35,
'nodes': 1,
'throughput': 104805,
'date': datetime object}],
u'Voter': [ { 'lat95': 20,
'lat99': 47,
'nodes': 1,
'throughput': 66287,
'date': datetime object}]}
"""
conn = FastSerializer(hostname, port)
proc = VoltProcedure(conn, 'BestOfPeriod',
[FastSerializer.VOLTTYPE_SMALLINT])
resp = proc.call([days])
conn.close()
# keyed on app name, value is a list of runs sorted chronologically
stats = dict()
run_stat_keys = ['nodes', 'date', 'tps', 'lat95', 'lat99']
for row in resp.tables[0].tuples:
app_stats = []
if row[0] not in stats:
stats[row[0]] = app_stats
else:
app_stats = stats[row[0]]
run_stats = dict(zip(run_stat_keys, row[1:]))
app_stats.append(run_stats)
# sort each one
for app_stats in stats.itervalues():
app_stats.sort(key=lambda x: x['date'])
return stats
class Plot:
DPI = 100.0
def __init__(self, title, xlabel, ylabel, filename, w, h):
self.filename = filename
self.legends = {}
w = w == None and 800 or w
h = h == None and 300 or h
fig = plt.figure(figsize=(w / self.DPI, h / self.DPI),
dpi=self.DPI)
self.ax = fig.add_subplot(111)
self.ax.set_title(title)
plt.xticks(fontsize=10)
plt.yticks(fontsize=10)
plt.ylabel(ylabel, fontsize=8)
plt.xlabel(xlabel, fontsize=8)
fig.autofmt_xdate()
def plot(self, x, y, color, marker_shape, legend):
self.ax.plot(x, y, linestyle="-", label=str(legend),
marker=marker_shape, markerfacecolor=color, markersize=4)
def close(self):
formatter = matplotlib.dates.DateFormatter("%b %d")
self.ax.xaxis.set_major_formatter(formatter)
ymin, ymax = plt.ylim()
plt.ylim((0, ymax * 1.1))
plt.legend(prop={'size': 10}, loc=0)
plt.savefig(self.filename, format="png", transparent=False,
bbox_inches="tight", pad_inches=0.2)
def plot(title, xlabel, ylabel, filename, width, height, app, data, data_type):
plot_data = dict()
for run in data:
if run['nodes'] not in plot_data:
plot_data[run['nodes']] = {'time': [], data_type: []}
datenum = matplotlib.dates.date2num(run['date'])
plot_data[run['nodes']]['time'].append(datenum)
if data_type == 'tps':
value = run['tps']/run['nodes']
else:
value = run[data_type]
plot_data[run['nodes']][data_type].append(value)
if len(plot_data) == 0:
return
i = 0
pl = Plot(title, xlabel, ylabel, filename, width, height)
sorted_data = sorted(plot_data.items(), key=lambda x: x[0])
for k, v in sorted_data:
pl.plot(v['time'], v[data_type], COLORS(i), MARKERS[i], k)
i += 3
pl.close()
def generate_index_file(filenames):
row = """
<tr>
<td>%s</td>
<td><a href="%s"><img src="%s" width="400" height="200"/></a></td>
<td><a href="%s"><img src="%s" width="400" height="200"/></a></td>
</tr>
"""
full_content = """
<html>
<head>
<title>Performance Graphs</title>
</head>
<body>
<table>
%s
</table>
</body>
</html>
""" % (''.join([row % (i[0], i[1], i[1], i[2], i[2]) for i in filenames]))
return full_content
def usage():
print "Usage:"
print "\t", sys.argv[0], "output_dir filename_base" \
" [width] [height]"
print
print "\t", "width in pixels"
print "\t", "height in pixels"
def main():
if len(sys.argv) < 3:
usage()
exit(-1)
if not os.path.exists(sys.argv[1]):
print sys.argv[1], "does not exist"
exit(-1)
prefix = sys.argv[2]
path = os.path.join(sys.argv[1], sys.argv[2])
width = None
height = None
if len(sys.argv) >= 4:
width = int(sys.argv[3])
if len(sys.argv) >= 5:
height = int(sys.argv[4])
stats = get_stats(STATS_SERVER, 21212, 30)
# Plot single node stats for all apps
filenames = [] # (appname, latency, throughput)
for app, data in stats.iteritems():
app_filename = app.replace(' ', '_')
latency_filename = '%s-latency-%s.png' % (prefix, app_filename)
throughput_filename = '%s-throughput-%s.png' % (prefix, app_filename)
filenames.append((app, latency_filename, throughput_filename))
plot(app + " latency", "Time", "Latency (ms)",
path + "-latency-" + app_filename + ".png", width, height, app,
data, 'lat99')
plot(app + " throughput", "Time", "Throughput (txns/sec)",
path + "-throughput-" + app_filename + ".png", width, height, app,
data, 'tps')
# generate index file
index_file = open(path + '-index.html', 'w')
sorted_filenames = sorted(filenames, key=lambda f: f[0].lower())
index_file.write(generate_index_file(sorted_filenames))
index_file.close()
if __name__ == "__main__":
main()
| gpl-3.0 | -5,714,690,900,903,100,000 | 29.326829 | 80 | 0.5493 | false |
nitely/Spirit | setup.py | 1 | 1954 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
import os
import io
from setuptools import setup, find_packages
BASE_DIR = os.path.join(os.path.dirname(__file__))
with io.open(os.path.join(BASE_DIR, 'README.md'), encoding='utf-8') as f:
README = f.read()
VERSION = __import__('spirit').__version__
with io.open(os.path.join(BASE_DIR, 'requirements.txt'), encoding='utf-8') as fh:
REQUIREMENTS = fh.read()
if sys.platform.startswith(('win32', 'darwin')):
PYTHON_MAGIC_DEP = ['python-magic-bin==0.4.14']
else: # Linux?
PYTHON_MAGIC_DEP = ['python-magic==0.4.15']
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-spirit',
version=VERSION,
description='Spirit is a Python based forum powered by Django.',
author='Esteban Castro Borsani',
author_email='[email protected]',
long_description=README,
long_description_content_type='text/markdown',
url='http://spirit-project.com/',
packages=find_packages(),
test_suite="runtests.start",
entry_points="""
[console_scripts]
spirit=spirit.extra.bin.spirit:main
""",
include_package_data=True,
zip_safe=False,
install_requires=REQUIREMENTS,
extras_require={
'files': PYTHON_MAGIC_DEP,
'huey': 'huey == 2.3.0',
'celery': 'celery[redis] == 4.4.7'},
license='MIT License',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
| mit | 5,437,205,630,062,427,000 | 29.061538 | 81 | 0.635619 | false |
g0v/sunshine.cy | website/cy/api/views.py | 1 | 4839 | #from django.contrib.auth.models import User, Group
from rest_framework import viewsets
from .serializers import *
from journals.models import Journals
from reports.models import Reports
from property.models import Stock, Land, Building, Car, Cash, Deposit, Aircraft, Boat, Bonds, Fund, OtherBonds, Antique, Insurance, Claim, Debt, Investment
class JournalsViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Journals.objects.all()
serializer_class = JournalsSerializer
filter_fields = ('name', 'date')
class ReportsViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Reports.objects.all().prefetch_related('land_set', 'building_set', 'boat_set', 'car_set', 'aircraft_set', 'cash_set', 'deposit_set', 'bonds_set', 'fund_set', 'otherbonds_set', 'antique_set', 'insurance_set', 'claim_set', 'debt_set', 'investment_set', )
serializer_class = ReportsSerializer
filter_fields = ('journal', 'category', 'name', 'department', 'title', 'report_at', 'report_type', 'spouse', 'at_page', 'file_id', )
class StockViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Stock.objects.all()
serializer_class = StockSerializer
filter_fields = ('report', 'name', 'symbol', 'owner', 'quantity', 'face_value', 'currency', 'total')
class LandViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Land.objects.all()
serializer_class = LandSerializer
filter_fields = ('report', 'name', 'area', 'share_portion', 'portion', 'owner', 'register_date', 'register_reason', 'acquire_value', 'total')
class BuildingViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Building.objects.all()
serializer_class = BuildingSerializer
filter_fields = ('report', 'name', 'area', 'share_portion', 'portion', 'owner', 'register_date', 'register_reason', 'acquire_value', 'total')
class CarViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Car.objects.all()
serializer_class = CarSerializer
filter_fields = ('report', 'name', 'capacity', 'owner', 'register_date', 'register_reason', 'acquire_value')
class CashViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Cash.objects.all()
serializer_class = CashSerializer
filter_fields = ('report', 'currency', 'owner', 'total')
class DepositViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Deposit.objects.all()
serializer_class = DepositSerializer
filter_fields = ('report', 'bank', 'deposit_type', 'currency', 'owner', 'total')
class AircraftViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Aircraft.objects.all()
serializer_class = AircraftSerializer
filter_fields = ('report', 'name', 'maker', 'number', 'owner', 'register_date', 'register_reason', 'acquire_value')
class BoatViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Boat.objects.all()
serializer_class = BoatSerializer
filter_fields = ('report', 'name', 'tonnage', 'homeport', 'owner', 'register_date', 'register_reason', 'acquire_value')
class BondsViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Bonds.objects.all()
serializer_class = BondsSerializer
filter_fields = ('report', 'name', 'symbol', 'owner', 'dealer', 'quantity', 'face_value', 'market_value', 'currency', 'total', 'total_value')
class FundViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Fund.objects.all()
serializer_class = FundSerializer
filter_fields = ('report', 'name', 'owner', 'dealer', 'quantity', 'face_value', 'market_value', 'currency', 'total', 'total_value')
class OtherBondsViewSet(viewsets.ReadOnlyModelViewSet):
queryset = OtherBonds.objects.all()
serializer_class = OtherBondsSerializer
filter_fields = ('report', 'name', 'owner', 'quantity', 'face_value', 'market_value', 'currency', 'total', 'total_value')
class AntiqueViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Antique.objects.all()
serializer_class = AntiqueSerializer
filter_fields = ('report', 'name', 'owner', 'quantity', 'total')
class InsuranceViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Insurance.objects.all()
serializer_class = InsuranceSerializer
filter_fields = ('report', 'name', 'company', 'owner')
class ClaimViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Claim.objects.all()
serializer_class = ClaimSerializer
filter_fields = ('report', 'species', 'debtor', 'owner', 'register_date', 'register_reason', 'total')
class DebtViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Debt.objects.all()
serializer_class = DebtSerializer
filter_fields = ('report', 'species', 'debtor', 'owner', 'register_date', 'register_reason', 'total')
class InvestmentViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Investment.objects.all()
serializer_class = InvestmentSerializer
filter_fields = ('report', 'owner', 'company', 'address', 'register_date', 'register_reason', 'total')
| cc0-1.0 | -828,647,415,842,423,900 | 48.377551 | 267 | 0.713784 | false |
arunkgupta/gramps | gramps/gen/filters/rules/person/_hassourcecount.py | 1 | 1763 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2007 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2008 Jerome Rapinat
# Copyright (C) 2008 Benny Malengier
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....ggettext import gettext as _
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .._hassourcecountbase import HasSourceCountBase
#-------------------------------------------------------------------------
# "People having sources"
#-------------------------------------------------------------------------
class HasSourceCount(HasSourceCountBase):
"""People with sources"""
name = _('People with <count> sources')
description = _("Matches people with a certain number of sources connected to it")
| gpl-2.0 | 2,533,578,305,603,572,700 | 36.510638 | 86 | 0.548497 | false |
aleksandar-mitrevski/robot_simulation | fault_injector/fault_injector_node.py | 1 | 6522 | #!/usr/bin/env python
import sys, tty, termios
import rospy
from fault_injector.msg import InjectFault
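# InjectFault is assumed to carry the two fields used below: frame_id (string) and inject_fault (bool).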
class Commands(object):
InjectFault = 1
RepairSensor = 2
RepairAllSensors = 3
Quit = 4
Unknown = 5
class FaultTypes(object):
Permanent = 1
Transient = 2
Unknown = 3
class FaultInjectorNode(object):
'''Defines a node that sends commands for injecting sensor faults.
The current version allows injecting both permanent and transient faults.
Author -- Aleksandar Mitrevski
'''
def __init__(self):
#stores names of sensor frames representing sensors with injected faults
self.faulty_sensor_frames = list()
#a list of sensor frames that will be 'repaired', i.e. faults
#will stop being injected to the respective sensors
self.sensor_frames_to_remove = list()
self.fault_message_publisher = rospy.Publisher('inject_fault', InjectFault, queue_size=10)
shutdown = False
self.print_instructions()
while not shutdown:
if len(self.sensor_frames_to_remove) > 0:
self.repair_transient_faults()
character = self.read_character()
command = self.read_command(character)
if command == Commands.InjectFault:
print 'Press:\np for injecting a permanent fault\nt for injecting a transient fault\n'
character = self.read_character()
fault_type = self.read_fault_type(character)
self.manage_sensor(command, fault_type)
elif command == Commands.RepairSensor:
self.manage_sensor(command)
elif command == Commands.RepairAllSensors:
self.repair_all_sensors()
elif command == Commands.Quit:
self.repair_all_sensors()
rospy.sleep(0.5)
shutdown = True
print 'Faulty sensors: ', self.faulty_sensor_frames
rospy.sleep(0.5)
#~ def inject_fault(self, request):
#~ if request.frame_id in self.faulty_sensor_frames:
#~ return InjectFaultResponse(True)
#~ return InjectFaultResponse(False)
def print_instructions(self):
print 'Use the following keys:\ni for injecting a fault\nr for "repairing" the sensor (removing the fault)\na for repairing all sensors\nq to quit\n'
def read_character(self):
'''Code used from http://stackoverflow.com/questions/510357/python-read-a-single-character-from-the-user
'''
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
def read_command(self, character):
'''Returns an appropriate 'Commands' value based on the value of the input character.
'''
if character == 'i':
return Commands.InjectFault
elif character == 'r':
return Commands.RepairSensor
elif character == 'a':
return Commands.RepairAllSensors
elif character == 'q':
return Commands.Quit
print 'unknown command ', character
return Commands.Unknown
def read_fault_type(self, character):
'''Returns an appropriate 'FaultTypes' based on the value of the input character.
'''
if character == 'p':
return FaultTypes.Permanent
elif character == 't':
return FaultTypes.Transient
print 'unknown fault type; injecting permanent fault'
return FaultTypes.Permanent
def manage_sensor(self, command, fault_type=None):
'''Publishes 'InjectFault' messages for injecting faults to sensors
or repairing sensor faults based on 'command' and 'fault_type'.
        The sensor that should be repaired/to which a fault will be injected
is provided by the user from the command line.
Keyword arguments:
command -- A 'Commands' value.
fault_type -- A 'FaultTypes' value (default None, which means that we are repairing sensors).
'''
sensor_frame = raw_input('Please enter the name of a sensor frame\n')
if command == Commands.InjectFault:
if sensor_frame not in self.faulty_sensor_frames:
fault_msg = InjectFault()
fault_msg.frame_id = sensor_frame
fault_msg.inject_fault = True
self.fault_message_publisher.publish(fault_msg)
self.faulty_sensor_frames.append(sensor_frame)
if fault_type == FaultTypes.Transient:
self.sensor_frames_to_remove.append(sensor_frame)
else:
print 'Faults are already being injected to this sensor'
if command == Commands.RepairSensor:
if sensor_frame in self.faulty_sensor_frames:
fault_msg = InjectFault()
fault_msg.frame_id = sensor_frame
fault_msg.inject_fault = False
self.fault_message_publisher.publish(fault_msg)
self.faulty_sensor_frames.remove(sensor_frame)
else:
print 'Faults have not been injected to this sensor; ignoring command'
def repair_transient_faults(self):
'''Repairs all sensors that are currently in 'self.sensor_frames_to_remove'
by publishing an appropriate 'InjectFault' message.
'''
for sensor_frame in self.sensor_frames_to_remove:
fault_msg = InjectFault()
fault_msg.frame_id = sensor_frame
fault_msg.inject_fault = False
self.fault_message_publisher.publish(fault_msg)
self.faulty_sensor_frames.remove(sensor_frame)
self.sensor_frames_to_remove[:] = []
print 'Faulty sensors: ', self.faulty_sensor_frames
def repair_all_sensors(self):
'''Repairs all sensors by sending appropriate 'InjectFault' commands.
'''
for sensor_frame in self.faulty_sensor_frames:
fault_msg = InjectFault()
fault_msg.frame_id = sensor_frame
fault_msg.inject_fault = False
self.fault_message_publisher.publish(fault_msg)
self.faulty_sensor_frames[:] = []
if __name__ == '__main__':
rospy.init_node('fault_injector')
try:
FaultInjectorNode()
except rospy.ROSInterruptException: pass
| mit | 3,365,828,375,489,635,000 | 37.140351 | 157 | 0.616069 | false |
infobip/infobip-api-python-client | infobip/api/model/omni/logs/OmniLog.py | 1 | 5654 | # -*- coding: utf-8 -*-
"""This is a generated class and is not intended for modification!
"""
from datetime import datetime
from infobip.util.models import DefaultObject, serializable
from infobip.api.model.omni.Price import Price
from infobip.api.model.omni.Status import Status
from infobip.api.model.omni.OmniChannel import OmniChannel
class OmniLog(DefaultObject):
@property
@serializable(name="bulkId", type=unicode)
def bulk_id(self):
"""
Property is of type: unicode
"""
return self.get_field_value("bulk_id")
@bulk_id.setter
def bulk_id(self, bulk_id):
"""
Property is of type: unicode
"""
self.set_field_value("bulk_id", bulk_id)
def set_bulk_id(self, bulk_id):
self.bulk_id = bulk_id
return self
@property
@serializable(name="messageId", type=unicode)
def message_id(self):
"""
Property is of type: unicode
"""
return self.get_field_value("message_id")
@message_id.setter
def message_id(self, message_id):
"""
Property is of type: unicode
"""
self.set_field_value("message_id", message_id)
def set_message_id(self, message_id):
self.message_id = message_id
return self
@property
@serializable(name="to", type=unicode)
def to(self):
"""
Property is of type: unicode
"""
return self.get_field_value("to")
@to.setter
def to(self, to):
"""
Property is of type: unicode
"""
self.set_field_value("to", to)
def set_to(self, to):
self.to = to
return self
@property
@serializable(name="from", type=unicode)
def from_(self):
"""
Property is of type: unicode
"""
return self.get_field_value("from_")
@from_.setter
def from_(self, from_):
"""
Property is of type: unicode
"""
self.set_field_value("from_", from_)
def set_from_(self, from_):
self.from_ = from_
return self
@property
@serializable(name="text", type=unicode)
def text(self):
"""
Property is of type: unicode
"""
return self.get_field_value("text")
@text.setter
def text(self, text):
"""
Property is of type: unicode
"""
self.set_field_value("text", text)
def set_text(self, text):
self.text = text
return self
@property
@serializable(name="sentAt", type=datetime)
def sent_at(self):
"""
Property is of type: datetime
"""
return self.get_field_value("sent_at")
@sent_at.setter
def sent_at(self, sent_at):
"""
Property is of type: datetime
"""
self.set_field_value("sent_at", sent_at)
def set_sent_at(self, sent_at):
self.sent_at = sent_at
return self
@property
@serializable(name="doneAt", type=datetime)
def done_at(self):
"""
Property is of type: datetime
"""
return self.get_field_value("done_at")
@done_at.setter
def done_at(self, done_at):
"""
Property is of type: datetime
"""
self.set_field_value("done_at", done_at)
def set_done_at(self, done_at):
self.done_at = done_at
return self
@property
@serializable(name="messageCount", type=int)
def message_count(self):
"""
Property is of type: int
"""
return self.get_field_value("message_count")
@message_count.setter
def message_count(self, message_count):
"""
Property is of type: int
"""
self.set_field_value("message_count", message_count)
def set_message_count(self, message_count):
self.message_count = message_count
return self
@property
@serializable(name="mccMnc", type=unicode)
def mcc_mnc(self):
"""
Property is of type: unicode
"""
return self.get_field_value("mcc_mnc")
@mcc_mnc.setter
def mcc_mnc(self, mcc_mnc):
"""
Property is of type: unicode
"""
self.set_field_value("mcc_mnc", mcc_mnc)
def set_mcc_mnc(self, mcc_mnc):
self.mcc_mnc = mcc_mnc
return self
@property
@serializable(name="price", type=Price)
def price(self):
"""
Property is of type: Price
"""
return self.get_field_value("price")
@price.setter
def price(self, price):
"""
Property is of type: Price
"""
self.set_field_value("price", price)
def set_price(self, price):
self.price = price
return self
@property
@serializable(name="status", type=Status)
def status(self):
"""
Property is of type: Status
"""
return self.get_field_value("status")
@status.setter
def status(self, status):
"""
Property is of type: Status
"""
self.set_field_value("status", status)
def set_status(self, status):
self.status = status
return self
@property
@serializable(name="channel", type=OmniChannel)
def channel(self):
"""
Property is of type: OmniChannel
"""
return self.get_field_value("channel")
@channel.setter
def channel(self, channel):
"""
Property is of type: OmniChannel
"""
self.set_field_value("channel", channel)
def set_channel(self, channel):
self.channel = channel
return self | apache-2.0 | -4,938,123,351,956,362,000 | 22.5625 | 66 | 0.556951 | false |
Amarchuk/2FInstability | core/n1167.py | 1 | 24998 | __author__ = 'amarch'
# -*- coding: utf-8 -*-
import shutil
from core.main import *
def correctGasData(r_g1, v_g1, dv_g1):
    '''Gathers all the ad-hoc corrections applied to the gas rotation-curve data.'''
r_g = r_g1
v_g = v_g1
dv_g = dv_g1
    # If the approximation needs to be straightened at the outer edge, a few of the last points
    # can be duplicated; this should help smooth it out. Alternatively, cut the data at upperBord.
# upperBord = 200
# r_g, v_g, dv_g = zip(*(filter(lambda x: x[0] < upperBord, zip(r_g, v_g, dv_g))))
# r_g = list(r_g)
# v_g = list(v_g)
# dv_g = list(dv_g)
# multiplate = 5
# addition_points = 2
# r_points = heapq.nlargest(addition_points, r_g)
# v_points = []
# dv_points = []
# for po in r_points:
# v_points.append(v_g[r_g.index(po)])
# dv_points.append(dv_g[r_g.index(po)])
# r_g = r_g + [i[0] + scale * i[1] for i in zip(r_points * multiplate, range(1, multiplate * addition_points + 1))]
# v_g = v_g + v_points * multiplate
# dv_g = dv_g + dv_points * multiplate
# correction = lstqBend(r_g, v_g)
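    # 4952 km/s is presumably the systemic velocity; it is divided by sin(incl) because the
    # velocities in the input file appear to be already deprojected by inclination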
correction = 4952/math.sin(36*math.pi/180)
v_g = map(lambda x: abs(x - correction), v_g)
r_g, v_g, dv_g = map(list, zip(*sorted(zip(r_g, v_g, dv_g))))
# add_points = 5
# r_points = [32]
# v_points = [285]
# dv_points = [1]
#
# r_g = r_g + [i[0] + scale * i[1] for i in zip(r_points * add_points, range(1, add_points + 1))]
# v_g = v_g + v_points * add_points
# dv_g = dv_g + dv_points * add_points
#
# add_points = 54
# r_points = [46]
# v_points = [268]
# dv_points = [1]
#
# r_g = r_g + [i[0] + scale * i[1] for i in zip(r_points * add_points, range(1, add_points + 1))]
# v_g = v_g + v_points * add_points
# dv_g = dv_g + dv_points * add_points
return r_g, v_g, dv_g
def correctStarData(r_ma1, v_ma1, dv_ma1):
    '''Corrections applied to the stellar (major-axis) rotation-curve data.'''
r_ma = r_ma1
v_ma = v_ma1
dv_ma = dv_ma1
    # If the approximation needs to be straightened at the outer edge, a few of the last points
    # can be duplicated; this should help smooth it out. Alternatively, cut the data at upperBord.
# upperBord = 3000
# r_ma, v_ma = zip(*(filter(lambda x: x[0] < upperBord, zip(r_ma, v_ma))))
# r_ma = list(r_ma)
# v_ma = list(v_ma)
#
# multiplate = 5
# addition_points = 3
# r_points = heapq.nlargest(addition_points, r_ma)
# v_points = []
# dv_points = []
# for po in r_points:
# v_points.append(v_ma[r_ma.index(po)])
# dv_points.append(dv_ma[r_ma.index(po)])
# r_ma = r_ma + [i[0] + scale * i[1] for i in zip(r_points * multiplate, range(1, multiplate * addition_points + 1))]
# v_ma = v_ma + v_points * multiplate
# dv_ma = dv_ma + dv_points * multiplate
add_points = 50
r_points = [36]
v_points = [340]
dv_points = [2]
r_ma = r_ma + [i[0] + scale * i[1] for i in zip(r_points * add_points, range(1, add_points + 1))]
v_ma = v_ma + v_points * add_points
dv_ma = dv_ma + dv_points * add_points
return r_ma, v_ma, dv_ma
def correctSigmaLosMaj(r_ma1, sig_los_ma1, dsig_los_ma1):
    '''Corrections applied to the line-of-sight velocity dispersion along the major axis.'''
    # If the fit does not converge, adjust the initial guess for the Gaussian below:
x0 = array([0, 100, 5, 100])
    # in case the file mixes data from different sources
r_ma, sig_los_ma, dsig_los_ma = map(list, zip(*sorted(zip(r_ma1, sig_los_ma1, dsig_los_ma1))))
    # The edges can be trimmed if they are bad
# r_ma = r_ma[1:-1]
# sig_los_ma = sig_los_ma[1:-1]
# dsig_los_ma = dsig_los_ma[1:-1]
    # # If the approximation needs to be straightened at the outer edge, a few of the last points
    # # can be duplicated; this should help smooth it out.
#
# multiplate = 10
# addition_points = 1
# r_points = heapq.nlargest(addition_points, r_ma)
# sig_points = []
# dsig_points = []
# for po in r_points:
# sig_points.append(sig_los_ma[r_ma.index(po)])
# dsig_points.append(dsig_los_ma[r_ma.index(po)])
# r_ma = r_ma + [i[0] + scale * i[1] for i in
# zip(r_points * multiplate, arange(1, 3 * (multiplate * addition_points) + 1, 3))]
# sig_los_ma = sig_los_ma + sig_points * multiplate
# dsig_los_ma = dsig_los_ma + dsig_points * multiplate
add_points = 90
r_points = [7]
v_points = [238]
dv_points = [1]
    # Exponential extrapolation points
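    # (the outer profile is extended with sigma(R) = 220 * exp(-R / 43); the amplitude and
    #  scale length here look hand-tuned for this galaxy)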
r_ma = r_ma + [i[0] + i[1] for i in zip(r_points * add_points, arange(1, add_points + 1, 1))]
sig_los_ma = sig_los_ma + [220 * math.exp(-x / 43.0) for x in
[i[0] + i[1] for i in zip(r_points * add_points, arange(1, add_points + 1, 1))]]
dsig_los_ma = dsig_los_ma + dv_points * add_points
return r_ma, sig_los_ma, dsig_los_ma, x0
def correctSigmaLosMin(r_ma1, sig_los_ma1, dsig_los_ma1):
    '''Corrections applied to the line-of-sight velocity dispersion along the minor axis.'''
r_ma, sig_los_ma, dsig_los_ma = map(list, zip(*sorted(zip(r_ma1, sig_los_ma1, dsig_los_ma1))))
    # The edges can be trimmed if they are bad
# r_ma = r_ma[1:-1]
# sig_los_ma = sig_los_ma[1:-1]
# dsig_los_ma = dsig_los_ma[1:-1]
    # If the fit does not converge, adjust the initial guess for the Gaussian below:
x0 = array([0, 10, 5, 10])
    # If the approximation needs to be straightened at the outer edge, a few of the last points
    # can be duplicated; this should help smooth it out.
# multiplate = 10
# addition_points = 1
# r_points = heapq.nlargest(addition_points, r_ma)
# sig_points = []
# dsig_points = []
# for po in r_points:
# sig_points.append(sig_los_ma[r_ma.index(po)])
# dsig_points.append(dsig_los_ma[r_ma.index(po)])
# r_ma = r_ma + [i[0] + scale * i[1] for i in
# zip(r_points * multiplate, arange(1, 5 * (multiplate * addition_points) + 1, 5))]
# sig_los_ma = sig_los_ma + sig_points * multiplate
# dsig_los_ma = dsig_los_ma + dsig_points * multiplate
return r_ma, sig_los_ma, dsig_los_ma, x0
startTime = time.time()
if __name__ == "__main__":
plt.rcParams.update({'font.size': 16})
path = '/home/amarch/Documents/RotationCurves/Diploma/TwoFluidInstAllDataFromSotn17Feb/Sample/RC/U2487_N1167'
name = 'U2487_N1167'
incl = 36
scale = 1
resolution = 330 #pc/arcsec
h_disc = 24.2 # R-band
M_R = 11.69
M_B = 13.40
mu0_c_R = 20.12
r_eff_bulge = 6.7
pol_degree_star = 15
pol_degree_gas = 8
sig_pol_deg = 10
sig_pol_deg_mi = 8
Rmin = 29
Rmax = 75
gas_corr_by_incl = False
M_to_L = mass_to_light(M_B - M_R)
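    # colour-based stellar M/L from the B-R colour (presumably a Bell & de Jong type
    # calibration implemented in mass_to_light())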
di = 2
monte_carlo_realizations = 1
peculiarities = [59,61]
maxDisc = 4
    sig_wings = r_eff_bulge # radius from which the dispersion-profile wings are fitted
    use_minor = False # whether the minor-axis velocity dispersion is used
if not os.path.exists(path+'/EQUAL_BELL/'):
os.makedirs(path+'/EQUAL_BELL/')
else:
for f in os.listdir(path+'/EQUAL_BELL/'):
os.remove(path+'/EQUAL_BELL/'+f)
shutil.copy2(path+'/v_stars_ma.dat', path+'/EQUAL_BELL/v_stars_ma.dat')
shutil.copy2(path+'/v_gas_ma.dat', path+'/EQUAL_BELL/v_gas_ma.dat')
shutil.copy2(path+'/gas_density.dat', path+'/EQUAL_BELL/gas_density.dat')
if os.path.exists(path+'/v_stars_mi.dat'):
shutil.copy2(path+'/v_stars_mi.dat', path+'/EQUAL_BELL/v_stars_mi.dat')
    # EQUAL method with the Bell (colour-based) M/L
mainf(PATH=path+'/EQUAL_BELL',
NAME=name,
INCL=incl,
SCALE=scale,
RESOLUTION=resolution,
H_DISC=h_disc,
MR=M_R,
MB=M_B,
MU0=mu0_c_R,
R_EFF_B=r_eff_bulge,
DEG_STAR=pol_degree_star,
DEG_GAS=pol_degree_gas,
SIG_MA_DEG=sig_pol_deg,
SIG_MI_DEG=sig_pol_deg_mi,
RMIN=Rmin,
RMAX=Rmax,
GAS_CORR=gas_corr_by_incl,
M_TO_L=M_to_L,
DI=di,
MONTE_CARLO=monte_carlo_realizations,
CORRECTION_GAS=correctGasData,
CORRECTION_STAR=correctStarData,
CORRECTION_SIG_MA=correctSigmaLosMaj,
CORRECTION_SIG_MI=correctSigmaLosMin,
SURF_DENS_STAR=surfaceDensityStarR,
METHOD='EQUAL',
PECULIARITIES=peculiarities,
SIG_WINGS = sig_wings, USE_MINOR = use_minor, RUN=1)
renameFilesByMethod(path+'/EQUAL_BELL/', 'EQUAL_BELL')
if not os.path.exists(path+'/HALF_MAX/'):
os.makedirs(path+'/HALF_MAX/')
else:
for f in os.listdir(path+'/HALF_MAX/'):
os.remove(path+'/HALF_MAX/'+f)
shutil.copy2(path+'/v_stars_ma.dat', path+'/HALF_MAX/v_stars_ma.dat')
shutil.copy2(path+'/v_gas_ma.dat', path+'/HALF_MAX/v_gas_ma.dat')
shutil.copy2(path+'/gas_density.dat', path+'/HALF_MAX/gas_density.dat')
if os.path.exists(path+'/v_stars_mi.dat'):
shutil.copy2(path+'/v_stars_mi.dat', path+'/HALF_MAX/v_stars_mi.dat')
    # HALF method with the maximum-disc M/L
mainf(PATH=path+'/HALF_MAX',
NAME=name,
INCL=incl,
SCALE=scale,
RESOLUTION=resolution,
H_DISC=h_disc,
MR=M_R,
MB=M_B,
MU0=mu0_c_R,
R_EFF_B=r_eff_bulge,
DEG_STAR=pol_degree_star,
DEG_GAS=pol_degree_gas,
SIG_MA_DEG=sig_pol_deg,
SIG_MI_DEG=sig_pol_deg_mi,
RMIN=Rmin,
RMAX=Rmax,
GAS_CORR=gas_corr_by_incl,
M_TO_L=maxDisc,
DI=di,
MONTE_CARLO=monte_carlo_realizations,
CORRECTION_GAS=correctGasData,
CORRECTION_STAR=correctStarData,
CORRECTION_SIG_MA=correctSigmaLosMaj,
CORRECTION_SIG_MI=correctSigmaLosMin,
SURF_DENS_STAR=surfaceDensityStarR,
METHOD='HALF',
PECULIARITIES=peculiarities,
SIG_WINGS = sig_wings, USE_MINOR = use_minor, RUN=2)
renameFilesByMethod(path+'/HALF_MAX/', 'HALF_MAX')
if not os.path.exists(path+'/HALF_BELL/'):
os.makedirs(path+'/HALF_BELL/')
else:
for f in os.listdir(path+'/HALF_BELL/'):
os.remove(path+'/HALF_BELL/'+f)
shutil.copy2(path+'/v_stars_ma.dat', path+'/HALF_BELL/v_stars_ma.dat')
shutil.copy2(path+'/v_gas_ma.dat', path+'/HALF_BELL/v_gas_ma.dat')
shutil.copy2(path+'/gas_density.dat', path+'/HALF_BELL/gas_density.dat')
if os.path.exists(path+'/v_stars_mi.dat'):
shutil.copy2(path+'/v_stars_mi.dat', path+'/HALF_BELL/v_stars_mi.dat')
    # HALF method with the Bell (colour-based) M/L
mainf(PATH=path+'/HALF_BELL',
NAME=name,
INCL=incl,
SCALE=scale,
RESOLUTION=resolution,
H_DISC=h_disc,
MR=M_R,
MB=M_B,
MU0=mu0_c_R,
R_EFF_B=r_eff_bulge,
DEG_STAR=pol_degree_star,
DEG_GAS=pol_degree_gas,
SIG_MA_DEG=sig_pol_deg,
SIG_MI_DEG=sig_pol_deg_mi,
RMIN=Rmin,
RMAX=Rmax,
GAS_CORR=gas_corr_by_incl,
M_TO_L=M_to_L,
DI=di,
MONTE_CARLO=monte_carlo_realizations,
CORRECTION_GAS=correctGasData,
CORRECTION_STAR=correctStarData,
CORRECTION_SIG_MA=correctSigmaLosMaj,
CORRECTION_SIG_MI=correctSigmaLosMin,
SURF_DENS_STAR=surfaceDensityStarR,
METHOD='HALF',
PECULIARITIES=peculiarities,
SIG_WINGS = sig_wings, USE_MINOR = use_minor, RUN=3)
renameFilesByMethod(path+'/HALF_BELL/', 'HALF_BELL')
if not os.path.exists(path+'/EQUAL_MAX/'):
os.makedirs(path+'/EQUAL_MAX/')
else:
for f in os.listdir(path+'/EQUAL_MAX/'):
os.remove(path+'/EQUAL_MAX/'+f)
shutil.copy2(path+'/v_stars_ma.dat', path+'/EQUAL_MAX/v_stars_ma.dat')
shutil.copy2(path+'/v_gas_ma.dat', path+'/EQUAL_MAX/v_gas_ma.dat')
shutil.copy2(path+'/gas_density.dat', path+'/EQUAL_MAX/gas_density.dat')
if os.path.exists(path+'/v_stars_mi.dat'):
shutil.copy2(path+'/v_stars_mi.dat', path+'/EQUAL_MAX/v_stars_mi.dat')
    # EQUAL method with the maximum-disc M/L
mainf(PATH=path+'/EQUAL_MAX',
NAME=name,
INCL=incl,
SCALE=scale,
RESOLUTION=resolution,
H_DISC=h_disc,
MR=M_R,
MB=M_B,
MU0=mu0_c_R,
R_EFF_B=r_eff_bulge,
DEG_STAR=pol_degree_star,
DEG_GAS=pol_degree_gas,
SIG_MA_DEG=sig_pol_deg,
SIG_MI_DEG=sig_pol_deg_mi,
RMIN=Rmin,
RMAX=Rmax,
GAS_CORR=gas_corr_by_incl,
M_TO_L=maxDisc,
DI=di,
MONTE_CARLO=monte_carlo_realizations,
CORRECTION_GAS=correctGasData,
CORRECTION_STAR=correctStarData,
CORRECTION_SIG_MA=correctSigmaLosMaj,
CORRECTION_SIG_MI=correctSigmaLosMin,
SURF_DENS_STAR=surfaceDensityStarR,
METHOD='EQUAL',
PECULIARITIES=peculiarities,
SIG_WINGS = sig_wings, USE_MINOR = use_minor, RUN=4)
renameFilesByMethod(path+'/EQUAL_MAX/', 'EQUAL_MAX')
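    # ------------------------------------------------------------------
    # Minimal sketch (not called above): the four nearly identical blocks
    # could be folded into a single helper like this one. The keyword list
    # passed to mainf() is assumed to stay exactly as in the calls above.
    def run_method(method, m_to_l, suffix, run_idx):
        workdir = path + '/' + suffix + '/'
        if not os.path.exists(workdir):
            os.makedirs(workdir)
        else:
            for f in os.listdir(workdir):
                os.remove(workdir + f)
        for fname in ('v_stars_ma.dat', 'v_gas_ma.dat', 'gas_density.dat', 'v_stars_mi.dat'):
            if os.path.exists(path + '/' + fname):
                shutil.copy2(path + '/' + fname, workdir + fname)
        mainf(PATH=path + '/' + suffix, NAME=name, INCL=incl, SCALE=scale,
              RESOLUTION=resolution, H_DISC=h_disc, MR=M_R, MB=M_B, MU0=mu0_c_R,
              R_EFF_B=r_eff_bulge, DEG_STAR=pol_degree_star, DEG_GAS=pol_degree_gas,
              SIG_MA_DEG=sig_pol_deg, SIG_MI_DEG=sig_pol_deg_mi, RMIN=Rmin, RMAX=Rmax,
              GAS_CORR=gas_corr_by_incl, M_TO_L=m_to_l, DI=di,
              MONTE_CARLO=monte_carlo_realizations,
              CORRECTION_GAS=correctGasData, CORRECTION_STAR=correctStarData,
              CORRECTION_SIG_MA=correctSigmaLosMaj, CORRECTION_SIG_MI=correctSigmaLosMin,
              SURF_DENS_STAR=surfaceDensityStarR, METHOD=method,
              PECULIARITIES=peculiarities, SIG_WINGS=sig_wings, USE_MINOR=use_minor,
              RUN=run_idx)
        renameFilesByMethod(workdir, suffix)
    # Equivalent to the four runs above:
    # run_method('EQUAL', M_to_L, 'EQUAL_BELL', 1)
    # run_method('HALF', maxDisc, 'HALF_MAX', 2)
    # run_method('HALF', M_to_L, 'HALF_BELL', 3)
    # run_method('EQUAL', maxDisc, 'EQUAL_MAX', 4)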
    # # Logging to a file
# sys.stdout = Tee(path + "/log_" + name + '.txt', 'w')
#
    # # Working with the I-band photometry.
# poly_star, poly_gas, star_data, gas_data = bendStarRC(correctGasData, correctStarData, path, incl, 0.0, False,
# pol_degree_star, pol_degree_gas, name,
# scale, gas_corr_by_incl, False)
# h_disc *= scale
# R1, R2 = correctDistanceInterval(path, scale)
# R1 = 10
# R2 = 75
# evaluateSigLosWingsExpScale(path, r_eff_bulge)
# sigLosGaussParams, sigMajData = fitGaussSigLosMaj(correctSigmaLosMaj, path, scale, incl)
# sigLosPolyParams = fitPolySigLosMaj(correctSigmaLosMaj, path, scale, incl, sig_pol_deg, False, min(Rmax, R2))
# sigLosSinhParams = fitSechSigLosMaj(correctSigmaLosMaj, path, scale, incl)
## sigLosGaussParamsMi, sigMiData = fitGaussSigLosMin(correctSigmaLosMin, path, scale, incl)
## sigLosPolyParamsMi = fitPolySigLosMin(correctSigmaLosMin, path, scale, incl, sig_pol_deg_mi, False, min(Rmax, R2))
# eval_SigPhi_to_sigR(poly_star, R1, R2, (R2 - R1) / 1000.0, path)
# evalEpyciclicFreq(poly_gas, arange(R1 + 2, R2, 0.1), path, resolution, h_disc)
# #M_to_L = mass_to_light_Iband(M_B - M_R)
# print '#!!!!!!!!!!!!# Mass-to-light ratio in I band (M/L) = ', M_to_L
# plotSurfDens(M_to_L, h_disc, mu0_c_R, 0, Rmax, 0.1, path, surfaceDensityStarR)
# gas_sf_data = surfaceDensityGas(path)
#
# r_surfd_gas = gas_sf_data[0]
# r_star_and_gas = list(arange(Rmin, Rmax, 0.1)) + r_surfd_gas
# r_star_and_gas.sort()
# # r_star_and_gas = filter(lambda x: ((x <= min(Rmax, R2)) & (x >= max(Rmin, R1))), r_star_and_gas)
# # r_surfd_gas = filter(lambda x: ((x <= min(Rmax, R2)) & (x >= max(Rmin, R1, r_eff_bulge))), r_surfd_gas)
# r_star_and_gas = filter(lambda x: x > r_eff_bulge, r_star_and_gas)
# r_surfd_gas = filter(lambda x: x > r_eff_bulge, r_surfd_gas)
#
# ratioSVEfromSigma(r_star_and_gas, h_disc, path, poly_star, sigLosPolyParams, sigLosPolyParamsMi, 100, incl)
# SVEfunction = simpleSVEfromSigma
# # SVEfunction = simpleSVEwhenPhiEqualsZ
# sig_R2, sig_Phi2, sig_Z2 = SVEfunction(r_star_and_gas, h_disc, path, poly_star, sigMajData,
# sigLosPolyParams, 0.5, 71, incl)
#
# # h_kin, sigR2 = asymmetricDriftEvaluation(r_star_and_gas, h_disc, path, poly_star, poly_gas, 91)
# # sigZ2, sigPhi2 = velosityEllipsoid(h_disc,r_star_and_gas, sigR2, path, incl, sigLosPolyParams, poly_star)
#
#
    # # Solve the gravitational instability for the points where gas density data are available
# star_density = [surfaceDensityStarI(M_to_L, h_disc, R, mu0_c_I) for R in r_surfd_gas]
# gas_density = [gas_sf_data[1][gas_sf_data[0].index(R)] for R in r_surfd_gas]
# sigma_corr_gas = [math.sqrt(sig_R2[r_star_and_gas.index(R)]) for R in r_surfd_gas]
# Qeffs = findTwoFluidQeffs(r_surfd_gas, poly_gas, gas_density, star_density, sigma_corr_gas, path, resolution, 60.0)
# hydroQeffs = findTwoFluidHydroQeffs(r_surfd_gas, poly_gas, gas_density, star_density, sigma_corr_gas, path,
# resolution, 60.0)
# hzGas = [zGas(R[1], R[2], resolution) / 2 for R in zip(r_surfd_gas, star_density, gas_density)]
# sigmaZgas = [math.sqrt(sig_Z2[r_star_and_gas.index(R)]) for R in r_surfd_gas]
# hzStar = [zStar(R[1], R[2], resolution, R[3]) / 2 for R in zip(r_surfd_gas, star_density, gas_density, sigmaZgas)]
# plotVerticalScale(star_density, gas_density, resolution, sigmaZgas, r_surfd_gas, path)
# discQeffs = findTwoFluidWithDiscQeffs(r_surfd_gas, poly_gas, gas_density, star_density, sigma_corr_gas, path,
# resolution, hzStar, hzGas, 60.0)
# Qeffs1F = findOneFluidQeffs(r_surfd_gas, poly_gas, gas_density, star_density, sigma_corr_gas, path, resolution,
# 60.0)
#
    # # Check how halving the disc thickness affects the result.
# hzStar = [hzs / 2 for hzs in hzStar]
# discQeffs_3 = findTwoFluidWithDiscQeffs(r_surfd_gas, poly_gas, gas_density, star_density, sigma_corr_gas, path,
# resolution, hzStar, hzGas, 60.0)
    # # Check the results for a uniformly thick disc of 0.2h
# # hzStar = [0.1 * h_disc] * r_surfd_gas.__len__()
# hzStar = [0.5] * r_surfd_gas.__len__()
# discQeffs_4 = findTwoFluidWithDiscQeffs(r_surfd_gas, poly_gas, gas_density, star_density, sigma_corr_gas, path,
# resolution, hzStar, hzGas, 60.0)
#
#
    # # The same for another inclination angle - to understand the dependence on the angle
# incl = incl + di
#
# poly_star1, poly_gas1, star_data1, gas_data1 = bendStarRC(correctGasData, correctStarData, path, incl, 0.0, False,
# pol_degree_star, pol_degree_gas, name, scale, gas_corr_by_incl, False)
# sigLosPolyParams1 = fitPolySigLosMaj(correctSigmaLosMaj, path, scale, incl, sig_pol_deg, False, min(Rmax, R2))
# eval_SigPhi_to_sigR(poly_star1, R1, R2, 0.1, path)
# evalEpyciclicFreq(poly_gas1, arange(R1 + 2, R2, 0.1), path, resolution, h_disc)
# sig_R2_1, sig_Phi2_1, sig_Z2_1 = SVEfunction(r_star_and_gas, h_disc, path, poly_star, sigMajData,
# sigLosPolyParams, 0.5, 71, incl)
# sigma_corr_gas_1 = [math.sqrt(sig_R2_1[r_star_and_gas.index(R)]) for R in r_surfd_gas]
# Qeffs_1 = findTwoFluidQeffs(r_surfd_gas, poly_gas1, gas_density, star_density, sigma_corr_gas_1, path, resolution,
# 60.0)
# hydroQeffs_1 = findTwoFluidHydroQeffs(r_surfd_gas, poly_gas1, gas_density, star_density, sigma_corr_gas_1, path,
# resolution, 60.0)
# sigmaZgas = [math.sqrt(sig_Z2_1[r_star_and_gas.index(R)]) for R in r_surfd_gas]
# hzStar = [zStar(R[1], R[2], resolution, R[3]) / 2 for R in zip(r_surfd_gas, star_density, gas_density, sigmaZgas)]
# discQeffs_1 = findTwoFluidWithDiscQeffs(r_surfd_gas, poly_gas1, gas_density, star_density, sigma_corr_gas_1, path,
# resolution, hzStar, hzGas, 60.0)
# Qeffs1F_1 = findOneFluidQeffs(r_surfd_gas, poly_gas1, gas_density, star_density, sigma_corr_gas_1, path, resolution,
# 60.0)
#
    # # The same for another angle
# incl = incl - 2 * di
#
# poly_star2, poly_gas2, star_data2, gas_data2 = bendStarRC(correctGasData, correctStarData, path, incl, 0.0, False,
# pol_degree_star, pol_degree_gas, name,
# scale, gas_corr_by_incl, False)
# sigLosPolyParams2 = fitPolySigLosMaj(correctSigmaLosMaj, path, scale, incl, sig_pol_deg, False, min(Rmax, R2))
# eval_SigPhi_to_sigR(poly_star2, R1, R2, 0.1, path)
# evalEpyciclicFreq(poly_gas2, arange(R1 + 2, R2, 0.1), path, resolution, h_disc)
# sig_R2_2, sig_Phi2_2, sig_Z2_2 = SVEfunction(r_star_and_gas, h_disc, path, poly_star, sigMajData,
# sigLosPolyParams, 0.5, 71, incl)
# sigma_corr_gas_2 = [math.sqrt(sig_R2_2[r_star_and_gas.index(R)]) for R in r_surfd_gas]
# Qeffs_2 = findTwoFluidQeffs(r_surfd_gas, poly_gas2, gas_density, star_density, sigma_corr_gas_2, path, resolution,
# 60.0)
# hydroQeffs_2 = findTwoFluidHydroQeffs(r_surfd_gas, poly_gas2, gas_density, star_density, sigma_corr_gas_2, path,
# resolution, 60.0)
# sigmaZgas = [math.sqrt(sig_Z2_2[r_star_and_gas.index(R)]) for R in r_surfd_gas]
# hzStar = [zStar(R[1], R[2], resolution, R[3]) / 2 for R in zip(r_surfd_gas, star_density, gas_density, sigmaZgas)]
# discQeffs_2 = findTwoFluidWithDiscQeffs(r_surfd_gas, poly_gas2, gas_density, star_density, sigma_corr_gas_2, path,
# resolution, hzStar, hzGas, 60.0)
# Qeffs1F_2 = findOneFluidQeffs(r_surfd_gas, poly_gas2, gas_density, star_density, sigma_corr_gas_2, path, resolution,
# 60.0)
#
    # # Monte Carlo realizations, monte_carlo_realizations in total.
#
# incl = incl + di
# sigR2_list = [sig_R2]
# sigZ2_list = [sig_Z2]
# sigPhi2_list = [sig_Phi2]
# Qeffs_list = [zip(*Qeffs)[2]]
# hydroQeffs_list = [zip(*hydroQeffs)[2]]
# discQeffs_list = [zip(*discQeffs)[2]]
# Qeffs1F_list = [Qeffs1F]
# MC_iter = 1
#
# while MC_iter < monte_carlo_realizations:
# MC_iter += 1
# print '#!!!!!!!!!!!!# Monte-Carlo iterration number ', MC_iter
# poly_star_mc, poly_gas_mc, star_data_mc, gas_data_mc = bendStarRC(correctGasData, correctStarData, path, incl,
# 0.0, False, pol_degree_star, pol_degree_gas, name, scale, gas_corr_by_incl, True)
# sigLosPolyParams_mc = fitPolySigLosMaj(correctSigmaLosMaj, path, scale, incl, sig_pol_deg, True, min(Rmax, R2))
# eval_SigPhi_to_sigR(poly_star_mc, R1, R2, 0.1, path)
# evalEpyciclicFreq(poly_gas_mc, arange(R1 + 2, R2, 0.1), path, resolution, h_disc)
# sig_R2_mc, sig_Phi2_mc, sig_Z2_mc = SVEfunction(r_star_and_gas, h_disc, path, poly_star, sigMajData,
# sigLosPolyParams, 0.5, 71, incl)
# sigma_corr_gas_mc = [math.sqrt(sig_R2_mc[r_star_and_gas.index(R)]) for R in r_surfd_gas]
# Qeffs_mc = findTwoFluidQeffs(r_surfd_gas, poly_gas_mc, gas_density, star_density, sigma_corr_gas_mc, path,
# resolution, 60.0)
# hydroQeffs_mc = findTwoFluidHydroQeffs(r_surfd_gas, poly_gas_mc, gas_density, star_density, sigma_corr_gas_mc,
# path,
# resolution, 60.0)
# sigmaZgas_mc = [math.sqrt(sig_Z2_mc[r_star_and_gas.index(R)]) for R in r_surfd_gas]
# hzStar_mc = [zStar(R[1], R[2], resolution, R[3]) / 2 for R in
# zip(r_surfd_gas, star_density, gas_density, sigmaZgas_mc)]
# discQeffs_mc = findTwoFluidWithDiscQeffs(r_surfd_gas, poly_gas_mc, gas_density, star_density, sigma_corr_gas_mc,
# path,
# resolution, hzStar_mc, hzGas, 60.0)
# Qeffs1F_mc = findOneFluidQeffs(r_surfd_gas, poly_gas_mc, gas_density, star_density, sigma_corr_gas_mc, path,
# resolution,
# 60.0)
# sigR2_list.append(sig_R2_mc)
# sigZ2_list.append(sig_Z2_mc)
# sigPhi2_list.append(sig_Phi2_mc)
# Qeffs_list.append(zip(*Qeffs_mc)[2])
# hydroQeffs_list.append(zip(*hydroQeffs_mc)[2])
# discQeffs_list.append(zip(*discQeffs_mc)[2])
# Qeffs1F_list.append(Qeffs1F_mc)
#
# plotFinalPics(path, poly_star, poly_gas, di, star_data, gas_data, incl, resolution, h_disc, r_eff_bulge,
# sigMajData, sigLosGaussParams, sigLosPolyParams, sigLosSinhParams, r_surfd_gas,
# zip(Qeffs1F, Qeffs1F_1, Qeffs1F_2) + Qeffs1F_list,
# zip(zip(*hydroQeffs)[2], zip(*hydroQeffs_1)[2], zip(*hydroQeffs_2)[2]) + hydroQeffs_list,
# zip(zip(*Qeffs)[2], zip(*Qeffs_1)[2], zip(*Qeffs_2)[2]) + Qeffs_list,
# zip(zip(*discQeffs)[2], zip(*discQeffs_1)[2], zip(*discQeffs_2)[2], zip(*discQeffs_3)[2], zip(*discQeffs_4)[2])
# + discQeffs_list,
# r_star_and_gas,
# zip(sig_R2, sig_R2_1, sig_R2_2) + sigR2_list,
# zip(sig_Phi2, sig_Phi2_1, sig_Phi2_2) + sigPhi2_list,
# zip(sig_Z2, sig_Z2_1, sig_Z2_2) + sigZ2_list,
# hzStar)
# plt.show()
finishTime = time.time()
print '#!!!!!!!!!!!!# Time total: ', (finishTime - startTime), 's'
print '#!!!!!!!!!!!!# THE END'
| gpl-3.0 | 8,201,307,194,499,260,000 | 41.394643 | 124 | 0.609241 | false |
lahwaacz/wiki-scripts | ws/db/selects/__init__.py | 1 | 8229 | #!/usr/bin/env python3
from collections import OrderedDict
import sqlalchemy as sa
from .namespaces import *
from .interwiki import *
from .lists.recentchanges import *
from .lists.logevents import *
from .lists.allpages import *
from .lists.protectedtitles import *
from .lists.allrevisions import *
from .lists.alldeletedrevisions import *
from .lists.allusers import *
from .props.info import *
from .props.pageprops import *
from .props.revisions import *
from .props.deletedrevisions import *
from .props.templates import *
from .props.transcludedin import *
from .props.links import *
from .props.linkshere import *
from .props.images import *
from .props.categories import *
from .props.langlinks import *
from .props.iwlinks import *
from .props.extlinks import *
from .props.redirects import *
from .props.sections import *
__classes_lists = {
"recentchanges": RecentChanges,
"logevents": LogEvents,
"allpages": AllPages,
"protectedtitles": ProtectedTitles,
"allrevisions": AllRevisions,
"alldeletedrevisions": AllDeletedRevisions,
"allusers": AllUsers,
}
# TODO: generator=allpages works, check the others
__classes_generators = {
"recentchanges": RecentChanges,
"allpages": AllPages,
"protectedtitles": ProtectedTitles,
"allrevisions": AllRevisions,
"alldeletedrevisions": AllDeletedRevisions,
}
# MediaWiki's prop=revisions supports 3 modes:
# 1. multiple pages, but only the latest revision
# 2. single page, but all revisions
# 3. specifying revids
# Fuck it, let's have separate "latestrevisions" for mode 1...
__classes_props = {
"info": Info,
"pageprops": PageProps,
"latestrevisions": Revisions, # custom module
"revisions": Revisions,
"deletedrevisions": DeletedRevisions,
"templates": Templates,
"transcludedin": TranscludedIn,
"links": Links,
"linkshere": LinksHere,
"images": Images,
"categories": Categories,
"langlinks": LanguageLinks,
"iwlinks": InterwikiLinks,
"extlinks": ExternalLinks,
"redirects": Redirects,
"sections": Sections, # custom module
}
def list(db, params):
assert "list" in params
list = params.pop("list")
if list not in __classes_lists:
raise NotImplementedError("Module list={} is not implemented yet.".format(list))
s = __classes_lists[list](db)
# TODO: make sure that all parameters are used (i.e. when all modules take their parameters, params_copy should be empty)
list_params = s.filter_params(params)
s.set_defaults(list_params)
s.sanitize_params(list_params)
query = s.get_select(list_params)
# TODO: some lists like allrevisions should group the results per page like MediaWiki
result = s.execute_sql(query)
for row in result:
yield s.db_to_api(row)
result.close()
def get_pageset(db, titles=None, pageids=None):
"""
:param list titles: list of :py:class:`ws.parser_helpers.title.Title` objects
:param list pageids: list of :py:obj:`int` objects
"""
assert titles is not None or pageids is not None
assert titles is None or pageids is None
# join to get the namespace prefix
page = db.page
nss = db.namespace_starname
tail = page.outerjoin(nss, page.c.page_namespace == nss.c.nss_id)
s = sa.select([page.c.page_id, page.c.page_namespace, page.c.page_title, nss.c.nss_name])
if titles is not None:
ns_title_pairs = [(t.namespacenumber, t.dbtitle()) for t in titles]
s = s.where(sa.tuple_(page.c.page_namespace, page.c.page_title).in_(ns_title_pairs))
s = s.order_by(page.c.page_namespace.asc(), page.c.page_title.asc())
ex = sa.select([page.c.page_namespace, page.c.page_title])
ex = ex.where(sa.tuple_(page.c.page_namespace, page.c.page_title).in_(ns_title_pairs))
elif pageids is not None:
s = s.where(page.c.page_id.in_(pageids))
s = s.order_by(page.c.page_id.asc())
ex = sa.select([page.c.page_id])
ex = ex.where(page.c.page_id.in_(pageids))
return tail, s, ex
def query_pageset(db, params):
params_copy = params.copy()
# TODO: for the lack of better structure, we abuse the AllPages class for execution of titles= and pageids= queries
s = AllPages(db)
assert "titles" in params or "pageids" in params or "generator" in params
if "titles" in params:
titles = params_copy.pop("titles")
if isinstance(titles, str):
titles = {titles}
assert isinstance(titles, set)
titles = [db.Title(t) for t in titles]
tail, pageset, ex = get_pageset(db, titles=titles)
elif "pageids" in params:
pageids = params_copy.pop("pageids")
if isinstance(pageids, int):
pageids = {pageids}
assert isinstance(pageids, set)
tail, pageset, ex = get_pageset(db, pageids=pageids)
elif "generator" in params:
generator = params_copy.pop("generator")
if generator not in __classes_generators:
raise NotImplementedError("Module generator={} is not implemented yet.".format(generator))
s = __classes_generators[generator](db)
# TODO: make sure that all parameters are used (i.e. when all modules take their parameters, params_copy should be empty)
generator_params = s.filter_params(params_copy, generator=True)
s.set_defaults(generator_params)
s.sanitize_params(generator_params)
pageset, tail = s.get_pageset(generator_params)
# report missing pages (does not make sense for generators)
if "generator" not in params:
existing_pages = set()
result = s.execute_sql(ex)
for row in result:
if "titles" in params:
existing_pages.add((row.page_namespace, row.page_title))
elif "pageids" in params:
existing_pages.add(row.page_id)
if "titles" in params:
for t in titles:
if (t.namespacenumber, t.dbtitle()) not in existing_pages:
yield {"missing": "", "ns": t.namespacenumber, "title": t.dbtitle()}
elif "pageids" in params:
for p in pageids:
if p not in existing_pages:
yield {"missing": "", "pageid": p}
# fetch the pageset into an intermediate list
# TODO: query-continuation is probably needed for better efficiency
query = pageset.select_from(tail)
pages = OrderedDict() # for indexed access, like in MediaWiki
result = s.execute_sql(query)
for row in result:
entry = s.db_to_api(row)
pages[entry["pageid"]] = entry
result.close()
if "prop" in params:
prop = params_copy.pop("prop")
if isinstance(prop, str):
prop = {prop}
assert isinstance(prop, set)
for p in prop:
if p not in __classes_props:
raise NotImplementedError("Module prop={} is not implemented yet.".format(p))
_s = __classes_props[p](db)
if p == "latestrevisions":
prop_tail = _s.join_with_pageset(tail, enum_rev_mode=False)
else:
prop_tail = _s.join_with_pageset(tail)
prop_params = _s.filter_params(params_copy)
_s.set_defaults(prop_params)
prop_select, prop_tail = _s.get_select_prop(pageset, prop_tail, prop_params)
query = prop_select.select_from(prop_tail)
result = _s.execute_sql(query)
for row in result:
page = pages[row["page_id"]]
_s.db_to_api_subentry(page, row)
result.close()
yield from pages.values()
def query(db, params=None, **kwargs):
if params is None:
params = kwargs
elif not isinstance(params, dict):
raise ValueError("params must be dict or None")
elif kwargs and params:
raise ValueError("specifying 'params' and 'kwargs' at the same time is not supported")
if "list" in params:
return list(db, params)
elif "titles" in params or "pageids" in params or "generator" in params:
return query_pageset(db, params)
raise NotImplementedError("Unknown query: no recognizable parameter ({}).".format(params))
| gpl-3.0 | -5,223,906,679,752,857,000 | 35.573333 | 129 | 0.645036 | false |
chubbymaggie/angr | angr/storage/memory.py | 1 | 38942 | #!/usr/bin/env python
import logging
l = logging.getLogger("angr.storage.memory")
import claripy
from ..state_plugins.plugin import SimStatePlugin
from ..engines.vex.ccall import _get_flags
stn_map = { 'st%d' % n: n for n in xrange(8) }
tag_map = { 'tag%d' % n: n for n in xrange(8) }
class AddressWrapper(object):
"""
AddressWrapper is used in SimAbstractMemory, which provides extra meta information for an address (or a ValueSet
object) that is normalized from an integer/BVV/StridedInterval.
"""
def __init__(self, region, region_base_addr, address, is_on_stack, function_address):
"""
Constructor for the class AddressWrapper.
        :param str region:           Name of the memory region it belongs to.
:param int region_base_addr: Base address of the memory region
:param address: An address (not a ValueSet object).
:param bool is_on_stack: Whether this address is on a stack region or not.
:param int function_address: Related function address (if any).
"""
self.region = region
self.region_base_addr = region_base_addr
self.address = address
self.is_on_stack = is_on_stack
self.function_address = function_address
def __hash__(self):
return hash((self.region, self.address))
def __eq__(self, other):
return self.region == other.region and self.address == other.address
def __repr__(self):
return "<%s> %s" % (self.region, hex(self.address))
def to_valueset(self, state):
"""
Convert to a ValueSet instance
:param state: A state
:return: The converted ValueSet instance
"""
return state.se.VS(state.arch.bits, self.region, self.region_base_addr, self.address)
class RegionDescriptor(object):
"""
Descriptor for a memory region ID.
"""
def __init__(self, region_id, base_address, related_function_address=None):
self.region_id = region_id
self.base_address = base_address
self.related_function_address = related_function_address
def __repr__(self):
return "<%s - %#x>" % (
self.region_id,
self.related_function_address if self.related_function_address is not None else 0
)
class RegionMap(object):
"""
Mostly used in SimAbstractMemory, RegionMap stores a series of mappings between concrete memory address ranges and
memory regions, like stack frames and heap regions.
"""
def __init__(self, is_stack):
"""
Constructor
:param is_stack: Whether this is a region map for stack frames or not. Different strategies apply for stack
regions.
"""
self.is_stack = is_stack
# An AVLTree, which maps stack addresses to region IDs
self._address_to_region_id = AVLTree()
# A dict, which maps region IDs to memory address ranges
self._region_id_to_address = { }
#
# Properties
#
def __repr__(self):
return "RegionMap<%s>" % (
"S" if self.is_stack else "H"
)
@property
def is_empty(self):
return len(self._address_to_region_id) == 0
@property
def stack_base(self):
if not self.is_stack:
raise SimRegionMapError('Calling "stack_base" on a non-stack region map.')
return self._address_to_region_id.max_key()
@property
def region_ids(self):
return self._region_id_to_address.keys()
#
# Public methods
#
def copy(self):
r = RegionMap(is_stack=self.is_stack)
# A shallow copy should be enough, since we never modify any RegionDescriptor object in-place
if len(self._address_to_region_id) > 0:
# TODO: There is a bug in bintrees 2.0.2 that prevents us from copying a non-empty AVLTree object
# TODO: Consider submit a pull request
r._address_to_region_id = self._address_to_region_id.copy()
r._region_id_to_address = self._region_id_to_address.copy()
return r
def map(self, absolute_address, region_id, related_function_address=None):
"""
Add a mapping between an absolute address and a region ID. If this is a stack region map, all stack regions
        beyond (lower than) this newly added region will be discarded.
:param absolute_address: An absolute memory address.
:param region_id: ID of the memory region.
:param related_function_address: A related function address, mostly used for stack regions.
"""
if self.is_stack:
# Sanity check
if not region_id.startswith('stack_'):
                raise SimRegionMapError('Received a non-stack memory ID "%s" in a stack region map' % region_id)
# Remove all stack regions that are lower than the one to add
while True:
try:
addr = self._address_to_region_id.floor_key(absolute_address)
descriptor = self._address_to_region_id[addr]
# Remove this mapping
del self._address_to_region_id[addr]
# Remove this region ID from the other mapping
del self._region_id_to_address[descriptor.region_id]
except KeyError:
break
else:
if absolute_address in self._address_to_region_id:
descriptor = self._address_to_region_id[absolute_address]
# Remove this mapping
del self._address_to_region_id[absolute_address]
del self._region_id_to_address[descriptor.region_id]
# Add this new region mapping
desc = RegionDescriptor(
region_id,
absolute_address,
related_function_address=related_function_address
)
self._address_to_region_id[absolute_address] = desc
self._region_id_to_address[region_id] = desc
def unmap_by_address(self, absolute_address):
"""
Removes a mapping based on its absolute address.
:param absolute_address: An absolute address
"""
desc = self._address_to_region_id[absolute_address]
del self._address_to_region_id[absolute_address]
del self._region_id_to_address[desc.region_id]
def absolutize(self, region_id, relative_address):
"""
Convert a relative address in some memory region to an absolute address.
:param region_id: The memory region ID
:param relative_address: The relative memory offset in that memory region
:return: An absolute address if converted, or an exception is raised when region id does not
exist.
"""
if region_id == 'global':
# The global region always bases 0
return relative_address
if region_id not in self._region_id_to_address:
raise SimRegionMapError('Non-existent region ID "%s"' % region_id)
base_address = self._region_id_to_address[region_id].base_address
return base_address + relative_address
def relativize(self, absolute_address, target_region_id=None):
"""
Convert an absolute address to the memory offset in a memory region.
        Note that if an address that belongs to a heap region is passed into a stack region map, it will be converted
        to an offset within the closest stack frame, and vice versa for passing a stack address to a heap region map.
Therefore you should only pass in address that belongs to the same category (stack or non-stack) of this region
map.
:param absolute_address: An absolute memory address
:return: A tuple of the closest region ID, the relative offset, and the related function
address.
"""
if target_region_id is None:
if self.is_stack:
# Get the base address of the stack frame it belongs to
base_address = self._address_to_region_id.ceiling_key(absolute_address)
else:
try:
base_address = self._address_to_region_id.floor_key(absolute_address)
except KeyError:
# Not found. It belongs to the global region then.
return 'global', absolute_address, None
descriptor = self._address_to_region_id[base_address]
else:
if target_region_id == 'global':
# Just return the absolute address
return 'global', absolute_address, None
if target_region_id not in self._region_id_to_address:
raise SimRegionMapError('Trying to relativize to a non-existent region "%s"' % target_region_id)
descriptor = self._region_id_to_address[target_region_id]
base_address = descriptor.base_address
return descriptor.region_id, absolute_address - base_address, descriptor.related_function_address
class MemoryStoreRequest(object):
"""
A MemoryStoreRequest is used internally by SimMemory to track memory request data.
"""
def __init__(self, addr, data=None, size=None, condition=None, endness=None):
self.addr = addr
self.data = data
self.size = size
self.condition = condition
self.endness = endness
# was this store done?
self.completed = False
# stuff that's determined during handling
self.actual_addresses = None
self.constraints = [ ]
self.fallback_values = None
self.symbolic_sized_values = None
self.conditional_values = None
self.simplified_values = None
self.stored_values = None
def _adjust_condition(self, state):
self.condition = state._adjust_condition(self.condition)
class SimMemory(SimStatePlugin):
"""
Represents the memory space of the process.
"""
def __init__(self, endness=None, abstract_backer=None, stack_region_map=None, generic_region_map=None):
SimStatePlugin.__init__(self)
self.id = None
self.endness = "Iend_BE" if endness is None else endness
# Boolean or None. Indicates whether this memory is internally used inside SimAbstractMemory
self._abstract_backer = abstract_backer
#
# These are some performance-critical thresholds
#
# The maximum range of a normal write operation. If an address range is greater than this number,
# SimMemory will simply concretize it to a single value. Note that this is only relevant when
# the "symbolic" concretization strategy is enabled for writes.
self._write_address_range = 128
self._write_address_range_approx = 128
# The maximum range of a symbolic read address. If an address range is greater than this number,
# SimMemory will simply concretize it.
self._read_address_range = 1024
self._read_address_range_approx = 1024
# The maximum size of a symbolic-sized operation. If a size maximum is greater than this number,
# SimMemory will constrain it to this number. If the size minimum is greater than this
# number, a SimMemoryLimitError is thrown.
self._maximum_symbolic_size = 8 * 1024
self._maximum_symbolic_size_approx = 4*1024
# Same, but for concrete writes
self._maximum_concrete_size = 0x1000000
# Save those arguments first. Since self.state is empty at this moment, we delay the initialization of region
# maps until set_state() is called.
self._temp_stack_region_map = stack_region_map
self._temp_generic_region_map = generic_region_map
self._stack_region_map = None
self._generic_region_map = None
@property
def category(self):
"""
Return the category of this SimMemory instance. It can be one of the three following categories: reg, mem,
or file.
"""
if self.id in ('reg', 'mem'):
return self.id
elif self._abstract_backer:
return 'mem'
elif self.id.startswith('file'):
return 'file'
else:
raise SimMemoryError('Unknown SimMemory category for memory_id "%s"' % self.id)
def set_state(self, state):
"""
Call the set_state method in SimStatePlugin class, and then perform the delayed initialization.
:param state: The SimState instance
"""
SimStatePlugin.set_state(self, state)
# Delayed initialization
stack_region_map, generic_region_map = self._temp_stack_region_map, self._temp_generic_region_map
if stack_region_map or generic_region_map:
# Inherited from its parent
self._stack_region_map = stack_region_map.copy()
self._generic_region_map = generic_region_map.copy()
else:
if not self._abstract_backer and o.REGION_MAPPING in self.state.options:
# Only the top-level SimMemory instance can have region maps.
self._stack_region_map = RegionMap(True)
self._generic_region_map = RegionMap(False)
else:
self._stack_region_map = None
self._generic_region_map = None
def _resolve_location_name(self, name, is_write=False):
if self.category == 'reg':
if self.state.arch.name in ('X86', 'AMD64'):
if name in stn_map:
return (((stn_map[name] + self.load('ftop')) & 7) << 3) + self.state.arch.registers['fpu_regs'][0], 8
elif name in tag_map:
return ((tag_map[name] + self.load('ftop')) & 7) + self.state.arch.registers['fpu_tags'][0], 1
elif name in ('flags', 'eflags', 'rflags'):
# we tweak the state to convert the vex condition registers into the flags register
if not is_write: # this work doesn't need to be done if we're just gonna overwrite it
self.store('cc_dep1', _get_flags(self.state)[0]) # TODO: can constraints be added by this?
self.store('cc_op', 0) # OP_COPY
return self.state.arch.registers['cc_dep1'][0], self.state.arch.bytes
if self.state.arch.name in ('ARMEL', 'ARMHF', 'ARM', 'AARCH64'):
if name == 'flags':
if not is_write:
self.store('cc_dep1', _get_flags(self.state)[0])
self.store('cc_op', 0)
return self.state.arch.registers['cc_dep1'][0], self.state.arch.bytes
return self.state.arch.registers[name]
elif name[0] == '*':
return self.state.registers.load(name[1:]), None
else:
raise SimMemoryError("Trying to address memory with a register name.")
def _convert_to_ast(self, data_e, size_e=None):
"""
Make an AST out of concrete @data_e
"""
if type(data_e) is str:
# Convert the string into a BVV, *regardless of endness*
bits = len(data_e) * 8
data_e = self.state.se.BVV(data_e, bits)
elif type(data_e) in (int, long):
data_e = self.state.se.BVV(data_e, size_e*8 if size_e is not None
else self.state.arch.bits)
else:
data_e = data_e.raw_to_bv()
return data_e
def set_stack_address_mapping(self, absolute_address, region_id, related_function_address=None):
"""
Create a new mapping between an absolute address (which is the base address of a specific stack frame) and a
region ID.
:param absolute_address: The absolute memory address.
:param region_id: The region ID.
:param related_function_address: Related function address.
"""
if self._stack_region_map is None:
raise SimMemoryError('Stack region map is not initialized.')
self._stack_region_map.map(absolute_address, region_id, related_function_address=related_function_address)
def unset_stack_address_mapping(self, absolute_address):
"""
Remove a stack mapping.
:param absolute_address: An absolute memory address, which is the base address of the stack frame to destroy.
"""
if self._stack_region_map is None:
raise SimMemoryError('Stack region map is not initialized.')
self._stack_region_map.unmap_by_address(absolute_address)
def stack_id(self, function_address):
"""
Return a memory region ID for a function. If the default region ID exists in the region mapping, an integer
will appended to the region name. In this way we can handle recursive function calls, or a function that
appears more than once in the call frame.
This also means that `stack_id()` should only be called when creating a new stack frame for a function. You are
not supposed to call this function every time you want to map a function address to a stack ID.
:param int function_address: Address of the function.
:return: ID of the new memory region.
:rtype: str
"""
region_id = 'stack_0x%x' % function_address
# deduplication
region_ids = self._stack_region_map.region_ids
if region_id not in region_ids:
return region_id
else:
for i in xrange(0, 2000):
new_region_id = region_id + '_%d' % i
if new_region_id not in region_ids:
return new_region_id
raise SimMemoryError('Cannot allocate region ID for function %#08x - recursion too deep' % function_address)
def store(self, addr, data, size=None, condition=None, add_constraints=None, endness=None, action=None,
inspect=True, priv=None, disable_actions=False):
"""
Stores content into memory.
:param addr: A claripy expression representing the address to store at.
        :param data:            The data to store (claripy expression or something convertible to a claripy expression).
:param size: A claripy expression representing the size of the data to store.
The following parameters are optional.
:param condition: A claripy expression representing a condition if the store is conditional.
:param add_constraints: Add constraints resulting from the merge (default: True).
:param endness: The endianness for the data.
:param action: A SimActionData to fill out with the final written value and constraints.
:param bool inspect: Whether this store should trigger SimInspect breakpoints or not.
:param bool disable_actions: Whether this store should avoid creating SimActions or not. When set to False,
state options are respected.
"""
if priv is not None: self.state.scratch.push_priv(priv)
addr_e = _raw_ast(addr)
data_e = _raw_ast(data)
size_e = _raw_ast(size)
condition_e = _raw_ast(condition)
add_constraints = True if add_constraints is None else add_constraints
if isinstance(addr, str):
named_addr, named_size = self._resolve_location_name(addr, is_write=True)
addr = named_addr
addr_e = addr
if size is None:
size = named_size
size_e = size
# store everything as a BV
data_e = self._convert_to_ast(data_e, size_e if isinstance(size_e, (int, long)) else None)
if type(size_e) in (int, long):
size_e = self.state.se.BVV(size_e, self.state.arch.bits)
if inspect is True:
if self.category == 'reg':
self.state._inspect(
'reg_write',
BP_BEFORE,
reg_write_offset=addr_e,
reg_write_length=size_e,
reg_write_expr=data_e)
addr_e = self.state._inspect_getattr('reg_write_offset', addr_e)
size_e = self.state._inspect_getattr('reg_write_length', size_e)
data_e = self.state._inspect_getattr('reg_write_expr', data_e)
elif self.category == 'mem':
self.state._inspect(
'mem_write',
BP_BEFORE,
mem_write_address=addr_e,
mem_write_length=size_e,
mem_write_expr=data_e,
)
addr_e = self.state._inspect_getattr('mem_write_address', addr_e)
size_e = self.state._inspect_getattr('mem_write_length', size_e)
data_e = self.state._inspect_getattr('mem_write_expr', data_e)
# if the condition is false, bail
if condition_e is not None and self.state.se.is_false(condition_e):
if priv is not None: self.state.scratch.pop_priv()
return
if (
o.UNDER_CONSTRAINED_SYMEXEC in self.state.options and
isinstance(addr_e, claripy.ast.Base) and
addr_e.uninitialized
):
self._constrain_underconstrained_index(addr_e)
request = MemoryStoreRequest(addr_e, data=data_e, size=size_e, condition=condition_e, endness=endness)
try:
self._store(request)
except SimSegfaultError as e:
e.original_addr = addr_e
raise
if inspect is True:
if self.category == 'reg': self.state._inspect('reg_write', BP_AFTER)
if self.category == 'mem': self.state._inspect('mem_write', BP_AFTER)
add_constraints = self.state._inspect_getattr('address_concretization_add_constraints', add_constraints)
if add_constraints and len(request.constraints) > 0:
self.state.add_constraints(*request.constraints)
if not disable_actions:
if request.completed and o.AUTO_REFS in self.state.options and action is None and not self._abstract_backer:
ref_size = size * 8 if size is not None else data_e.size()
region_type = self.category
if region_type == 'file':
# Special handling for files to keep compatibility
# We may use some refactoring later
region_type = self.id
action = SimActionData(self.state, region_type, 'write', addr=addr_e, data=data_e, size=ref_size,
condition=condition
)
self.state.history.add_action(action)
if request.completed and action is not None:
action.actual_addrs = request.actual_addresses
action.actual_value = action._make_object(request.stored_values[0]) # TODO
if len(request.constraints) > 0:
action.added_constraints = action._make_object(self.state.se.And(*request.constraints))
else:
action.added_constraints = action._make_object(self.state.se.true)
if priv is not None: self.state.scratch.pop_priv()
def _store(self, request):
raise NotImplementedError()
def store_cases(self, addr, contents, conditions, fallback=None, add_constraints=None, endness=None, action=None):
"""
Stores content into memory, conditional by case.
:param addr: A claripy expression representing the address to store at.
:param contents: A list of bitvectors, not necessarily of the same size. Use None to denote an empty
write.
:param conditions: A list of conditions. Must be equal in length to contents.
The following parameters are optional.
:param fallback: A claripy expression representing what the write should resolve to if all conditions
evaluate to false (default: whatever was there before).
:param add_constraints: Add constraints resulting from the merge (default: True)
:param endness: The endianness for contents as well as fallback.
:param action: A SimActionData to fill out with the final written value and constraints.
:type action: SimActionData
"""
if fallback is None and all(c is None for c in contents):
l.debug("Avoiding an empty write.")
return
addr_e = _raw_ast(addr)
contents_e = _raw_ast(contents)
conditions_e = _raw_ast(conditions)
fallback_e = _raw_ast(fallback)
max_bits = max(c.length for c in contents_e if isinstance(c, claripy.ast.Bits)) \
if fallback is None else fallback.length
# if fallback is not provided by user, load it from memory
# remember to specify the endianness!
fallback_e = self.load(addr, max_bits/8, add_constraints=add_constraints, endness=endness) \
if fallback_e is None else fallback_e
req = self._store_cases(addr_e, contents_e, conditions_e, fallback_e, endness=endness)
add_constraints = self.state._inspect_getattr('address_concretization_add_constraints', add_constraints)
if add_constraints:
self.state.add_constraints(*req.constraints)
if req.completed and o.AUTO_REFS in self.state.options and action is None:
region_type = self.category
if region_type == 'file':
# Special handling for files to keep compatibility
# We may use some refactoring later
region_type = self.id
action = SimActionData(self.state, region_type, 'write', addr=addr_e, data=req.stored_values[-1],
size=max_bits, condition=self.state.se.Or(*conditions), fallback=fallback
)
self.state.history.add_action(action)
if req.completed and action is not None:
action.actual_addrs = req.actual_addresses
action.actual_value = action._make_object(req.stored_values[-1])
action.added_constraints = action._make_object(self.state.se.And(*req.constraints)
if len(req.constraints) > 0 else self.state.se.true)
def _store_cases(self, addr, contents, conditions, fallback, endness=None):
extended_contents = [ ]
for c in contents:
if c is None:
c = fallback
else:
need_bits = fallback.length - c.length
if need_bits > 0:
c = c.concat(fallback[need_bits-1:0])
extended_contents.append(c)
case_constraints = { }
for c,g in zip(extended_contents, conditions):
if c not in case_constraints:
case_constraints[c] = [ ]
case_constraints[c].append(g)
unique_contents = [ ]
unique_constraints = [ ]
for c,g in case_constraints.items():
unique_contents.append(c)
unique_constraints.append(self.state.se.Or(*g))
if len(unique_contents) == 1 and unique_contents[0] is fallback:
req = MemoryStoreRequest(addr, data=fallback, endness=endness)
return self._store(req)
else:
simplified_contents = [ ]
simplified_constraints = [ ]
for c,g in zip(unique_contents, unique_constraints):
simplified_contents.append(self.state.se.simplify(c))
simplified_constraints.append(self.state.se.simplify(g))
cases = zip(simplified_constraints, simplified_contents)
#cases = zip(unique_constraints, unique_contents)
ite = self.state.se.simplify(self.state.se.ite_cases(cases, fallback))
req = MemoryStoreRequest(addr, data=ite, endness=endness)
return self._store(req)
def load(self, addr, size=None, condition=None, fallback=None, add_constraints=None, action=None, endness=None,
inspect=True, disable_actions=False, ret_on_segv=False):
"""
        Loads size bytes from addr.
        :param addr:            The address to load from.
:param size: The size (in bytes) of the load.
:param condition: A claripy expression representing a condition for a conditional load.
:param fallback: A fallback value if the condition ends up being False.
:param add_constraints: Add constraints resulting from the merge (default: True).
:param action: A SimActionData to fill out with the constraints.
:param endness: The endness to load with.
        :param bool inspect:    Whether this load should trigger SimInspect breakpoints or not.
        :param bool disable_actions: Whether this load should avoid creating SimActions or not. When set to False,
                                     state options are respected.
        :param bool ret_on_segv: Whether to return the memory that has already been loaded before a segmentation fault is triggered. The default is False.
There are a few possible return values. If no condition or fallback are passed in,
then the return is the bytes at the address, in the form of a claripy expression.
For example:
<A BVV(0x41, 32)>
On the other hand, if a condition and fallback are provided, the value is conditional:
<A If(condition, BVV(0x41, 32), fallback)>
"""
add_constraints = True if add_constraints is None else add_constraints
addr_e = _raw_ast(addr)
size_e = _raw_ast(size)
condition_e = _raw_ast(condition)
fallback_e = _raw_ast(fallback)
if isinstance(addr, str):
named_addr, named_size = self._resolve_location_name(addr)
addr = named_addr
addr_e = addr
if size is None:
size = named_size
size_e = size
if size is None:
size = self.state.arch.bits / 8
size_e = size
if inspect is True:
if self.category == 'reg':
self.state._inspect('reg_read', BP_BEFORE, reg_read_offset=addr_e, reg_read_length=size_e)
addr_e = self.state._inspect_getattr("reg_read_offset", addr_e)
size_e = self.state._inspect_getattr("reg_read_length", size_e)
elif self.category == 'mem':
self.state._inspect('mem_read', BP_BEFORE, mem_read_address=addr_e, mem_read_length=size_e)
addr_e = self.state._inspect_getattr("mem_read_address", addr_e)
size_e = self.state._inspect_getattr("mem_read_length", size_e)
if (
o.UNDER_CONSTRAINED_SYMEXEC in self.state.options and
isinstance(addr_e, claripy.ast.Base) and
addr_e.uninitialized
):
self._constrain_underconstrained_index(addr_e)
try:
a,r,c = self._load(addr_e, size_e, condition=condition_e, fallback=fallback_e, inspect=inspect,
events=not disable_actions, ret_on_segv=ret_on_segv)
except SimSegfaultError as e:
e.original_addr = addr_e
raise
add_constraints = self.state._inspect_getattr('address_concretization_add_constraints', add_constraints)
if add_constraints and c:
self.state.add_constraints(*c)
if (self.category == 'mem' and o.SIMPLIFY_MEMORY_READS in self.state.options) or \
(self.category == 'reg' and o.SIMPLIFY_REGISTER_READS in self.state.options): # pylint:disable=too-many-boolean-expressions
l.debug("simplifying %s read...", self.category)
r = self.state.simplify(r)
if not self._abstract_backer and \
o.UNINITIALIZED_ACCESS_AWARENESS in self.state.options and \
self.state.uninitialized_access_handler is not None and \
(r.op == 'Reverse' or r.op == 'I') and \
hasattr(r._model_vsa, 'uninitialized') and \
r._model_vsa.uninitialized:
normalized_addresses = self.normalize_address(addr)
if len(normalized_addresses) > 0 and type(normalized_addresses[0]) is AddressWrapper:
normalized_addresses = [ (aw.region, aw.address) for aw in normalized_addresses ]
self.state.uninitialized_access_handler(self.category, normalized_addresses, size, r, self.state.scratch.bbl_addr, self.state.scratch.stmt_idx)
# the endianess
endness = self.endness if endness is None else endness
if endness == "Iend_LE":
r = r.reversed
if inspect is True:
if self.category == 'mem':
self.state._inspect('mem_read', BP_AFTER, mem_read_expr=r)
r = self.state._inspect_getattr("mem_read_expr", r)
elif self.category == 'reg':
self.state._inspect('reg_read', BP_AFTER, reg_read_expr=r)
r = self.state._inspect_getattr("reg_read_expr", r)
if not disable_actions:
if o.AST_DEPS in self.state.options and self.category == 'reg':
r = SimActionObject(r, reg_deps=frozenset((addr,)))
if o.AUTO_REFS in self.state.options and action is None:
ref_size = size * 8 if size is not None else r.size()
region_type = self.category
if region_type == 'file':
# Special handling for files to keep compatibility
# We may use some refactoring later
region_type = self.id
action = SimActionData(self.state, region_type, 'read', addr=addr, data=r, size=ref_size,
condition=condition, fallback=fallback)
self.state.history.add_action(action)
if action is not None:
action.actual_addrs = a
action.added_constraints = action._make_object(self.state.se.And(*c)
if len(c) > 0 else self.state.se.true)
return r
def _constrain_underconstrained_index(self, addr_e):
if not self.state.uc_manager.is_bounded(addr_e) or self.state.se.max_int(addr_e) - self.state.se.min_int( addr_e) >= self._read_address_range:
# in under-constrained symbolic execution, we'll assign a new memory region for this address
mem_region = self.state.uc_manager.assign(addr_e)
# ... but only if it's not already been constrained to something!
if self.state.se.solution(addr_e, mem_region):
self.state.add_constraints(addr_e == mem_region)
l.debug('Under-constrained symbolic execution: assigned a new memory region @ %s to %s', mem_region, addr_e)
def normalize_address(self, addr, is_write=False): #pylint:disable=no-self-use,unused-argument
"""
Normalize `addr` for use in static analysis (with the abstract memory model). In non-abstract mode, simply
returns the address in a single-element list.
"""
return [ addr ]
def _load(self, addr, size, condition=None, fallback=None, inspect=True, events=True, ret_on_segv=False):
raise NotImplementedError()
def find(self, addr, what, max_search=None, max_symbolic_bytes=None, default=None, step=1):
"""
        Returns the address of bytes equal to 'what', starting from 'addr'. Note that, if you don't specify a default
value, this search could cause the state to go unsat if no possible matching byte exists.
:param addr: The start address.
:param what: What to search for;
:param max_search: Search at most this many bytes.
:param max_symbolic_bytes: Search through at most this many symbolic bytes.
:param default: The default value, if what you're looking for wasn't found.
:returns: An expression representing the address of the matching byte.
"""
addr = _raw_ast(addr)
what = _raw_ast(what)
default = _raw_ast(default)
if isinstance(what, str):
# Convert it to a BVV
what = claripy.BVV(what, len(what) * 8)
r,c,m = self._find(addr, what, max_search=max_search, max_symbolic_bytes=max_symbolic_bytes, default=default,
step=step)
if o.AST_DEPS in self.state.options and self.category == 'reg':
r = SimActionObject(r, reg_deps=frozenset((addr,)))
return r,c,m
def _find(self, addr, what, max_search=None, max_symbolic_bytes=None, default=None, step=1):
raise NotImplementedError()
def copy_contents(self, dst, src, size, condition=None, src_memory=None, dst_memory=None, inspect=True,
disable_actions=False):
"""
Copies data within a memory.
:param dst: A claripy expression representing the address of the destination
:param src: A claripy expression representing the address of the source
The following parameters are optional.
:param src_memory: Copy data from this SimMemory instead of self
        :param dst_memory:      Copy data to this SimMemory instead of self
:param size: A claripy expression representing the size of the copy
:param condition: A claripy expression representing a condition, if the write should be conditional. If this
is determined to be false, the size of the copy will be 0.
"""
dst = _raw_ast(dst)
src = _raw_ast(src)
size = _raw_ast(size)
condition = _raw_ast(condition)
return self._copy_contents(dst, src, size, condition=condition, src_memory=src_memory, dst_memory=dst_memory,
inspect=inspect, disable_actions=disable_actions)
def _copy_contents(self, dst, src, size, condition=None, src_memory=None, dst_memory=None, inspect=True,
disable_actions=False):
raise NotImplementedError()
from bintrees import AVLTree
from .. import sim_options as o
from ..state_plugins.sim_action import SimActionData
from ..state_plugins.sim_action_object import SimActionObject, _raw_ast
from ..errors import SimMemoryError, SimRegionMapError, SimSegfaultError
from ..state_plugins.inspect import BP_BEFORE, BP_AFTER
| bsd-2-clause | 6,487,748,199,254,424,000 | 43.302617 | 155 | 0.600637 | false |
TheAlgorithms/Python | project_euler/problem_070/sol1.py | 1 | 3189 | """
Project Euler Problem 70: https://projecteuler.net/problem=70
Euler's Totient function, φ(n) [sometimes called the phi function], is used to
determine the number of positive numbers less than or equal to n which are
relatively prime to n. For example, as 1, 2, 4, 5, 7, and 8, are all less than
nine and relatively prime to nine, φ(9)=6.
The number 1 is considered to be relatively prime to every positive number, so
φ(1)=1.
Interestingly, φ(87109)=79180, and it can be seen that 87109 is a permutation
of 79180.
Find the value of n, 1 < n < 10^7, for which φ(n) is a permutation of n and
the ratio n/φ(n) produces a minimum.
-----
This is essentially brute force. Calculate all totients up to 10^7 and
find the minimum ratio of n/φ(n) that way. To minimize the ratio, we want
to minimize n and maximize φ(n) as much as possible, so we can store the
minimum fraction's numerator and denominator and calculate new fractions
with each totient to compare against. To avoid dividing by zero, I opt to
use cross multiplication.
References:
Finding totients
https://en.wikipedia.org/wiki/Euler's_totient_function#Euler's_product_formula
"""
from typing import List
def get_totients(max_one: int) -> List[int]:
"""
Calculates a list of totients from 0 to max_one exclusive, using the
definition of Euler's product formula.
>>> get_totients(5)
[0, 1, 1, 2, 2]
>>> get_totients(10)
[0, 1, 1, 2, 2, 4, 2, 6, 4, 6]
"""
totients = [0] * max_one
for i in range(0, max_one):
totients[i] = i
for i in range(2, max_one):
if totients[i] == i:
for j in range(i, max_one, i):
totients[j] -= totients[j] // i
return totients
def has_same_digits(num1: int, num2: int) -> bool:
"""
Return True if num1 and num2 have the same frequency of every digit, False
otherwise.
digits[] is a frequency table where the index represents the digit from
0-9, and the element stores the number of appearances. Increment the
respective index every time you see the digit in num1, and decrement if in
num2. At the end, if the numbers have the same digits, every index must
contain 0.
>>> has_same_digits(123456789, 987654321)
True
>>> has_same_digits(123, 12)
False
>>> has_same_digits(1234566, 123456)
False
"""
digits = [0] * 10
while num1 > 0 and num2 > 0:
digits[num1 % 10] += 1
digits[num2 % 10] -= 1
num1 //= 10
num2 //= 10
for digit in digits:
if digit != 0:
return False
return True
def solution(max: int = 10000000) -> int:
"""
Finds the value of n from 1 to max such that n/φ(n) produces a minimum.
>>> solution(100)
21
>>> solution(10000)
4435
"""
min_numerator = 1 # i
min_denominator = 0 # φ(i)
totients = get_totients(max + 1)
for i in range(2, max + 1):
t = totients[i]
if i * min_denominator < min_numerator * t and has_same_digits(i, t):
min_numerator = i
min_denominator = t
return min_numerator
if __name__ == "__main__":
print(f"{solution() = }")
| mit | -6,360,014,420,547,202,000 | 25.714286 | 78 | 0.633847 | false |
ukdtom/WebTools.bundle | Contents/Code/jsonExporterV3.py | 1 | 15236 | ######################################################################################################################
# json Exporter module for WebTools
#
# Author: dane22, a Plex Community member
#
######################################################################################################################
import os
import io
from consts import DEBUGMODE, JSONTIMESTAMP
import datetime
import json
from shutil import move
# Consts used here
FILEEXT = '.json' # File ext of export file
statusMsg = 'idle' # Response to getStatus
# Internal tracker of where we are
runningState = 0
# Flag to set if user wants to cancel
bAbort = False
GET = ['GETSTATUS']
PUT = ['EXPORT']
POST = []
DELETE = []
class jsonExporterV3(object):
init_already = False # Make sure init only run once
bResultPresent = False # Do we have a result to present
# Init of the class
@classmethod
def init(self):
self.MediaChuncks = 40
self.CoreUrl = 'http://127.0.0.1:32400/library/sections/'
# Only init once during the lifetime of this
if not jsonExporter.init_already:
jsonExporter.init_already = True
self.populatePrefs()
Log.Debug('******* Starting jsonExporter *******')
#********** Functions below ******************
# This is the main call
@classmethod
def EXPORT(self, req, *args):
''' Return the type of the section '''
def getSectionType(section):
url = 'http://127.0.0.1:32400/library/sections/' + section + \
'/all?X-Plex-Container-Start=1&X-Plex-Container-Size=0'
try:
return XML.ElementFromURL(url).xpath('//MediaContainer/@viewGroup')[0]
except:
return "None"
        ''' Copy a simple attribute from the videoDetails tree into the media dict '''
def makeSimpleEntry(media, videoDetails, el):
try:
entry = unicode(videoDetails.get(el))
if entry != 'None':
media[el] = entry
except:
pass
''' Create an array based entry, based on the tag attribute '''
def makeArrayEntry(media, videoDetails, el):
try:
Entries = videoDetails.xpath('//' + el)
EntryList = []
for Entry in Entries:
try:
EntryList.append(unicode(Entry.xpath('@tag')[0]))
except:
pass
media[el] = EntryList
except:
pass
''' Export the actual .json file, as well as poster and fanart '''
def makeFiles(ratingKey):
videoDetails = XML.ElementFromURL(
'http://127.0.0.1:32400/library/metadata/' + ratingKey).xpath('//Video')[0]
try:
media = {}
''' Now digest the media, and add to the XML '''
# Id
# try:
# media['guid'] = videoDetails.get('guid')
# except:
# pass
media['About This File'] = 'JSON Export Made with WebTools for Plex'
# Simple entries
elements = ['guid', 'title', 'originalTitle', 'titleSort', 'type', 'summary', 'duration', 'rating', 'ratingImage',
'contentRating', 'studio', 'year', 'tagline', 'originallyAvailableAt', 'audienceRatingImage', 'audienceRating']
for element in elements:
makeSimpleEntry(media, videoDetails, element)
arrayElements = ['Genre', 'Collection', 'Director',
'Writer', 'Producer', 'Country', 'Label']
for element in arrayElements:
makeArrayEntry(media, videoDetails, element)
# Locked fields
Locked = []
try:
Fields = videoDetails.xpath('//Field')
for Field in Fields:
try:
if Field.xpath('@locked')[0] == '1':
Locked.append(unicode(Field.xpath('@name')[0]))
except:
pass
media['Field'] = Locked
except:
pass
# Role aka actor
try:
Roles = videoDetails.xpath('//Role')
orderNo = 1
Actors = []
for Role in Roles:
Actor = {}
try:
Actor['name'] = unicode(Role.xpath('@tag')[0])
except:
pass
try:
Actor['role'] = unicode(Role.xpath('@role')[0])
except:
pass
try:
Actor['order'] = orderNo
orderNo += 1
except:
pass
try:
Actor['thumb'] = Role.xpath('@thumb')[0]
except:
pass
Actors.append(Actor)
media['Role'] = Actors
except Exception, e:
Log.Exception('Exception in MakeFiles: ' + str(e))
pass
                # Let's start by grabbing relevant files for this movie
fileNames = videoDetails.xpath('//Part')
for fileName in fileNames:
filename = fileName.xpath('@file')[0]
filename = String.Unquote(
filename).encode('utf8', 'ignore')
# Get name of json file
plexJSON = os.path.splitext(filename)[0] + FILEEXT
Log.Debug('Name and path to plexJSON file is: ' + plexJSON)
try:
with io.open(plexJSON, 'w', encoding='utf-8') as outfile:
outfile.write(
unicode(json.dumps(media, indent=4, sort_keys=True)))
except Exception, e:
                        Log.Debug('Exception happened during saving %s. Exception was: %s' % (
plexJSON, str(e)))
# Make poster
posterUrl = 'http://127.0.0.1:32400' + \
videoDetails.get('thumb')
targetFile = os.path.splitext(filename)[0] + '-poster.jpg'
response = HTTP.Request(posterUrl)
with io.open(targetFile, 'wb') as fo:
fo.write(response.content)
Log.Debug('Poster saved as %s' % targetFile)
# Make fanart
posterUrl = 'http://127.0.0.1:32400' + \
videoDetails.get('art')
targetFile = os.path.splitext(filename)[0] + '-fanart.jpg'
response = HTTP.Request(posterUrl)
with io.open(targetFile, 'wb') as fo:
fo.write(response.content)
Log.Debug('FanArt saved as %s' % targetFile)
except Exception, e:
Log.Exception(
                    'Exception happened in generating json file: ' + str(e))
''' Scan a movie section '''
def scanMovieSection(req, sectionNumber):
Log.Debug('Starting scanMovieSection')
global AmountOfMediasInDatabase
global mediasFromDB
global statusMsg
global runningState
try:
# Start by getting the last timestamp for a scanning:
if sectionNumber in Dict['jsonExportTimeStamps'].keys():
timeStamp = Dict['jsonExportTimeStamps'][sectionNumber]
else:
# Setting key for section to epoch start
Dict['jsonExportTimeStamps'][sectionNumber] = 0
Dict.Save()
timeStamp = 0
# Debug mode?
if JSONTIMESTAMP != 0:
timeStamp = JSONTIMESTAMP
now = int((datetime.datetime.now() -
datetime.datetime(1970, 1, 1)).total_seconds())
Log.Debug('Starting scanMovieDb for section %s' %
(sectionNumber))
Log.Debug('Only grap medias updated since: ' + datetime.datetime.fromtimestamp(
int(timeStamp)).strftime('%Y-%m-%d %H:%M:%S'))
runningState = -1
statusMsg = 'Starting to scan database for section %s' % (
sectionNumber)
# Start by getting the totals of this section
totalSize = XML.ElementFromURL(self.CoreUrl + sectionNumber + '/all?updatedAt>=' + str(
timeStamp) + '&X-Plex-Container-Start=1&X-Plex-Container-Size=0').get('totalSize')
AmountOfMediasInDatabase = totalSize
Log.Debug('Total size of medias are %s' % (totalSize))
if totalSize == '0':
# Stamp dict with new timestamp
Dict['jsonExportTimeStamps'][sectionNumber] = now
Dict.Save()
Log.Debug('Nothing to process...Exiting')
return
iStart = 0
iCount = 0
statusMsg = 'Scanning database item %s of %s : Working' % (
iCount, totalSize)
# So let's walk the library
while True:
# Grap a chunk from the server
videos = XML.ElementFromURL(self.CoreUrl + sectionNumber + '/all?updatedAt>=' + str(
timeStamp) + '&X-Plex-Container-Start=' + str(iStart) + '&X-Plex-Container-Size=' + str(self.MediaChuncks)).xpath('//Video')
# Walk the chunk
for video in videos:
if bAbort:
raise ValueError('Aborted')
iCount += 1
makeFiles(video.get('ratingKey'))
statusMsg = 'Scanning database: item %s of %s : Working' % (
iCount, totalSize)
iStart += self.MediaChuncks
if len(videos) == 0:
statusMsg = 'Scanning database: %s : Done' % (
totalSize)
Log.Debug('***** Done scanning the database *****')
runningState = 1
break
# Stamp dict with new timestamp
Dict['jsonExportTimeStamps'][sectionNumber] = now
Dict.Save()
return
except Exception, e:
                Log.Exception('Fatal error in scanMovieSection: ' + str(e))
runningState = 99
# End scanMovieDb
def scanShowSection(req, sectionNumber):
print 'Ged1 scanShowSection'
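            # Not implemented yet: show sections are only acknowledged with this debug print.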
# ********** Main function **************
Log.Debug('json export called')
try:
section = req.get_argument('section', '_export_missing_')
if section == '_export_missing_':
req.clear()
req.set_status(412)
                req.finish(
                    "<html><body>Missing section parameter</body></html>")
                return
if getSectionType(section) == 'movie':
scanMovieSection(req, section)
elif getSectionType(section) == 'show':
scanShowSection(req, section)
else:
Log.Debug('Unknown section type for section:' +
section + ' type: ' + getSectionType(section))
req.clear()
req.set_status(404)
req.finish("Unknown sectiontype or sectiion")
except Exception, e:
            Log.Exception('Exception in json export: ' + str(e))
# Return current status
@classmethod
def GETSTATUS(self, req, *args):
global runningState
req.clear()
req.set_status(200)
if runningState == 0:
req.finish('Idle')
else:
req.finish(statusMsg)
    ''' Get the relevant function and call it with optional params '''
@classmethod
def getFunction(self, metode, req, *args):
self.init()
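        # Drop the handler's mount-point prefix from the request URI and upper-case the
        # remaining path segments so they can be matched against the allow-lists below.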
params = req.request.uri[8:].upper().split('/')
self.function = None
        # Map the HTTP method onto its allow-list and pick the first matching path segment
        lookup = {'get': GET, 'post': POST, 'put': PUT, 'delete': DELETE}
        for param in params:
            if param in lookup.get(metode, ()):
                self.function = param
                break
if self.function == None:
Log.Debug('Function to call is None')
req.clear()
req.set_status(404)
req.finish('Unknown function call')
else:
# Check for optional argument
paramsStr = req.request.uri[req.request.uri.upper().find(
self.function) + len(self.function):]
# remove starting and ending slash
if paramsStr.endswith('/'):
paramsStr = paramsStr[:-1]
if paramsStr.startswith('/'):
paramsStr = paramsStr[1:]
# Turn into a list
params = paramsStr.split('/')
# If empty list, turn into None
if params[0] == '':
params = None
try:
Log.Debug('Function to call is: ' + self.function +
' with params: ' + str(params))
if params == None:
getattr(self, self.function)(req)
else:
getattr(self, self.function)(req, params)
except Exception, e:
Log.Exception('Exception in process of: ' + str(e))
################### Internal functions #############################
''' Populate the defaults, if not already there '''
@classmethod
def populatePrefs(self):
if Dict['jsonExportTimeStamps'] == None:
Dict['jsonExportTimeStamps'] = {}
Dict.Save()
##############################################################################################################
jsonExporter = jsonExporterV3()
| mpl-2.0 | 7,612,324,967,193,343,000 | 40.857143 | 148 | 0.454319 | false |
greggparrish/musicscout | musicscout/musicscout.py | 1 | 6571 | import datetime
import logging
import os
import re
import sys
from time import mktime, sleep
import feedparser
import youtube_dl
from .config import Config
from . import db
from .utils import Utils
from .mpdutil import mpd_update, make_playlists
c = Config().conf_vars()
db = db.Database()
ut = Utils()
CONFIGPATH = os.path.join(os.path.expanduser('~'), '.config/musicscout/')
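# Substrings matched against scraped links to decide whether a link points at a supported media host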
MEDIA_SITES = ['youtu', 'bandcamp.com', 'soundcloud', 'redditmedia']
logging.basicConfig(format="%(asctime)s [%(levelname)-5.5s] %(message)s",
level=logging.INFO,
handlers=[logging.FileHandler("{}/{}.log".format(CONFIGPATH, 'scout')),
logging.StreamHandler(sys.stdout)
])
class Musicscout:
def __init__(self):
self.dlcount = 0
def __enter__(self):
''' Symlink download dir to mpd dir if not already, start log '''
ut.symlink_musicdir()
logging.info(f'### START: SCOUT RUN')
return self
def __exit__(self, exc_class, exc, traceback):
''' Rm partial dls, update mpd, build playlists, end log '''
ut.clean_cache()
mpd_update()
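        # give mpd a moment to pick up the new files before building playlists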
sleep(10)
make_playlists()
logging.info(f"### DL TOTAL: {self.dlcount}")
logging.info(f'### END: SCOUT RUN\n ')
return True
def compare_feed_date(self, lu, posts):
''' Check if site feed is newer than last scout run
to avoid unnecessary updates '''
try:
ft = datetime.datetime.fromtimestamp(mktime(posts.feed.updated_parsed))
if not lu or not ft or ft > lu:
return ft
else:
return False
except Exception:
return False
def get_feed_urls(self):
''' Open urls file in .config, make list of feeds '''
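        # Each non-comment line of the urls file is "<feed url>|<genre>"; a missing or
        # empty genre falls back to "uncategorized".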
feeds = []
try:
feedfile = open(CONFIGPATH + 'urls')
except Exception:
feedfile = Config().create_urls()
if feedfile is True:
print(f"Add urls to url file at: {CONFIGPATH + 'urls'}")
            sys.exit()
else:
for line in feedfile:
line = line.strip()
if not line.startswith("#"):
line = line.replace('\n', '').strip()
line = line.split('|')
try:
genre = re.sub(r'[-\s]+', '-', (re.sub(r'[^\w\s-]', '', line[1]).strip().lower()))
except Exception:
genre = 'uncategorized'
if line[0]:
feed = line[0].strip()
db.add_url(feed)
feeds += [[feed, genre]]
try:
self.get_media_links(feed, genre)
except Exception as exc:
logging.info(f"-- ERROR: {feed} had exception: {exc}")
now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
db.update_time(feed, now)
feedfile.close()
return True
def get_media_links(self, feed, genre):
''' Get posts for a feed, strip media links from posts '''
logging.info(f"-- FEED: checking posts for {feed}")
links = []
posts = feedparser.parse(feed)
last_update = db.feed_time(feed)[0]
if last_update is not None:
try:
lu = datetime.datetime.strptime(last_update, '%Y-%m-%d %H:%M:%S')
except Exception:
                # fall back to "now" as a datetime so the comparisons below keep working
                lu = datetime.datetime.now()
else:
lu = None
ft = self.compare_feed_date(lu, posts)
if ft is not False:
for p in posts.entries:
pt = datetime.datetime.fromtimestamp(mktime(p.updated_parsed))
if ft is None or lu is None or pt > lu:
if 'reddit' in feed:
links = ut.reddit_links(p)
elif 'tumblr' in feed:
links = ut.tumblr_links(p)
else:
try:
links = ut.blog_links(p)
                        except Exception:
continue
if links:
self.download_new_media(links, genre)
return ft
def download_new_media(self, links, genre):
for l in links:
if any(m in l for m in MEDIA_SITES):
check_song = db.check_song(l)
if not check_song:
dl = self.yt_dl(l, genre)
if 'youtu' in l and dl is not False:
ut.add_metadata(dl, l, genre)
db.add_song(l)
self.dlcount += 1
return True
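    # Fetches the audio for a single link with youtube_dl, converts it to mp3 inside the
    # genre sub-directory of the cache and returns the resulting filename (or False on failure).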
def yt_dl(self, link, genre):
genre_dir = os.path.join(c['cache_dir'], genre)
ydl_opts = {'format': 'bestaudio/best',
'get_filename': True,
                    'max_downloads': 3,
                    'max_filesize': 10 * 1024 * 1024,  # youtube_dl expects a byte count here
'no_warnings': True,
'nooverwrites': True,
'noplaylist': True,
'outtmpl': genre_dir + '/%(title)s__%(id)s.%(ext)s',
'postprocessors': [{'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3', 'preferredquality': '192', }],
'quiet': True,
'rejecttitle': True,
'restrict_filenames': True
}
try:
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
vidinfo = ydl.extract_info(link, download=True)
filename = ydl.prepare_filename(vidinfo)
base = '.'.join(filename.split('.')[:-1])
filename = f"{base}.mp3"
vidtitle = vidinfo.get('title', None)
logging.info(f"** DL: {vidtitle} from {link}")
return filename
except Exception as e:
logging.info(f"** FAILED: {link} {e}")
return False
def main():
"""
musicscout
Get media files from an updated list of music blogs
Config file at: ~/.config/musicscout/config
Usage:
musicscout
Code: Gregory Parrish https://github.com/greggparrish/musicscout
"""
try:
with Musicscout() as ms:
ms.get_feed_urls()
except Exception as e:
print(f'ERROR: {e}')
if __name__ == '__main__':
main()
| unlicense | 9,218,062,527,769,342,000 | 34.327957 | 124 | 0.482575 | false |
kaedroho/wagtail | wagtail/core/tests/test_blocks.py | 1 | 163198 | # -*- coding: utf-8 -*
import base64
import collections
import json
import unittest
from datetime import date, datetime
from decimal import Decimal
from django import forms
from django.core.exceptions import ValidationError
from django.forms.utils import ErrorList
from django.template.loader import render_to_string
from django.test import SimpleTestCase, TestCase
from django.utils.html import format_html
from django.utils.safestring import SafeData, SafeText, mark_safe
# non-standard import name for gettext_lazy, to prevent strings from being picked up for translation
from django.utils.translation import gettext_lazy as __
from wagtail.core import blocks
from wagtail.core.models import Page
from wagtail.core.rich_text import RichText
from wagtail.tests.testapp.blocks import LinkBlock as CustomLinkBlock
from wagtail.tests.testapp.blocks import SectionBlock
from wagtail.tests.testapp.models import EventPage, SimplePage
from wagtail.tests.utils import WagtailTestUtils
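# Test fixture: a StreamBlock whose clean() raises a StreamBlockValidationError unless at
# least one child block has the value "foo".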
class FooStreamBlock(blocks.StreamBlock):
text = blocks.CharBlock()
error = 'At least one block must say "foo"'
def clean(self, value):
value = super().clean(value)
if not any(block.value == 'foo' for block in value):
raise blocks.StreamBlockValidationError(non_block_errors=ErrorList([self.error]))
return value
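# Test fixture: a CharBlock that upper-cases its value before adding it to the template context.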
class ContextCharBlock(blocks.CharBlock):
def get_context(self, value, parent_context=None):
value = str(value).upper()
return super(blocks.CharBlock, self).get_context(value, parent_context)
class TestFieldBlock(WagtailTestUtils, SimpleTestCase):
def test_charfield_render(self):
block = blocks.CharBlock()
html = block.render("Hello world!")
self.assertEqual(html, "Hello world!")
def test_charfield_render_with_template(self):
block = blocks.CharBlock(template='tests/blocks/heading_block.html')
html = block.render("Hello world!")
self.assertEqual(html, '<h1>Hello world!</h1>')
def test_charfield_form_classname(self):
"""
        Meta data test for FieldBlock; this checks that both the meta values
form_classname and classname are accepted and are rendered
in the form
"""
block = blocks.CharBlock(
form_classname='special-char-formclassname'
)
html = block.render_form("Hello world!")
self.assertEqual(html.count(' special-char-formclassname'), 1)
# Checks if it is backward compatible with classname
block_with_classname = blocks.CharBlock(
classname='special-char-classname'
)
html = block_with_classname.render_form("Hello world!")
self.assertEqual(html.count(' special-char-classname'), 1)
def test_charfield_render_with_template_with_extra_context(self):
block = ContextCharBlock(template='tests/blocks/heading_block.html')
html = block.render("Bonjour le monde!", context={
'language': 'fr',
})
self.assertEqual(html, '<h1 lang="fr">BONJOUR LE MONDE!</h1>')
def test_charfield_render_form(self):
block = blocks.CharBlock()
html = block.render_form("Hello world!")
self.assertIn('<div class="field char_field widget-text_input">', html)
self.assertInHTML('<input id="" name="" placeholder="" type="text" value="Hello world!" />', html)
def test_charfield_render_form_with_prefix(self):
block = blocks.CharBlock()
html = block.render_form("Hello world!", prefix='foo')
self.assertInHTML('<input id="foo" name="foo" placeholder="" type="text" value="Hello world!" />', html)
def test_charfield_render_form_with_error(self):
block = blocks.CharBlock()
html = block.render_form(
"Hello world!",
errors=ErrorList([ValidationError("This field is required.")]))
self.assertIn('This field is required.', html)
def test_charfield_searchable_content(self):
block = blocks.CharBlock()
content = block.get_searchable_content("Hello world!")
self.assertEqual(content, ["Hello world!"])
def test_charfield_with_validator(self):
def validate_is_foo(value):
if value != 'foo':
raise ValidationError("Value must be 'foo'")
block = blocks.CharBlock(validators=[validate_is_foo])
with self.assertRaises(ValidationError):
block.clean("bar")
def test_choicefield_render(self):
class ChoiceBlock(blocks.FieldBlock):
field = forms.ChoiceField(choices=(
('choice-1', "Choice 1"),
('choice-2', "Choice 2"),
))
block = ChoiceBlock()
html = block.render('choice-2')
self.assertEqual(html, "choice-2")
def test_choicefield_render_form(self):
class ChoiceBlock(blocks.FieldBlock):
field = forms.ChoiceField(choices=(
('choice-1', "Choice 1"),
('choice-2', "Choice 2"),
))
block = ChoiceBlock()
html = block.render_form('choice-2')
self.assertIn('<div class="field choice_field widget-select">', html)
self.assertTagInHTML('<select id="" name="" placeholder="">', html)
self.assertInHTML('<option value="choice-1">Choice 1</option>', html)
self.assertInHTML('<option value="choice-2" selected="selected">Choice 2</option>', html)
def test_searchable_content(self):
"""
FieldBlock should not return anything for `get_searchable_content` by
default. Subclasses are free to override it and provide relevant
content.
"""
class CustomBlock(blocks.FieldBlock):
field = forms.CharField(required=True)
block = CustomBlock()
self.assertEqual(block.get_searchable_content("foo bar"), [])
def test_form_handling_is_independent_of_serialisation(self):
class Base64EncodingCharBlock(blocks.CharBlock):
"""A CharBlock with a deliberately perverse JSON (de)serialisation format
so that it visibly blows up if we call to_python / get_prep_value where we shouldn't"""
def to_python(self, jsonish_value):
# decode as base64 on the way out of the JSON serialisation
return base64.b64decode(jsonish_value)
def get_prep_value(self, native_value):
# encode as base64 on the way into the JSON serialisation
return base64.b64encode(native_value)
block = Base64EncodingCharBlock()
form_html = block.render_form('hello world', 'title')
self.assertIn('value="hello world"', form_html)
value_from_form = block.value_from_datadict({'title': 'hello world'}, {}, 'title')
self.assertEqual('hello world', value_from_form)
def test_widget_media(self):
class CalendarWidget(forms.TextInput):
@property
def media(self):
return forms.Media(
css={'all': ('pretty.css',)},
js=('animations.js', 'actions.js')
)
class CalenderBlock(blocks.FieldBlock):
def __init__(self, required=True, help_text=None, max_length=None, min_length=None, **kwargs):
# Set widget to CalenderWidget
self.field = forms.CharField(
required=required,
help_text=help_text,
max_length=max_length,
min_length=min_length,
widget=CalendarWidget(),
)
super(blocks.FieldBlock, self).__init__(**kwargs)
block = CalenderBlock()
self.assertIn('pretty.css', ''.join(block.all_media().render_css()))
self.assertIn('animations.js', ''.join(block.all_media().render_js()))
def test_prepare_value_called(self):
"""
Check that Field.prepare_value is called before sending the value to
the widget for rendering.
Actual real-world use case: A Youtube field that produces YoutubeVideo
instances from IDs, but videos are entered using their full URLs.
"""
class PrefixWrapper:
prefix = 'http://example.com/'
def __init__(self, value):
self.value = value
def with_prefix(self):
return self.prefix + self.value
@classmethod
def from_prefixed(cls, value):
if not value.startswith(cls.prefix):
raise ValueError
return cls(value[len(cls.prefix):])
def __eq__(self, other):
return self.value == other.value
class PrefixField(forms.Field):
def clean(self, value):
value = super().clean(value)
return PrefixWrapper.from_prefixed(value)
def prepare_value(self, value):
return value.with_prefix()
class PrefixedBlock(blocks.FieldBlock):
def __init__(self, required=True, help_text='', **kwargs):
super().__init__(**kwargs)
self.field = PrefixField(required=required, help_text=help_text)
block = PrefixedBlock()
# Check that the form value is serialized with a prefix correctly
value = PrefixWrapper('foo')
html = block.render_form(value, 'url')
self.assertInHTML(
'<input id="url" name="url" placeholder="" type="text" value="{}" />'.format(
value.with_prefix()),
html)
# Check that the value was coerced back to a PrefixValue
data = {'url': 'http://example.com/bar'}
new_value = block.clean(block.value_from_datadict(data, {}, 'url'))
self.assertEqual(new_value, PrefixWrapper('bar'))
class TestIntegerBlock(unittest.TestCase):
def test_type(self):
block = blocks.IntegerBlock()
digit = block.value_from_form(1234)
self.assertEqual(type(digit), int)
def test_render(self):
block = blocks.IntegerBlock()
digit = block.value_from_form(1234)
self.assertEqual(digit, 1234)
def test_render_required_error(self):
block = blocks.IntegerBlock()
with self.assertRaises(ValidationError):
block.clean("")
def test_render_max_value_validation(self):
block = blocks.IntegerBlock(max_value=20)
with self.assertRaises(ValidationError):
block.clean(25)
def test_render_min_value_validation(self):
block = blocks.IntegerBlock(min_value=20)
with self.assertRaises(ValidationError):
block.clean(10)
def test_render_with_validator(self):
def validate_is_even(value):
if value % 2 > 0:
raise ValidationError("Value must be even")
block = blocks.IntegerBlock(validators=[validate_is_even])
with self.assertRaises(ValidationError):
block.clean(3)
class TestEmailBlock(unittest.TestCase):
def test_render(self):
block = blocks.EmailBlock()
email = block.render("[email protected]")
self.assertEqual(email, "[email protected]")
def test_render_required_error(self):
block = blocks.EmailBlock()
with self.assertRaises(ValidationError):
block.clean("")
def test_format_validation(self):
block = blocks.EmailBlock()
with self.assertRaises(ValidationError):
block.clean("example.email.com")
def test_render_with_validator(self):
def validate_is_example_domain(value):
if not value.endswith('@example.com'):
raise ValidationError("E-mail address must end in @example.com")
block = blocks.EmailBlock(validators=[validate_is_example_domain])
with self.assertRaises(ValidationError):
block.clean("[email protected]")
class TestBlockQuoteBlock(unittest.TestCase):
def test_render(self):
block = blocks.BlockQuoteBlock()
quote = block.render("Now is the time...")
self.assertEqual(quote, "<blockquote>Now is the time...</blockquote>")
def test_render_with_validator(self):
def validate_is_proper_story(value):
if not value.startswith('Once upon a time'):
raise ValidationError("Value must be a proper story")
block = blocks.BlockQuoteBlock(validators=[validate_is_proper_story])
with self.assertRaises(ValidationError):
block.clean("A long, long time ago")
class TestFloatBlock(TestCase):
def test_type(self):
block = blocks.FloatBlock()
block_val = block.value_from_form(float(1.63))
self.assertEqual(type(block_val), float)
def test_render(self):
block = blocks.FloatBlock()
test_val = float(1.63)
block_val = block.value_from_form(test_val)
self.assertEqual(block_val, test_val)
def test_raises_required_error(self):
block = blocks.FloatBlock()
with self.assertRaises(ValidationError):
block.clean("")
def test_raises_max_value_validation_error(self):
block = blocks.FloatBlock(max_value=20)
with self.assertRaises(ValidationError):
block.clean('20.01')
def test_raises_min_value_validation_error(self):
block = blocks.FloatBlock(min_value=20)
with self.assertRaises(ValidationError):
block.clean('19.99')
def test_render_with_validator(self):
def validate_is_even(value):
if value % 2 > 0:
raise ValidationError("Value must be even")
block = blocks.FloatBlock(validators=[validate_is_even])
with self.assertRaises(ValidationError):
block.clean('3.0')
class TestDecimalBlock(TestCase):
def test_type(self):
block = blocks.DecimalBlock()
block_val = block.value_from_form(Decimal('1.63'))
self.assertEqual(type(block_val), Decimal)
def test_render(self):
block = blocks.DecimalBlock()
test_val = Decimal(1.63)
block_val = block.value_from_form(test_val)
self.assertEqual(block_val, test_val)
def test_raises_required_error(self):
block = blocks.DecimalBlock()
with self.assertRaises(ValidationError):
block.clean("")
def test_raises_max_value_validation_error(self):
block = blocks.DecimalBlock(max_value=20)
with self.assertRaises(ValidationError):
block.clean('20.01')
def test_raises_min_value_validation_error(self):
block = blocks.DecimalBlock(min_value=20)
with self.assertRaises(ValidationError):
block.clean('19.99')
def test_render_with_validator(self):
def validate_is_even(value):
if value % 2 > 0:
raise ValidationError("Value must be even")
block = blocks.DecimalBlock(validators=[validate_is_even])
with self.assertRaises(ValidationError):
block.clean('3.0')
class TestRegexBlock(TestCase):
def test_render(self):
block = blocks.RegexBlock(regex=r'^[0-9]{3}$')
test_val = '123'
block_val = block.value_from_form(test_val)
self.assertEqual(block_val, test_val)
def test_raises_required_error(self):
block = blocks.RegexBlock(regex=r'^[0-9]{3}$')
with self.assertRaises(ValidationError) as context:
block.clean("")
self.assertIn('This field is required.', context.exception.messages)
def test_raises_custom_required_error(self):
test_message = 'Oops, you missed a bit.'
block = blocks.RegexBlock(regex=r'^[0-9]{3}$', error_messages={
'required': test_message,
})
with self.assertRaises(ValidationError) as context:
block.clean("")
self.assertIn(test_message, context.exception.messages)
def test_raises_validation_error(self):
block = blocks.RegexBlock(regex=r'^[0-9]{3}$')
with self.assertRaises(ValidationError) as context:
block.clean("[/]")
self.assertIn('Enter a valid value.', context.exception.messages)
def test_raises_custom_error_message(self):
test_message = 'Not a valid library card number.'
block = blocks.RegexBlock(regex=r'^[0-9]{3}$', error_messages={
'invalid': test_message
})
with self.assertRaises(ValidationError) as context:
block.clean("[/]")
self.assertIn(test_message, context.exception.messages)
html = block.render_form(
"[/]",
errors=ErrorList([ValidationError(test_message)]))
self.assertIn(test_message, html)
def test_render_with_validator(self):
def validate_is_foo(value):
if value != 'foo':
raise ValidationError("Value must be 'foo'")
block = blocks.RegexBlock(regex=r'^.*$', validators=[validate_is_foo])
with self.assertRaises(ValidationError):
block.clean('bar')
class TestRichTextBlock(TestCase):
fixtures = ['test.json']
def test_get_default_with_fallback_value(self):
default_value = blocks.RichTextBlock().get_default()
self.assertIsInstance(default_value, RichText)
self.assertEqual(default_value.source, '')
def test_get_default_with_default_none(self):
default_value = blocks.RichTextBlock(default=None).get_default()
self.assertIsInstance(default_value, RichText)
self.assertEqual(default_value.source, '')
def test_get_default_with_empty_string(self):
default_value = blocks.RichTextBlock(default='').get_default()
self.assertIsInstance(default_value, RichText)
self.assertEqual(default_value.source, '')
def test_get_default_with_nonempty_string(self):
default_value = blocks.RichTextBlock(default='<p>foo</p>').get_default()
self.assertIsInstance(default_value, RichText)
self.assertEqual(default_value.source, '<p>foo</p>')
def test_get_default_with_richtext_value(self):
default_value = blocks.RichTextBlock(default=RichText('<p>foo</p>')).get_default()
self.assertIsInstance(default_value, RichText)
self.assertEqual(default_value.source, '<p>foo</p>')
def test_render(self):
block = blocks.RichTextBlock()
value = RichText('<p>Merry <a linktype="page" id="4">Christmas</a>!</p>')
result = block.render(value)
self.assertEqual(
result, '<p>Merry <a href="/events/christmas/">Christmas</a>!</p>'
)
def test_render_form(self):
"""
render_form should produce the editor-specific rendition of the rich text value
(which includes e.g. 'data-linktype' attributes on <a> elements)
"""
block = blocks.RichTextBlock(editor='hallo')
value = RichText('<p>Merry <a linktype="page" id="4">Christmas</a>!</p>')
result = block.render_form(value, prefix='richtext')
self.assertIn(
(
'<p>Merry <a data-linktype="page" data-id="4"'
' data-parent-id="3" href="/events/christmas/">Christmas</a>!</p>'
),
result
)
def test_validate_required_richtext_block(self):
block = blocks.RichTextBlock()
with self.assertRaises(ValidationError):
block.clean(RichText(''))
def test_validate_non_required_richtext_block(self):
block = blocks.RichTextBlock(required=False)
result = block.clean(RichText(''))
self.assertIsInstance(result, RichText)
self.assertEqual(result.source, '')
def test_render_with_validator(self):
def validate_contains_foo(value):
if 'foo' not in value:
raise ValidationError("Value must contain 'foo'")
block = blocks.RichTextBlock(validators=[validate_contains_foo])
with self.assertRaises(ValidationError):
block.clean(RichText('<p>bar</p>'))
def test_get_searchable_content(self):
block = blocks.RichTextBlock()
value = RichText(
'<p>Merry <a linktype="page" id="4">Christmas</a>! & a happy new year</p>\n'
'<p>Our Santa pet <b>Wagtail</b> has some cool stuff in store for you all!</p>'
)
result = block.get_searchable_content(value)
self.assertEqual(
result, [
'Merry Christmas! & a happy new year \n'
'Our Santa pet Wagtail has some cool stuff in store for you all!'
]
)
def test_get_searchable_content_whitespace(self):
block = blocks.RichTextBlock()
value = RichText('<p>mashed</p><p>po<i>ta</i>toes</p>')
result = block.get_searchable_content(value)
self.assertEqual(result, ['mashed potatoes'])
class TestChoiceBlock(WagtailTestUtils, SimpleTestCase):
def setUp(self):
from django.db.models.fields import BLANK_CHOICE_DASH
self.blank_choice_dash_label = BLANK_CHOICE_DASH[0][1]
def test_render_required_choice_block(self):
block = blocks.ChoiceBlock(choices=[('tea', 'Tea'), ('coffee', 'Coffee')])
html = block.render_form('coffee', prefix='beverage')
self.assertTagInHTML('<select id="beverage" name="beverage" placeholder="">', html)
# blank option should still be rendered for required fields
# (we may want it as an initial value)
self.assertIn('<option value="">%s</option>' % self.blank_choice_dash_label, html)
self.assertIn('<option value="tea">Tea</option>', html)
self.assertInHTML('<option value="coffee" selected="selected">Coffee</option>', html)
def test_render_required_choice_block_with_default(self):
block = blocks.ChoiceBlock(choices=[('tea', 'Tea'), ('coffee', 'Coffee')], default='tea')
html = block.render_form('coffee', prefix='beverage')
self.assertTagInHTML('<select id="beverage" name="beverage" placeholder="">', html)
# blank option should NOT be rendered if default and required are set.
self.assertNotIn('<option value="">%s</option>' % self.blank_choice_dash_label, html)
self.assertIn('<option value="tea">Tea</option>', html)
self.assertInHTML('<option value="coffee" selected="selected">Coffee</option>', html)
def test_render_required_choice_block_with_callable_choices(self):
def callable_choices():
return [('tea', 'Tea'), ('coffee', 'Coffee')]
block = blocks.ChoiceBlock(choices=callable_choices)
html = block.render_form('coffee', prefix='beverage')
self.assertTagInHTML('<select id="beverage" name="beverage" placeholder="">', html)
# blank option should still be rendered for required fields
# (we may want it as an initial value)
self.assertIn('<option value="">%s</option>' % self.blank_choice_dash_label, html)
self.assertIn('<option value="tea">Tea</option>', html)
self.assertInHTML('<option value="coffee" selected="selected">Coffee</option>', html)
def test_validate_required_choice_block(self):
block = blocks.ChoiceBlock(choices=[('tea', 'Tea'), ('coffee', 'Coffee')])
self.assertEqual(block.clean('coffee'), 'coffee')
with self.assertRaises(ValidationError):
block.clean('whisky')
with self.assertRaises(ValidationError):
block.clean('')
with self.assertRaises(ValidationError):
block.clean(None)
def test_render_non_required_choice_block(self):
block = blocks.ChoiceBlock(choices=[('tea', 'Tea'), ('coffee', 'Coffee')], required=False)
html = block.render_form('coffee', prefix='beverage')
self.assertTagInHTML('<select id="beverage" name="beverage" placeholder="">', html)
self.assertIn('<option value="">%s</option>' % self.blank_choice_dash_label, html)
self.assertIn('<option value="tea">Tea</option>', html)
self.assertInHTML('<option value="coffee" selected="selected">Coffee</option>', html)
def test_render_non_required_choice_block_with_callable_choices(self):
def callable_choices():
return [('tea', 'Tea'), ('coffee', 'Coffee')]
block = blocks.ChoiceBlock(choices=callable_choices, required=False)
html = block.render_form('coffee', prefix='beverage')
self.assertTagInHTML('<select id="beverage" name="beverage" placeholder="">', html)
self.assertIn('<option value="">%s</option>' % self.blank_choice_dash_label, html)
self.assertIn('<option value="tea">Tea</option>', html)
self.assertInHTML('<option value="coffee" selected="selected">Coffee</option>', html)
def test_validate_non_required_choice_block(self):
block = blocks.ChoiceBlock(choices=[('tea', 'Tea'), ('coffee', 'Coffee')], required=False)
self.assertEqual(block.clean('coffee'), 'coffee')
with self.assertRaises(ValidationError):
block.clean('whisky')
self.assertEqual(block.clean(''), '')
self.assertEqual(block.clean(None), '')
def test_render_choice_block_with_existing_blank_choice(self):
block = blocks.ChoiceBlock(
choices=[('tea', 'Tea'), ('coffee', 'Coffee'), ('', 'No thanks')],
required=False)
html = block.render_form(None, prefix='beverage')
self.assertTagInHTML('<select id="beverage" name="beverage" placeholder="">', html)
self.assertNotIn('<option value="">%s</option>' % self.blank_choice_dash_label, html)
self.assertInHTML('<option value="" selected="selected">No thanks</option>', html)
self.assertIn('<option value="tea">Tea</option>', html)
self.assertInHTML('<option value="coffee">Coffee</option>', html)
def test_render_choice_block_with_existing_blank_choice_and_with_callable_choices(self):
def callable_choices():
return [('tea', 'Tea'), ('coffee', 'Coffee'), ('', 'No thanks')]
block = blocks.ChoiceBlock(
choices=callable_choices,
required=False)
html = block.render_form(None, prefix='beverage')
self.assertTagInHTML('<select id="beverage" name="beverage" placeholder="">', html)
self.assertNotIn('<option value="">%s</option>' % self.blank_choice_dash_label, html)
self.assertInHTML('<option value="" selected="selected">No thanks</option>', html)
self.assertIn('<option value="tea">Tea</option>', html)
self.assertIn('<option value="coffee">Coffee</option>', html)
def test_named_groups_without_blank_option(self):
block = blocks.ChoiceBlock(
choices=[
('Alcoholic', [
('gin', 'Gin'),
('whisky', 'Whisky'),
]),
('Non-alcoholic', [
('tea', 'Tea'),
('coffee', 'Coffee'),
]),
])
# test rendering with the blank option selected
html = block.render_form(None, prefix='beverage')
self.assertTagInHTML('<select id="beverage" name="beverage" placeholder="">', html)
self.assertInHTML('<option value="" selected="selected">%s</option>' % self.blank_choice_dash_label, html)
self.assertIn('<optgroup label="Alcoholic">', html)
self.assertIn('<option value="tea">Tea</option>', html)
# test rendering with a non-blank option selected
html = block.render_form('tea', prefix='beverage')
self.assertTagInHTML('<select id="beverage" name="beverage" placeholder="">', html)
self.assertIn('<option value="">%s</option>' % self.blank_choice_dash_label, html)
self.assertIn('<optgroup label="Alcoholic">', html)
self.assertInHTML('<option value="tea" selected="selected">Tea</option>', html)
def test_named_groups_with_blank_option(self):
block = blocks.ChoiceBlock(
choices=[
('Alcoholic', [
('gin', 'Gin'),
('whisky', 'Whisky'),
]),
('Non-alcoholic', [
('tea', 'Tea'),
('coffee', 'Coffee'),
]),
('Not thirsty', [
('', 'No thanks')
]),
],
required=False)
# test rendering with the blank option selected
html = block.render_form(None, prefix='beverage')
self.assertTagInHTML('<select id="beverage" name="beverage" placeholder="">', html)
self.assertNotIn('<option value="">%s</option>' % self.blank_choice_dash_label, html)
self.assertNotInHTML('<option value="" selected="selected">%s</option>' % self.blank_choice_dash_label, html)
self.assertIn('<optgroup label="Alcoholic">', html)
self.assertIn('<option value="tea">Tea</option>', html)
self.assertInHTML('<option value="" selected="selected">No thanks</option>', html)
# test rendering with a non-blank option selected
html = block.render_form('tea', prefix='beverage')
self.assertTagInHTML('<select id="beverage" name="beverage" placeholder="">', html)
self.assertNotIn('<option value="">%s</option>' % self.blank_choice_dash_label, html)
self.assertNotInHTML('<option value="" selected="selected">%s</option>' % self.blank_choice_dash_label, html)
self.assertIn('<optgroup label="Alcoholic">', html)
self.assertInHTML('<option value="tea" selected="selected">Tea</option>', html)
def test_subclassing(self):
class BeverageChoiceBlock(blocks.ChoiceBlock):
choices = [
('tea', 'Tea'),
('coffee', 'Coffee'),
]
block = BeverageChoiceBlock(required=False)
html = block.render_form('tea', prefix='beverage')
self.assertTagInHTML('<select id="beverage" name="beverage" placeholder="">', html)
self.assertInHTML('<option value="tea" selected="selected">Tea</option>', html)
# subclasses of ChoiceBlock should deconstruct to a basic ChoiceBlock for migrations
self.assertEqual(
block.deconstruct(),
(
'wagtail.core.blocks.ChoiceBlock',
[],
{
'choices': [('tea', 'Tea'), ('coffee', 'Coffee')],
'required': False,
},
)
)
def test_searchable_content(self):
block = blocks.ChoiceBlock(choices=[
('choice-1', "Choice 1"),
('choice-2', "Choice 2"),
])
self.assertEqual(block.get_searchable_content("choice-1"),
["Choice 1"])
def test_searchable_content_with_callable_choices(self):
def callable_choices():
return [
('choice-1', "Choice 1"),
('choice-2', "Choice 2"),
]
block = blocks.ChoiceBlock(choices=callable_choices)
self.assertEqual(block.get_searchable_content("choice-1"),
["Choice 1"])
def test_optgroup_searchable_content(self):
block = blocks.ChoiceBlock(choices=[
('Section 1', [
('1-1', "Block 1"),
('1-2', "Block 2"),
]),
('Section 2', [
('2-1', "Block 1"),
('2-2', "Block 2"),
]),
])
self.assertEqual(block.get_searchable_content("2-2"),
["Section 2", "Block 2"])
def test_invalid_searchable_content(self):
block = blocks.ChoiceBlock(choices=[
('one', 'One'),
('two', 'Two'),
])
self.assertEqual(block.get_searchable_content('three'), [])
def test_searchable_content_with_lazy_translation(self):
block = blocks.ChoiceBlock(choices=[
('choice-1', __("Choice 1")),
('choice-2', __("Choice 2")),
])
result = block.get_searchable_content("choice-1")
# result must survive JSON (de)serialisation, which is not the case for
# lazy translation objects
result = json.loads(json.dumps(result))
self.assertEqual(result, ["Choice 1"])
def test_optgroup_searchable_content_with_lazy_translation(self):
block = blocks.ChoiceBlock(choices=[
(__('Section 1'), [
('1-1', __("Block 1")),
('1-2', __("Block 2")),
]),
(__('Section 2'), [
('2-1', __("Block 1")),
('2-2', __("Block 2")),
]),
])
result = block.get_searchable_content("2-2")
# result must survive JSON (de)serialisation, which is not the case for
# lazy translation objects
result = json.loads(json.dumps(result))
self.assertEqual(result, ["Section 2", "Block 2"])
def test_deconstruct_with_callable_choices(self):
def callable_choices():
return [
('tea', 'Tea'),
('coffee', 'Coffee'),
]
block = blocks.ChoiceBlock(choices=callable_choices, required=False)
html = block.render_form('tea', prefix='beverage')
self.assertTagInHTML('<select id="beverage" name="beverage" placeholder="">', html)
self.assertInHTML('<option value="tea" selected="selected">Tea</option>', html)
self.assertEqual(
block.deconstruct(),
(
'wagtail.core.blocks.ChoiceBlock',
[],
{
'choices': callable_choices,
'required': False,
},
)
)
def test_render_with_validator(self):
choices = [
('tea', 'Tea'),
('coffee', 'Coffee'),
]
def validate_tea_is_selected(value):
raise ValidationError("You must select 'tea'")
block = blocks.ChoiceBlock(choices=choices, validators=[validate_tea_is_selected])
with self.assertRaises(ValidationError):
block.clean('coffee')
class TestMultipleChoiceBlock(WagtailTestUtils, SimpleTestCase):
def setUp(self):
from django.db.models.fields import BLANK_CHOICE_DASH
self.blank_choice_dash_label = BLANK_CHOICE_DASH[0][1]
def test_render_required_multiple_choice_block(self):
block = blocks.MultipleChoiceBlock(choices=[('tea', 'Tea'), ('coffee', 'Coffee')])
html = block.render_form('coffee', prefix='beverage')
self.assertTagInHTML('<select multiple id="beverage" name="beverage" placeholder="">', html)
self.assertIn('<option value="tea">Tea</option>', html)
self.assertInHTML('<option value="coffee" selected="selected">Coffee</option>', html)
def test_render_required_multiple_choice_block_with_default(self):
block = blocks.MultipleChoiceBlock(choices=[('tea', 'Tea'), ('coffee', 'Coffee')], default='tea')
html = block.render_form('coffee', prefix='beverage')
self.assertTagInHTML('<select multiple id="beverage" name="beverage" placeholder="">', html)
self.assertIn('<option value="tea">Tea</option>', html)
self.assertInHTML('<option value="coffee" selected="selected">Coffee</option>', html)
def test_render_required_multiple_choice_block_with_callable_choices(self):
def callable_choices():
return [('tea', 'Tea'), ('coffee', 'Coffee')]
block = blocks.MultipleChoiceBlock(choices=callable_choices)
html = block.render_form('coffee', prefix='beverage')
self.assertTagInHTML('<select multiple id="beverage" name="beverage" placeholder="">', html)
self.assertIn('<option value="tea">Tea</option>', html)
self.assertInHTML('<option value="coffee" selected="selected">Coffee</option>', html)
def test_validate_required_multiple_choice_block(self):
block = blocks.MultipleChoiceBlock(choices=[('tea', 'Tea'), ('coffee', 'Coffee')])
self.assertEqual(block.clean(['coffee']), ['coffee'])
with self.assertRaises(ValidationError):
block.clean(['whisky'])
with self.assertRaises(ValidationError):
block.clean('')
with self.assertRaises(ValidationError):
block.clean(None)
def test_render_non_required_multiple_choice_block(self):
block = blocks.MultipleChoiceBlock(choices=[('tea', 'Tea'), ('coffee', 'Coffee')], required=False)
html = block.render_form('coffee', prefix='beverage')
self.assertTagInHTML('<select multiple id="beverage" name="beverage" placeholder="">', html)
self.assertIn('<option value="tea">Tea</option>', html)
self.assertInHTML('<option value="coffee" selected="selected">Coffee</option>', html)
def test_render_non_required_multiple_choice_block_with_callable_choices(self):
def callable_choices():
return [('tea', 'Tea'), ('coffee', 'Coffee')]
block = blocks.MultipleChoiceBlock(choices=callable_choices, required=False)
html = block.render_form('coffee', prefix='beverage')
self.assertTagInHTML('<select multiple id="beverage" name="beverage" placeholder="">', html)
self.assertIn('<option value="tea">Tea</option>', html)
self.assertInHTML('<option value="coffee" selected="selected">Coffee</option>', html)
def test_validate_non_required_multiple_choice_block(self):
block = blocks.MultipleChoiceBlock(choices=[('tea', 'Tea'), ('coffee', 'Coffee')], required=False)
self.assertEqual(block.clean(['coffee']), ['coffee'])
with self.assertRaises(ValidationError):
block.clean(['whisky'])
self.assertEqual(block.clean(''), [])
self.assertEqual(block.clean(None), [])
def test_render_multiple_choice_block_with_existing_blank_choice(self):
block = blocks.MultipleChoiceBlock(
choices=[('tea', 'Tea'), ('coffee', 'Coffee'), ('', 'No thanks')],
required=False)
html = block.render_form("", prefix='beverage')
self.assertTagInHTML('<select multiple id="beverage" name="beverage" placeholder="">', html)
self.assertNotIn('<option value="">%s</option>' % self.blank_choice_dash_label, html)
self.assertInHTML('<option value="" selected="selected">No thanks</option>', html)
self.assertIn('<option value="tea">Tea</option>', html)
self.assertInHTML('<option value="coffee">Coffee</option>', html)
def test_render_multiple_choice_block_with_existing_blank_choice_and_with_callable_choices(self):
def callable_choices():
return [('tea', 'Tea'), ('coffee', 'Coffee'), ('', 'No thanks')]
block = blocks.MultipleChoiceBlock(
choices=callable_choices,
required=False)
html = block.render_form("", prefix='beverage')
self.assertTagInHTML('<select multiple id="beverage" name="beverage" placeholder="">', html)
self.assertNotIn('<option value="">%s</option>' % self.blank_choice_dash_label, html)
self.assertInHTML('<option value="" selected="selected">No thanks</option>', html)
self.assertIn('<option value="tea">Tea</option>', html)
self.assertIn('<option value="coffee">Coffee</option>', html)
def test_named_groups_without_blank_option(self):
block = blocks.MultipleChoiceBlock(
choices=[
('Alcoholic', [
('gin', 'Gin'),
('whisky', 'Whisky'),
]),
('Non-alcoholic', [
('tea', 'Tea'),
('coffee', 'Coffee'),
]),
])
# test rendering with the blank option selected
html = block.render_form(None, prefix='beverage')
self.assertTagInHTML('<select multiple id="beverage" name="beverage" placeholder="">', html)
self.assertIn('<optgroup label="Alcoholic">', html)
self.assertIn('<option value="tea">Tea</option>', html)
# test rendering with a non-blank option selected
html = block.render_form('tea', prefix='beverage')
self.assertTagInHTML('<select multiple id="beverage" name="beverage" placeholder="">', html)
self.assertIn('<optgroup label="Alcoholic">', html)
self.assertInHTML('<option value="tea" selected="selected">Tea</option>', html)
def test_named_groups_with_blank_option(self):
block = blocks.MultipleChoiceBlock(
choices=[
('Alcoholic', [
('gin', 'Gin'),
('whisky', 'Whisky'),
]),
('Non-alcoholic', [
('tea', 'Tea'),
('coffee', 'Coffee'),
]),
('Not thirsty', [
('', 'No thanks')
]),
],
required=False)
# test rendering with the blank option selected
html = block.render_form(None, prefix='beverage')
self.assertTagInHTML('<select multiple id="beverage" name="beverage" placeholder="">', html)
self.assertNotIn('<option value="">%s</option>' % self.blank_choice_dash_label, html)
self.assertIn('<optgroup label="Alcoholic">', html)
self.assertIn('<option value="tea">Tea</option>', html)
# test rendering with a non-blank option selected
html = block.render_form('tea', prefix='beverage')
self.assertTagInHTML('<select multiple id="beverage" name="beverage" placeholder="">', html)
self.assertNotIn('<option value="">%s</option>' % self.blank_choice_dash_label, html)
self.assertNotInHTML('<option value="" selected="selected">%s</option>' % self.blank_choice_dash_label, html)
self.assertIn('<optgroup label="Alcoholic">', html)
self.assertInHTML('<option value="tea" selected="selected">Tea</option>', html)
def test_subclassing(self):
class BeverageMultipleChoiceBlock(blocks.MultipleChoiceBlock):
choices = [
('tea', 'Tea'),
('coffee', 'Coffee'),
]
block = BeverageMultipleChoiceBlock(required=False)
html = block.render_form('tea', prefix='beverage')
self.assertTagInHTML('<select multiple id="beverage" name="beverage" placeholder="">', html)
self.assertInHTML('<option value="tea" selected="selected">Tea</option>', html)
# subclasses of ChoiceBlock should deconstruct to a basic ChoiceBlock for migrations
self.assertEqual(
block.deconstruct(),
(
'wagtail.core.blocks.MultipleChoiceBlock',
[],
{
'choices': [('tea', 'Tea'), ('coffee', 'Coffee')],
'required': False,
},
)
)
def test_searchable_content(self):
block = blocks.MultipleChoiceBlock(choices=[
('choice-1', "Choice 1"),
('choice-2', "Choice 2"),
])
self.assertEqual(block.get_searchable_content("choice-1"),
["Choice 1"])
def test_searchable_content_with_callable_choices(self):
def callable_choices():
return [
('choice-1', "Choice 1"),
('choice-2', "Choice 2"),
]
block = blocks.MultipleChoiceBlock(choices=callable_choices)
self.assertEqual(block.get_searchable_content("choice-1"),
["Choice 1"])
def test_optgroup_searchable_content(self):
block = blocks.MultipleChoiceBlock(choices=[
('Section 1', [
('1-1', "Block 1"),
('1-2', "Block 2"),
]),
('Section 2', [
('2-1', "Block 1"),
('2-2', "Block 2"),
]),
])
self.assertEqual(block.get_searchable_content("2-2"),
["Section 2", "Block 2"])
def test_invalid_searchable_content(self):
block = blocks.MultipleChoiceBlock(choices=[
('one', 'One'),
('two', 'Two'),
])
self.assertEqual(block.get_searchable_content('three'), [])
def test_searchable_content_with_lazy_translation(self):
block = blocks.MultipleChoiceBlock(choices=[
('choice-1', __("Choice 1")),
('choice-2', __("Choice 2")),
])
result = block.get_searchable_content("choice-1")
# result must survive JSON (de)serialisation, which is not the case for
# lazy translation objects
result = json.loads(json.dumps(result))
self.assertEqual(result, ["Choice 1"])
def test_optgroup_searchable_content_with_lazy_translation(self):
block = blocks.MultipleChoiceBlock(choices=[
(__('Section 1'), [
('1-1', __("Block 1")),
('1-2', __("Block 2")),
]),
(__('Section 2'), [
('2-1', __("Block 1")),
('2-2', __("Block 2")),
]),
])
result = block.get_searchable_content("2-2")
# result must survive JSON (de)serialisation, which is not the case for
# lazy translation objects
result = json.loads(json.dumps(result))
self.assertEqual(result, ["Section 2", "Block 2"])
def test_deconstruct_with_callable_choices(self):
def callable_choices():
return [
('tea', 'Tea'),
('coffee', 'Coffee'),
]
block = blocks.MultipleChoiceBlock(choices=callable_choices, required=False)
html = block.render_form('tea', prefix='beverage')
self.assertTagInHTML('<select multiple id="beverage" name="beverage" placeholder="">', html)
self.assertInHTML('<option value="tea" selected="selected">Tea</option>', html)
self.assertEqual(
block.deconstruct(),
(
'wagtail.core.blocks.MultipleChoiceBlock',
[],
{
'choices': callable_choices,
'required': False,
},
)
)
def test_render_with_validator(self):
choices = [
('tea', 'Tea'),
('coffee', 'Coffee'),
]
def validate_tea_is_selected(value):
raise ValidationError("You must select 'tea'")
block = blocks.MultipleChoiceBlock(choices=choices, validators=[validate_tea_is_selected])
with self.assertRaises(ValidationError):
block.clean('coffee')
class TestRawHTMLBlock(unittest.TestCase):
def test_get_default_with_fallback_value(self):
default_value = blocks.RawHTMLBlock().get_default()
self.assertEqual(default_value, '')
self.assertIsInstance(default_value, SafeData)
def test_get_default_with_none(self):
default_value = blocks.RawHTMLBlock(default=None).get_default()
self.assertEqual(default_value, '')
self.assertIsInstance(default_value, SafeData)
def test_get_default_with_empty_string(self):
default_value = blocks.RawHTMLBlock(default='').get_default()
self.assertEqual(default_value, '')
self.assertIsInstance(default_value, SafeData)
def test_get_default_with_nonempty_string(self):
default_value = blocks.RawHTMLBlock(default='<blink>BÖÖM</blink>').get_default()
self.assertEqual(default_value, '<blink>BÖÖM</blink>')
self.assertIsInstance(default_value, SafeData)
def test_serialize(self):
block = blocks.RawHTMLBlock()
result = block.get_prep_value(mark_safe('<blink>BÖÖM</blink>'))
self.assertEqual(result, '<blink>BÖÖM</blink>')
self.assertNotIsInstance(result, SafeData)
def test_deserialize(self):
block = blocks.RawHTMLBlock()
result = block.to_python('<blink>BÖÖM</blink>')
self.assertEqual(result, '<blink>BÖÖM</blink>')
self.assertIsInstance(result, SafeData)
def test_render(self):
block = blocks.RawHTMLBlock()
result = block.render(mark_safe('<blink>BÖÖM</blink>'))
self.assertEqual(result, '<blink>BÖÖM</blink>')
self.assertIsInstance(result, SafeData)
def test_render_form(self):
block = blocks.RawHTMLBlock()
result = block.render_form(mark_safe('<blink>BÖÖM</blink>'), prefix='rawhtml')
self.assertIn('<textarea ', result)
self.assertIn('name="rawhtml"', result)
self.assertIn('<blink>BÖÖM</blink>', result)
def test_form_response(self):
block = blocks.RawHTMLBlock()
result = block.value_from_datadict({'rawhtml': '<blink>BÖÖM</blink>'}, {}, prefix='rawhtml')
self.assertEqual(result, '<blink>BÖÖM</blink>')
self.assertIsInstance(result, SafeData)
def test_value_omitted_from_data(self):
block = blocks.RawHTMLBlock()
self.assertFalse(block.value_omitted_from_data({'rawhtml': 'ohai'}, {}, 'rawhtml'))
self.assertFalse(block.value_omitted_from_data({'rawhtml': ''}, {}, 'rawhtml'))
self.assertTrue(block.value_omitted_from_data({'nothing-here': 'nope'}, {}, 'rawhtml'))
def test_clean_required_field(self):
block = blocks.RawHTMLBlock()
result = block.clean(mark_safe('<blink>BÖÖM</blink>'))
self.assertEqual(result, '<blink>BÖÖM</blink>')
self.assertIsInstance(result, SafeData)
with self.assertRaises(ValidationError):
block.clean(mark_safe(''))
def test_clean_nonrequired_field(self):
block = blocks.RawHTMLBlock(required=False)
result = block.clean(mark_safe('<blink>BÖÖM</blink>'))
self.assertEqual(result, '<blink>BÖÖM</blink>')
self.assertIsInstance(result, SafeData)
result = block.clean(mark_safe(''))
self.assertEqual(result, '')
self.assertIsInstance(result, SafeData)
def test_render_with_validator(self):
def validate_contains_foo(value):
if 'foo' not in value:
raise ValidationError("Value must contain 'foo'")
block = blocks.RawHTMLBlock(validators=[validate_contains_foo])
with self.assertRaises(ValidationError):
block.clean(mark_safe('<p>bar</p>'))
class TestMeta(unittest.TestCase):
def test_set_template_with_meta(self):
class HeadingBlock(blocks.CharBlock):
class Meta:
template = 'heading.html'
block = HeadingBlock()
self.assertEqual(block.meta.template, 'heading.html')
def test_set_template_with_constructor(self):
block = blocks.CharBlock(template='heading.html')
self.assertEqual(block.meta.template, 'heading.html')
def test_set_template_with_constructor_overrides_meta(self):
class HeadingBlock(blocks.CharBlock):
class Meta:
template = 'heading.html'
block = HeadingBlock(template='subheading.html')
self.assertEqual(block.meta.template, 'subheading.html')
def test_meta_nested_inheritance(self):
"""
Check that having a multi-level inheritance chain works
"""
class HeadingBlock(blocks.CharBlock):
class Meta:
template = 'heading.html'
test = 'Foo'
class SubHeadingBlock(HeadingBlock):
class Meta:
template = 'subheading.html'
block = SubHeadingBlock()
self.assertEqual(block.meta.template, 'subheading.html')
self.assertEqual(block.meta.test, 'Foo')
def test_meta_multi_inheritance(self):
"""
Check that multi-inheritance and Meta classes work together
"""
class LeftBlock(blocks.CharBlock):
class Meta:
template = 'template.html'
clash = 'the band'
label = 'Left block'
class RightBlock(blocks.CharBlock):
class Meta:
default = 'hello'
clash = 'the album'
label = 'Right block'
class ChildBlock(LeftBlock, RightBlock):
class Meta:
label = 'Child block'
block = ChildBlock()
# These should be directly inherited from the LeftBlock/RightBlock
self.assertEqual(block.meta.template, 'template.html')
self.assertEqual(block.meta.default, 'hello')
# This should be inherited from the LeftBlock, solving the collision,
# as LeftBlock comes first
self.assertEqual(block.meta.clash, 'the band')
# This should come from ChildBlock itself, ignoring the label on
# LeftBlock/RightBlock
self.assertEqual(block.meta.label, 'Child block')
class TestStructBlock(SimpleTestCase):
def test_initialisation(self):
block = blocks.StructBlock([
('title', blocks.CharBlock()),
('link', blocks.URLBlock()),
])
self.assertEqual(list(block.child_blocks.keys()), ['title', 'link'])
def test_initialisation_from_subclass(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = LinkBlock()
self.assertEqual(list(block.child_blocks.keys()), ['title', 'link'])
def test_initialisation_from_subclass_with_extra(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = LinkBlock([
('classname', blocks.CharBlock())
])
self.assertEqual(list(block.child_blocks.keys()), ['title', 'link', 'classname'])
def test_initialisation_with_multiple_subclassses(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
class StyledLinkBlock(LinkBlock):
classname = blocks.CharBlock()
block = StyledLinkBlock()
self.assertEqual(list(block.child_blocks.keys()), ['title', 'link', 'classname'])
def test_initialisation_with_mixins(self):
"""
The order of fields of classes with multiple parent classes is slightly
surprising at first. Child fields are inherited in a bottom-up order,
by traversing the MRO in reverse. In the example below,
``StyledLinkBlock`` will have an MRO of::
[StyledLinkBlock, StylingMixin, LinkBlock, StructBlock, ...]
This will result in ``classname`` appearing *after* ``title`` and
        ``link`` in ``StyledLinkBlock.child_blocks``, even though
``StylingMixin`` appeared before ``LinkBlock``.
"""
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
class StylingMixin(blocks.StructBlock):
classname = blocks.CharBlock()
class StyledLinkBlock(StylingMixin, LinkBlock):
source = blocks.CharBlock()
block = StyledLinkBlock()
self.assertEqual(list(block.child_blocks.keys()),
['title', 'link', 'classname', 'source'])
def test_render(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = LinkBlock()
html = block.render(block.to_python({
'title': "Wagtail site",
'link': 'http://www.wagtail.io',
}))
expected_html = '\n'.join([
'<dl>',
'<dt>title</dt>',
'<dd>Wagtail site</dd>',
'<dt>link</dt>',
'<dd>http://www.wagtail.io</dd>',
'</dl>',
])
self.assertHTMLEqual(html, expected_html)
def test_get_api_representation_calls_same_method_on_fields_with_context(self):
"""
The get_api_representation method of a StructBlock should invoke
the block's get_api_representation method on each field and the
context should be passed on.
"""
class ContextBlock(blocks.CharBlock):
def get_api_representation(self, value, context=None):
return context[value]
class AuthorBlock(blocks.StructBlock):
language = ContextBlock()
author = ContextBlock()
block = AuthorBlock()
api_representation = block.get_api_representation(
{
'language': 'en',
'author': 'wagtail',
},
context={
'en': 'English',
'wagtail': 'Wagtail!'
}
)
self.assertDictEqual(
api_representation, {
'language': 'English',
'author': 'Wagtail!'
}
)
def test_render_unknown_field(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = LinkBlock()
html = block.render(block.to_python({
'title': "Wagtail site",
'link': 'http://www.wagtail.io',
'image': 10,
}))
self.assertIn('<dt>title</dt>', html)
self.assertIn('<dd>Wagtail site</dd>', html)
self.assertIn('<dt>link</dt>', html)
self.assertIn('<dd>http://www.wagtail.io</dd>', html)
# Don't render the extra item
self.assertNotIn('<dt>image</dt>', html)
def test_render_bound_block(self):
# the string representation of a bound block should be the value as rendered by
# the associated block
class SectionBlock(blocks.StructBlock):
title = blocks.CharBlock()
body = blocks.RichTextBlock()
block = SectionBlock()
struct_value = block.to_python({
'title': 'hello',
'body': '<b>world</b>',
})
body_bound_block = struct_value.bound_blocks['body']
expected = '<b>world</b>'
self.assertEqual(str(body_bound_block), expected)
def test_get_form_context(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = LinkBlock()
context = block.get_form_context(block.to_python({
'title': "Wagtail site",
'link': 'http://www.wagtail.io',
}), prefix='mylink')
self.assertTrue(isinstance(context['children'], collections.OrderedDict))
self.assertEqual(len(context['children']), 2)
self.assertTrue(isinstance(context['children']['title'], blocks.BoundBlock))
self.assertEqual(context['children']['title'].value, "Wagtail site")
self.assertTrue(isinstance(context['children']['link'], blocks.BoundBlock))
self.assertEqual(context['children']['link'].value, 'http://www.wagtail.io')
self.assertEqual(context['block_definition'], block)
self.assertEqual(context['prefix'], 'mylink')
def test_render_form(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock(required=False)
link = blocks.URLBlock(required=False)
block = LinkBlock()
html = block.render_form(block.to_python({
'title': "Wagtail site",
'link': 'http://www.wagtail.io',
}), prefix='mylink')
self.assertIn('<div class="struct-block">', html)
self.assertIn('<div class="field char_field widget-text_input fieldname-title">', html)
self.assertIn('<label class="field__label" for="mylink-title">Title</label>', html)
self.assertInHTML(
'<input id="mylink-title" name="mylink-title" placeholder="Title" type="text" value="Wagtail site" />', html
)
self.assertIn('<div class="field url_field widget-url_input fieldname-link">', html)
self.assertInHTML(
(
'<input id="mylink-link" name="mylink-link" placeholder="Link"'
' type="url" value="http://www.wagtail.io" />'
),
html
)
self.assertNotIn('<li class="required">', html)
def test_custom_render_form_template(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock(required=False)
link = blocks.URLBlock(required=False)
class Meta:
form_template = 'tests/block_forms/struct_block_form_template.html'
block = LinkBlock()
html = block.render_form(block.to_python({
'title': "Wagtail site",
'link': 'http://www.wagtail.io',
}), prefix='mylink')
self.assertIn('<div>Hello</div>', html)
self.assertHTMLEqual('<div>Hello</div>', html)
self.assertTrue(isinstance(html, SafeText))
def test_custom_render_form_template_jinja(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock(required=False)
link = blocks.URLBlock(required=False)
class Meta:
form_template = 'tests/jinja2/struct_block_form_template.html'
block = LinkBlock()
html = block.render_form(block.to_python({
'title': "Wagtail site",
'link': 'http://www.wagtail.io',
}), prefix='mylink')
self.assertIn('<div>Hello</div>', html)
self.assertHTMLEqual('<div>Hello</div>', html)
self.assertTrue(isinstance(html, SafeText))
def test_render_required_field_indicator(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock(required=True)
block = LinkBlock()
html = block.render_form(block.to_python({
'title': "Wagtail site",
'link': 'http://www.wagtail.io',
}), prefix='mylink')
self.assertIn('<div class="field required">', html)
def test_render_form_unknown_field(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = LinkBlock()
html = block.render_form(block.to_python({
'title': "Wagtail site",
'link': 'http://www.wagtail.io',
'image': 10,
}), prefix='mylink')
self.assertInHTML(
(
'<input id="mylink-title" name="mylink-title" placeholder="Title"'
' type="text" value="Wagtail site" />'
),
html
)
self.assertInHTML(
(
'<input id="mylink-link" name="mylink-link" placeholder="Link" type="url"'
' value="http://www.wagtail.io" />'
),
html
)
# Don't render the extra field
self.assertNotIn('mylink-image', html)
def test_render_form_uses_default_value(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock(default="Torchbox")
link = blocks.URLBlock(default="http://www.torchbox.com")
block = LinkBlock()
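        # rendering a form for an empty value should fall back on each child block's default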
html = block.render_form(block.to_python({}), prefix='mylink')
self.assertInHTML(
'<input id="mylink-title" name="mylink-title" placeholder="Title" type="text" value="Torchbox" />', html
)
self.assertInHTML(
(
'<input id="mylink-link" name="mylink-link" placeholder="Link"'
' type="url" value="http://www.torchbox.com" />'
),
html
)
def test_render_form_with_help_text(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
class Meta:
help_text = "Self-promotion is encouraged"
block = LinkBlock()
html = block.render_form(block.to_python({
'title': "Wagtail site",
'link': 'http://www.wagtail.io',
}), prefix='mylink')
self.assertInHTML('<div class="help"> <svg class="icon icon-help default" aria-hidden="true" focusable="false"><use href="#icon-help"></use></svg> Self-promotion is encouraged</div>', html)
# check it can be overridden in the block constructor
block = LinkBlock(help_text="Self-promotion is discouraged")
html = block.render_form(block.to_python({
'title': "Wagtail site",
'link': 'http://www.wagtail.io',
}), prefix='mylink')
self.assertInHTML('<div class="help"> <svg class="icon icon-help default" aria-hidden="true" focusable="false"><use href="#icon-help"></use></svg> Self-promotion is discouraged</div>', html)
def test_media_inheritance(self):
class ScriptedCharBlock(blocks.CharBlock):
media = forms.Media(js=['scripted_char_block.js'])
class LinkBlock(blocks.StructBlock):
title = ScriptedCharBlock(default="Torchbox")
link = blocks.URLBlock(default="http://www.torchbox.com")
block = LinkBlock()
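        # media declared on a child block should be included in the StructBlock's combined media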
self.assertIn('scripted_char_block.js', ''.join(block.all_media().render_js()))
def test_html_declaration_inheritance(self):
class CharBlockWithDeclarations(blocks.CharBlock):
def html_declarations(self):
return '<script type="text/x-html-template">hello world</script>'
class LinkBlock(blocks.StructBlock):
title = CharBlockWithDeclarations(default="Torchbox")
link = blocks.URLBlock(default="http://www.torchbox.com")
block = LinkBlock()
self.assertIn('<script type="text/x-html-template">hello world</script>', block.all_html_declarations())
def test_searchable_content(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = LinkBlock()
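        # URLBlock contributes no searchable content, so only the title should be indexed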
content = block.get_searchable_content(block.to_python({
'title': "Wagtail site",
'link': 'http://www.wagtail.io',
}))
self.assertEqual(content, ["Wagtail site"])
def test_value_from_datadict(self):
block = blocks.StructBlock([
('title', blocks.CharBlock()),
('link', blocks.URLBlock()),
])
struct_val = block.value_from_datadict({
'mylink-title': "Torchbox",
'mylink-link': "http://www.torchbox.com"
}, {}, 'mylink')
self.assertEqual(struct_val['title'], "Torchbox")
self.assertEqual(struct_val['link'], "http://www.torchbox.com")
self.assertTrue(isinstance(struct_val, blocks.StructValue))
self.assertTrue(isinstance(struct_val.bound_blocks['link'].block, blocks.URLBlock))
def test_value_omitted_from_data(self):
block = blocks.StructBlock([
('title', blocks.CharBlock()),
('link', blocks.URLBlock()),
])
# overall value is considered present in the form if any sub-field is present
self.assertFalse(block.value_omitted_from_data({'mylink-title': 'Torchbox'}, {}, 'mylink'))
self.assertTrue(block.value_omitted_from_data({'nothing-here': 'nope'}, {}, 'mylink'))
def test_default_is_returned_as_structvalue(self):
"""When returning the default value of a StructBlock (e.g. because it's
a child of another StructBlock, and the outer value is missing that key)
we should receive it as a StructValue, not just a plain dict"""
class PersonBlock(blocks.StructBlock):
first_name = blocks.CharBlock()
surname = blocks.CharBlock()
class EventBlock(blocks.StructBlock):
title = blocks.CharBlock()
guest_speaker = PersonBlock(default={'first_name': 'Ed', 'surname': 'Balls'})
event_block = EventBlock()
event = event_block.to_python({'title': 'Birthday party'})
self.assertEqual(event['guest_speaker']['first_name'], 'Ed')
self.assertTrue(isinstance(event['guest_speaker'], blocks.StructValue))
def test_default_value_is_distinct_instance(self):
"""
Whenever the default value of a StructBlock is invoked, it should be a distinct
instance of the dict so that modifying it doesn't modify other places where the
default value appears.
"""
class PersonBlock(blocks.StructBlock):
first_name = blocks.CharBlock()
surname = blocks.CharBlock()
class EventBlock(blocks.StructBlock):
title = blocks.CharBlock()
guest_speaker = PersonBlock(default={'first_name': 'Ed', 'surname': 'Balls'})
event_block = EventBlock()
event1 = event_block.to_python({'title': 'Birthday party'}) # guest_speaker will default to Ed Balls
event2 = event_block.to_python({'title': 'Christmas party'}) # guest_speaker will default to Ed Balls, but a distinct instance
event1['guest_speaker']['surname'] = 'Miliband'
self.assertEqual(event1['guest_speaker']['surname'], 'Miliband')
# event2 should not be modified
self.assertEqual(event2['guest_speaker']['surname'], 'Balls')
def test_bulk_to_python_returns_distinct_default_instances(self):
"""
Whenever StructBlock.bulk_to_python invokes a child block's get_default method to
fill in missing fields, it should use a separate invocation for each record so that
we don't end up with the same instance of a mutable value on multiple records
"""
class ShoppingListBlock(blocks.StructBlock):
shop = blocks.CharBlock()
items = blocks.ListBlock(blocks.CharBlock(default='chocolate'))
block = ShoppingListBlock()
shopping_lists = block.bulk_to_python([
{'shop': 'Tesco'}, # 'items' defaults to ['chocolate']
{'shop': 'Asda'}, # 'items' defaults to ['chocolate'], but a distinct instance
])
shopping_lists[0]['items'].append('cake')
self.assertEqual(shopping_lists[0]['items'], ['chocolate', 'cake'])
# shopping_lists[1] should not be updated
self.assertEqual(shopping_lists[1]['items'], ['chocolate'])
def test_clean(self):
block = blocks.StructBlock([
('title', blocks.CharBlock()),
('link', blocks.URLBlock()),
])
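        # clean() validates each child block and returns a StructValue of the cleaned values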
value = block.to_python({'title': 'Torchbox', 'link': 'http://www.torchbox.com/'})
clean_value = block.clean(value)
self.assertTrue(isinstance(clean_value, blocks.StructValue))
self.assertEqual(clean_value['title'], 'Torchbox')
value = block.to_python({'title': 'Torchbox', 'link': 'not a url'})
with self.assertRaises(ValidationError):
block.clean(value)
def test_bound_blocks_are_available_on_template(self):
"""
Test that we are able to use value.bound_blocks within templates
to access a child block's own HTML rendering
"""
block = SectionBlock()
value = block.to_python({'title': 'Hello', 'body': '<i>italic</i> world'})
result = block.render(value)
self.assertEqual(result, """<h1>Hello</h1><i>italic</i> world""")
def test_render_block_with_extra_context(self):
block = SectionBlock()
value = block.to_python({'title': 'Bonjour', 'body': 'monde <i>italique</i>'})
result = block.render(value, context={'language': 'fr'})
self.assertEqual(result, """<h1 lang="fr">Bonjour</h1>monde <i>italique</i>""")
def test_render_structvalue(self):
"""
The HTML representation of a StructValue should use the block's template
"""
block = SectionBlock()
value = block.to_python({'title': 'Hello', 'body': '<i>italic</i> world'})
result = value.__html__()
self.assertEqual(result, """<h1>Hello</h1><i>italic</i> world""")
# value.render_as_block() should be equivalent to value.__html__()
result = value.render_as_block()
self.assertEqual(result, """<h1>Hello</h1><i>italic</i> world""")
def test_str_structvalue(self):
"""
The str() representation of a StructValue should NOT render the template, as that's liable
to cause an infinite loop if any debugging / logging code attempts to log the fact that
it rendered a template with this object in the context:
https://github.com/wagtail/wagtail/issues/2874
https://github.com/jazzband/django-debug-toolbar/issues/950
"""
block = SectionBlock()
value = block.to_python({'title': 'Hello', 'body': '<i>italic</i> world'})
result = str(value)
self.assertNotIn('<h1>', result)
# The expected rendering should correspond to the native representation of an OrderedDict:
# "StructValue([('title', u'Hello'), ('body', <wagtail.core.rich_text.RichText object at 0xb12d5eed>)])"
# - give or take some quoting differences between Python versions
self.assertIn('StructValue', result)
self.assertIn('title', result)
self.assertIn('Hello', result)
def test_render_structvalue_with_extra_context(self):
block = SectionBlock()
value = block.to_python({'title': 'Bonjour', 'body': 'monde <i>italique</i>'})
result = value.render_as_block(context={'language': 'fr'})
self.assertEqual(result, """<h1 lang="fr">Bonjour</h1>monde <i>italique</i>""")
class TestStructBlockWithCustomStructValue(SimpleTestCase):
def test_initialisation(self):
class CustomStructValue(blocks.StructValue):
def joined(self):
return self.get('title', '') + self.get('link', '')
block = blocks.StructBlock([
('title', blocks.CharBlock()),
('link', blocks.URLBlock()),
], value_class=CustomStructValue)
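        # the custom value_class should be used by to_python, get_default, value_from_datadict and clean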
self.assertEqual(list(block.child_blocks.keys()), ['title', 'link'])
block_value = block.to_python({'title': 'Birthday party', 'link': 'https://myparty.co.uk'})
self.assertIsInstance(block_value, CustomStructValue)
default_value = block.get_default()
self.assertIsInstance(default_value, CustomStructValue)
value_from_datadict = block.value_from_datadict({
'mylink-title': "Torchbox",
'mylink-link': "http://www.torchbox.com"
}, {}, 'mylink')
self.assertIsInstance(value_from_datadict, CustomStructValue)
value = block.to_python({'title': 'Torchbox', 'link': 'http://www.torchbox.com/'})
clean_value = block.clean(value)
self.assertTrue(isinstance(clean_value, CustomStructValue))
self.assertEqual(clean_value['title'], 'Torchbox')
value = block.to_python({'title': 'Torchbox', 'link': 'not a url'})
with self.assertRaises(ValidationError):
block.clean(value)
def test_initialisation_from_subclass(self):
class LinkStructValue(blocks.StructValue):
def url(self):
return self.get('page') or self.get('link')
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
page = blocks.PageChooserBlock(required=False)
link = blocks.URLBlock(required=False)
class Meta:
value_class = LinkStructValue
block = LinkBlock()
self.assertEqual(list(block.child_blocks.keys()), ['title', 'page', 'link'])
block_value = block.to_python({'title': 'Website', 'link': 'https://website.com'})
self.assertIsInstance(block_value, LinkStructValue)
default_value = block.get_default()
self.assertIsInstance(default_value, LinkStructValue)
    def test_initialisation_with_multiple_subclasses(self):
class LinkStructValue(blocks.StructValue):
def url(self):
return self.get('page') or self.get('link')
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
page = blocks.PageChooserBlock(required=False)
link = blocks.URLBlock(required=False)
class Meta:
value_class = LinkStructValue
class StyledLinkBlock(LinkBlock):
classname = blocks.CharBlock()
block = StyledLinkBlock()
self.assertEqual(list(block.child_blocks.keys()), ['title', 'page', 'link', 'classname'])
value_from_datadict = block.value_from_datadict({
'queen-title': "Torchbox",
'queen-link': "http://www.torchbox.com",
'queen-classname': "fullsize",
}, {}, 'queen')
self.assertIsInstance(value_from_datadict, LinkStructValue)
def test_initialisation_with_mixins(self):
class LinkStructValue(blocks.StructValue):
pass
class StylingMixinStructValue(blocks.StructValue):
pass
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
class Meta:
value_class = LinkStructValue
class StylingMixin(blocks.StructBlock):
classname = blocks.CharBlock()
class StyledLinkBlock(StylingMixin, LinkBlock):
source = blocks.CharBlock()
block = StyledLinkBlock()
self.assertEqual(list(block.child_blocks.keys()),
['title', 'link', 'classname', 'source'])
block_value = block.to_python({
'title': 'Website', 'link': 'https://website.com',
'source': 'google', 'classname': 'full-size',
})
self.assertIsInstance(block_value, LinkStructValue)
def test_value_property(self):
class SectionStructValue(blocks.StructValue):
@property
def foo(self):
return 'bar %s' % self.get('title', '')
class SectionBlock(blocks.StructBlock):
title = blocks.CharBlock()
body = blocks.RichTextBlock()
class Meta:
value_class = SectionStructValue
block = SectionBlock()
struct_value = block.to_python({'title': 'hello', 'body': '<b>world</b>'})
value = struct_value.foo
self.assertEqual(value, 'bar hello')
def test_render_with_template(self):
class SectionStructValue(blocks.StructValue):
def title_with_suffix(self):
title = self.get('title')
if title:
return 'SUFFIX %s' % title
return 'EMPTY TITLE'
class SectionBlock(blocks.StructBlock):
title = blocks.CharBlock(required=False)
class Meta:
value_class = SectionStructValue
block = SectionBlock(template='tests/blocks/struct_block_custom_value.html')
struct_value = block.to_python({'title': 'hello'})
html = block.render(struct_value)
self.assertEqual(html, '<div>SUFFIX hello</div>\n')
struct_value = block.to_python({})
html = block.render(struct_value)
self.assertEqual(html, '<div>EMPTY TITLE</div>\n')
class TestListBlock(WagtailTestUtils, SimpleTestCase):
def test_initialise_with_class(self):
block = blocks.ListBlock(blocks.CharBlock)
# Child block should be initialised for us
self.assertIsInstance(block.child_block, blocks.CharBlock)
def test_initialise_with_instance(self):
child_block = blocks.CharBlock()
block = blocks.ListBlock(child_block)
self.assertEqual(block.child_block, child_block)
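    # helper used by the test_render_* methods below; not itself a test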
def render(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = blocks.ListBlock(LinkBlock())
return block.render([
{
'title': "Wagtail",
'link': 'http://www.wagtail.io',
},
{
'title': "Django",
'link': 'http://www.djangoproject.com',
},
])
def test_render_uses_ul(self):
html = self.render()
self.assertIn('<ul>', html)
self.assertIn('</ul>', html)
def test_render_uses_li(self):
html = self.render()
self.assertIn('<li>', html)
self.assertIn('</li>', html)
def test_render_calls_block_render_on_children(self):
"""
The default rendering of a ListBlock should invoke the block's render method
on each child, rather than just outputting the child value as a string.
"""
block = blocks.ListBlock(
blocks.CharBlock(template='tests/blocks/heading_block.html')
)
html = block.render(["Hello world!", "Goodbye world!"])
self.assertIn('<h1>Hello world!</h1>', html)
self.assertIn('<h1>Goodbye world!</h1>', html)
def test_render_passes_context_to_children(self):
"""
Template context passed to the render method should be passed on
to the render method of the child block.
"""
block = blocks.ListBlock(
blocks.CharBlock(template='tests/blocks/heading_block.html')
)
html = block.render(["Bonjour le monde!", "Au revoir le monde!"], context={
'language': 'fr',
})
self.assertIn('<h1 lang="fr">Bonjour le monde!</h1>', html)
self.assertIn('<h1 lang="fr">Au revoir le monde!</h1>', html)
def test_get_api_representation_calls_same_method_on_children_with_context(self):
"""
The get_api_representation method of a ListBlock should invoke
the block's get_api_representation method on each child and
the context should be passed on.
"""
class ContextBlock(blocks.CharBlock):
def get_api_representation(self, value, context=None):
return context[value]
block = blocks.ListBlock(
ContextBlock()
)
api_representation = block.get_api_representation(["en", "fr"], context={
'en': 'Hello world!',
'fr': 'Bonjour le monde!'
})
self.assertEqual(
api_representation, ['Hello world!', 'Bonjour le monde!']
)
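    # helper shared by the test_render_form_* methods below; not itself a test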
def render_form(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = blocks.ListBlock(LinkBlock)
html = block.render_form([
{
'title': "Wagtail",
'link': 'http://www.wagtail.io',
},
{
'title': "Django",
'link': 'http://www.djangoproject.com',
},
], prefix='links')
return html
def test_render_form_wrapper_class(self):
html = self.render_form()
self.assertIn('<div class="c-sf-container">', html)
def test_render_form_count_field(self):
html = self.render_form()
self.assertIn('<input type="hidden" name="links-count" id="links-count" value="2">', html)
def test_render_form_delete_field(self):
html = self.render_form()
self.assertIn('<input type="hidden" id="links-0-deleted" name="links-0-deleted" value="">', html)
def test_render_form_order_fields(self):
html = self.render_form()
self.assertIn('<input type="hidden" id="links-0-order" name="links-0-order" value="0">', html)
self.assertIn('<input type="hidden" id="links-1-order" name="links-1-order" value="1">', html)
def test_render_form_labels(self):
html = self.render_form()
self.assertIn('<label class="field__label" for="links-0-value-title">Title</label>', html)
self.assertIn('<label class="field__label" for="links-0-value-link">Link</label>', html)
def test_render_form_values(self):
html = self.render_form()
self.assertInHTML(
(
'<input id="links-0-value-title" name="links-0-value-title" placeholder="Title"'
' type="text" value="Wagtail" />'
),
html
)
self.assertInHTML(
(
'<input id="links-0-value-link" name="links-0-value-link" placeholder="Link" type="url"'
' value="http://www.wagtail.io" />'
),
html
)
self.assertInHTML(
(
'<input id="links-1-value-title" name="links-1-value-title" placeholder="Title" type="text"'
' value="Django" />'
),
html
)
self.assertInHTML(
(
'<input id="links-1-value-link" name="links-1-value-link" placeholder="Link"'
' type="url" value="http://www.djangoproject.com" />'
),
html
)
def test_html_declarations(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = blocks.ListBlock(LinkBlock)
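        # html_declarations() emits the child form template with __PREFIX__ placeholders,
        # so that new list items can be created client-side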
html = block.html_declarations()
self.assertTagInTemplateScript(
'<input id="__PREFIX__-value-title" name="__PREFIX__-value-title" placeholder="Title" type="text" />',
html
)
self.assertTagInTemplateScript(
'<input id="__PREFIX__-value-link" name="__PREFIX__-value-link" placeholder="Link" type="url" />',
html
)
def test_html_declarations_uses_default(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock(default="Github")
link = blocks.URLBlock(default="http://www.github.com")
block = blocks.ListBlock(LinkBlock)
html = block.html_declarations()
self.assertTagInTemplateScript(
(
'<input id="__PREFIX__-value-title" name="__PREFIX__-value-title" placeholder="Title"'
' type="text" value="Github" />'
),
html
)
self.assertTagInTemplateScript(
(
'<input id="__PREFIX__-value-link" name="__PREFIX__-value-link" placeholder="Link"'
' type="url" value="http://www.github.com" />'
),
html
)
def test_media_inheritance(self):
class ScriptedCharBlock(blocks.CharBlock):
media = forms.Media(js=['scripted_char_block.js'])
block = blocks.ListBlock(ScriptedCharBlock())
self.assertIn('scripted_char_block.js', ''.join(block.all_media().render_js()))
def test_html_declaration_inheritance(self):
class CharBlockWithDeclarations(blocks.CharBlock):
def html_declarations(self):
return '<script type="text/x-html-template">hello world</script>'
block = blocks.ListBlock(CharBlockWithDeclarations())
self.assertIn('<script type="text/x-html-template">hello world</script>', block.all_html_declarations())
def test_searchable_content(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = blocks.ListBlock(LinkBlock())
content = block.get_searchable_content([
{
'title': "Wagtail",
'link': 'http://www.wagtail.io',
},
{
'title': "Django",
'link': 'http://www.djangoproject.com',
},
])
self.assertEqual(content, ["Wagtail", "Django"])
def test_value_omitted_from_data(self):
block = blocks.ListBlock(blocks.CharBlock())
# overall value is considered present in the form if the 'count' field is present
self.assertFalse(block.value_omitted_from_data({'mylist-count': '0'}, {}, 'mylist'))
self.assertFalse(block.value_omitted_from_data({
'mylist-count': '1',
'mylist-0-value': 'hello', 'mylist-0-deleted': '', 'mylist-0-order': '0'
}, {}, 'mylist'))
self.assertTrue(block.value_omitted_from_data({'nothing-here': 'nope'}, {}, 'mylist'))
def test_ordering_in_form_submission_uses_order_field(self):
block = blocks.ListBlock(blocks.CharBlock())
# check that items are ordered by the 'order' field, not the order they appear in the form
post_data = {'shoppinglist-count': '3'}
for i in range(0, 3):
post_data.update({
'shoppinglist-%d-deleted' % i: '',
'shoppinglist-%d-order' % i: str(2 - i),
'shoppinglist-%d-value' % i: "item %d" % i
})
block_value = block.value_from_datadict(post_data, {}, 'shoppinglist')
self.assertEqual(block_value[2], "item 0")
def test_ordering_in_form_submission_is_numeric(self):
block = blocks.ListBlock(blocks.CharBlock())
# check that items are ordered by 'order' numerically, not alphabetically
post_data = {'shoppinglist-count': '12'}
for i in range(0, 12):
post_data.update({
'shoppinglist-%d-deleted' % i: '',
'shoppinglist-%d-order' % i: str(i),
'shoppinglist-%d-value' % i: "item %d" % i
})
block_value = block.value_from_datadict(post_data, {}, 'shoppinglist')
self.assertEqual(block_value[2], "item 2")
def test_can_specify_default(self):
class ShoppingListBlock(blocks.StructBlock):
shop = blocks.CharBlock()
items = blocks.ListBlock(blocks.CharBlock(), default=['peas', 'beans', 'carrots'])
block = ShoppingListBlock()
# the value here does not specify an 'items' field, so this should revert to the ListBlock's default
form_html = block.render_form(block.to_python({'shop': 'Tesco'}), prefix='shoppinglist')
self.assertIn(
'<input type="hidden" name="shoppinglist-items-count" id="shoppinglist-items-count" value="3">',
form_html
)
self.assertIn('value="peas"', form_html)
def test_default_default(self):
"""
if no explicit 'default' is set on the ListBlock, it should fall back on
a single instance of the child block in its default state.
"""
class ShoppingListBlock(blocks.StructBlock):
shop = blocks.CharBlock()
items = blocks.ListBlock(blocks.CharBlock(default='chocolate'))
block = ShoppingListBlock()
# the value here does not specify an 'items' field, so this should revert to the ListBlock's default
form_html = block.render_form(block.to_python({'shop': 'Tesco'}), prefix='shoppinglist')
self.assertIn(
'<input type="hidden" name="shoppinglist-items-count" id="shoppinglist-items-count" value="1">',
form_html
)
self.assertIn('value="chocolate"', form_html)
def test_default_value_is_distinct_instance(self):
"""
Whenever the default value of a ListBlock is invoked, it should be a distinct
instance of the list so that modifying it doesn't modify other places where the
default value appears.
"""
class ShoppingListBlock(blocks.StructBlock):
shop = blocks.CharBlock()
items = blocks.ListBlock(blocks.CharBlock(default='chocolate'))
block = ShoppingListBlock()
tesco_shopping = block.to_python({'shop': 'Tesco'}) # 'items' will default to ['chocolate']
asda_shopping = block.to_python({'shop': 'Asda'}) # 'items' will default to ['chocolate'], but a distinct instance
tesco_shopping['items'].append('cake')
self.assertEqual(tesco_shopping['items'], ['chocolate', 'cake'])
# asda_shopping should not be modified
self.assertEqual(asda_shopping['items'], ['chocolate'])
def test_render_with_classname_via_kwarg(self):
"""form_classname from kwargs to be used as an additional class when rendering list block"""
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = blocks.ListBlock(LinkBlock, form_classname='special-list-class')
html = block.render_form([
{
'title': "Wagtail",
'link': 'http://www.wagtail.io',
},
{
'title': "Django",
'link': 'http://www.djangoproject.com',
},
], prefix='links')
# including leading space to ensure class name gets added correctly
self.assertEqual(html.count(' special-list-class'), 1)
def test_render_with_classname_via_class_meta(self):
"""form_classname from meta to be used as an additional class when rendering list block"""
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
class CustomListBlock(blocks.ListBlock):
class Meta:
form_classname = 'custom-list-class'
block = CustomListBlock(LinkBlock)
html = block.render_form([
{
'title': "Wagtail",
'link': 'http://www.wagtail.io',
},
{
'title': "Django",
'link': 'http://www.djangoproject.com',
},
], prefix='links')
# including leading space to ensure class name gets added correctly
self.assertEqual(html.count(' custom-list-class'), 1)
class TestListBlockWithFixtures(TestCase):
fixtures = ['test.json']
def test_calls_child_bulk_to_python_when_available(self):
page_ids = [2, 3, 4, 5]
expected_pages = Page.objects.filter(pk__in=page_ids)
block = blocks.ListBlock(blocks.PageChooserBlock())
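        # PageChooserBlock provides bulk_to_python, so all pages should be fetched in a single query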
with self.assertNumQueries(1):
pages = block.to_python(page_ids)
self.assertSequenceEqual(pages, expected_pages)
def test_bulk_to_python(self):
block = blocks.ListBlock(blocks.PageChooserBlock())
with self.assertNumQueries(1):
result = block.bulk_to_python([[4, 5], [], [2]])
self.assertEqual(result, [
[Page.objects.get(id=4), Page.objects.get(id=5)],
[],
[Page.objects.get(id=2)],
])
class TestStreamBlock(WagtailTestUtils, SimpleTestCase):
def test_initialisation(self):
block = blocks.StreamBlock([
('heading', blocks.CharBlock()),
('paragraph', blocks.CharBlock()),
])
self.assertEqual(list(block.child_blocks.keys()), ['heading', 'paragraph'])
def test_initialisation_with_binary_string_names(self):
# migrations will sometimes write out names as binary strings, just to keep us on our toes
block = blocks.StreamBlock([
(b'heading', blocks.CharBlock()),
(b'paragraph', blocks.CharBlock()),
])
self.assertEqual(list(block.child_blocks.keys()), [b'heading', b'paragraph'])
def test_initialisation_from_subclass(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
self.assertEqual(list(block.child_blocks.keys()), ['heading', 'paragraph'])
def test_initialisation_from_subclass_with_extra(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock([
('intro', blocks.CharBlock())
])
self.assertEqual(list(block.child_blocks.keys()), ['heading', 'paragraph', 'intro'])
    def test_initialisation_with_multiple_subclasses(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
class ArticleWithIntroBlock(ArticleBlock):
intro = blocks.CharBlock()
block = ArticleWithIntroBlock()
self.assertEqual(list(block.child_blocks.keys()), ['heading', 'paragraph', 'intro'])
def test_initialisation_with_mixins(self):
"""
The order of child blocks of a ``StreamBlock`` with multiple parent
classes is slightly surprising at first. Child blocks are inherited in
a bottom-up order, by traversing the MRO in reverse. In the example
below, ``ArticleWithIntroBlock`` will have an MRO of::
[ArticleWithIntroBlock, IntroMixin, ArticleBlock, StreamBlock, ...]
This will result in ``intro`` appearing *after* ``heading`` and
``paragraph`` in ``ArticleWithIntroBlock.child_blocks``, even though
``IntroMixin`` appeared before ``ArticleBlock``.
"""
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
class IntroMixin(blocks.StreamBlock):
intro = blocks.CharBlock()
class ArticleWithIntroBlock(IntroMixin, ArticleBlock):
by_line = blocks.CharBlock()
block = ArticleWithIntroBlock()
self.assertEqual(list(block.child_blocks.keys()),
['heading', 'paragraph', 'intro', 'by_line'])
def test_field_has_changed(self):
block = blocks.StreamBlock([('paragraph', blocks.CharBlock())])
initial_value = blocks.StreamValue(block, [('paragraph', 'test')])
initial_value[0].id = 'a'
data_value = blocks.StreamValue(block, [('paragraph', 'test')])
data_value[0].id = 'a'
# identical ids and content, so has_changed should return False
self.assertFalse(blocks.BlockField(block).has_changed(initial_value, data_value))
changed_data_value = blocks.StreamValue(block, [('paragraph', 'not a test')])
changed_data_value[0].id = 'a'
# identical ids but changed content, so has_changed should return True
self.assertTrue(blocks.BlockField(block).has_changed(initial_value, changed_data_value))
def test_required_raises_an_exception_if_empty(self):
block = blocks.StreamBlock([('paragraph', blocks.CharBlock())], required=True)
value = blocks.StreamValue(block, [])
with self.assertRaises(blocks.StreamBlockValidationError):
block.clean(value)
def test_required_does_not_raise_an_exception_if_not_empty(self):
block = blocks.StreamBlock([('paragraph', blocks.CharBlock())], required=True)
value = block.to_python([{'type': 'paragraph', 'value': 'Hello'}])
try:
block.clean(value)
except blocks.StreamBlockValidationError:
raise self.failureException("%s was raised" % blocks.StreamBlockValidationError)
def test_not_required_does_not_raise_an_exception_if_empty(self):
block = blocks.StreamBlock([('paragraph', blocks.CharBlock())], required=False)
value = blocks.StreamValue(block, [])
try:
block.clean(value)
except blocks.StreamBlockValidationError:
raise self.failureException("%s was raised" % blocks.StreamBlockValidationError)
def test_required_by_default(self):
block = blocks.StreamBlock([('paragraph', blocks.CharBlock())])
value = blocks.StreamValue(block, [])
with self.assertRaises(blocks.StreamBlockValidationError):
block.clean(value)
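    # helper used by the rendering tests below; not itself a test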
def render_article(self, data):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.RichTextBlock()
block = ArticleBlock()
value = block.to_python(data)
return block.render(value)
def test_get_api_representation_calls_same_method_on_children_with_context(self):
"""
The get_api_representation method of a StreamBlock should invoke
the block's get_api_representation method on each child and
the context should be passed on.
"""
class ContextBlock(blocks.CharBlock):
def get_api_representation(self, value, context=None):
return context[value]
block = blocks.StreamBlock([
('language', ContextBlock()),
('author', ContextBlock()),
])
api_representation = block.get_api_representation(
block.to_python([
{'type': 'language', 'value': 'en'},
{'type': 'author', 'value': 'wagtail', 'id': '111111'},
]),
context={
'en': 'English',
'wagtail': 'Wagtail!'
}
)
self.assertListEqual(
api_representation, [
{'type': 'language', 'value': 'English', 'id': None},
{'type': 'author', 'value': 'Wagtail!', 'id': '111111'},
]
)
def test_render(self):
html = self.render_article([
{
'type': 'heading',
'value': "My title",
},
{
'type': 'paragraph',
'value': 'My <i>first</i> paragraph',
},
{
'type': 'paragraph',
'value': 'My second paragraph',
},
])
self.assertIn('<div class="block-heading">My title</div>', html)
self.assertIn('<div class="block-paragraph">My <i>first</i> paragraph</div>', html)
self.assertIn('<div class="block-paragraph">My second paragraph</div>', html)
def test_render_unknown_type(self):
# This can happen if a developer removes a type from their StreamBlock
html = self.render_article([
{
'type': 'foo',
'value': "Hello",
},
{
'type': 'paragraph',
'value': 'My first paragraph',
},
])
self.assertNotIn('foo', html)
self.assertNotIn('Hello', html)
self.assertIn('<div class="block-paragraph">My first paragraph</div>', html)
def test_render_calls_block_render_on_children(self):
"""
The default rendering of a StreamBlock should invoke the block's render method
on each child, rather than just outputting the child value as a string.
"""
block = blocks.StreamBlock([
('heading', blocks.CharBlock(template='tests/blocks/heading_block.html')),
('paragraph', blocks.CharBlock()),
])
value = block.to_python([
{'type': 'heading', 'value': 'Hello'}
])
html = block.render(value)
self.assertIn('<div class="block-heading"><h1>Hello</h1></div>', html)
# calling render_as_block() on value (a StreamValue instance)
# should be equivalent to block.render(value)
html = value.render_as_block()
self.assertIn('<div class="block-heading"><h1>Hello</h1></div>', html)
def test_render_passes_context_to_children(self):
block = blocks.StreamBlock([
('heading', blocks.CharBlock(template='tests/blocks/heading_block.html')),
('paragraph', blocks.CharBlock()),
])
value = block.to_python([
{'type': 'heading', 'value': 'Bonjour'}
])
html = block.render(value, context={
'language': 'fr',
})
self.assertIn('<div class="block-heading"><h1 lang="fr">Bonjour</h1></div>', html)
# calling render_as_block(context=foo) on value (a StreamValue instance)
# should be equivalent to block.render(value, context=foo)
html = value.render_as_block(context={
'language': 'fr',
})
self.assertIn('<div class="block-heading"><h1 lang="fr">Bonjour</h1></div>', html)
def test_render_on_stream_child_uses_child_template(self):
"""
Accessing a child element of the stream (giving a StreamChild object) and rendering it
should use the block template, not just render the value's string representation
"""
block = blocks.StreamBlock([
('heading', blocks.CharBlock(template='tests/blocks/heading_block.html')),
('paragraph', blocks.CharBlock()),
])
value = block.to_python([
{'type': 'heading', 'value': 'Hello'}
])
html = value[0].render()
self.assertEqual('<h1>Hello</h1>', html)
# StreamChild.__str__ should do the same
html = str(value[0])
self.assertEqual('<h1>Hello</h1>', html)
# and so should StreamChild.render_as_block
html = value[0].render_as_block()
self.assertEqual('<h1>Hello</h1>', html)
def test_can_pass_context_to_stream_child_template(self):
block = blocks.StreamBlock([
('heading', blocks.CharBlock(template='tests/blocks/heading_block.html')),
('paragraph', blocks.CharBlock()),
])
value = block.to_python([
{'type': 'heading', 'value': 'Bonjour'}
])
html = value[0].render(context={'language': 'fr'})
self.assertEqual('<h1 lang="fr">Bonjour</h1>', html)
# the same functionality should be available through the alias `render_as_block`
html = value[0].render_as_block(context={'language': 'fr'})
self.assertEqual('<h1 lang="fr">Bonjour</h1>', html)
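    # helper shared by the test_render_form_* methods below; not itself a test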
def render_form(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
value = block.to_python([
{
'type': 'heading',
'value': "My title",
'id': '123123123',
},
{
'type': 'paragraph',
'value': 'My first paragraph',
},
{
'type': 'paragraph',
'value': 'My second paragraph',
},
])
return block.render_form(value, prefix='myarticle')
def test_render_form_wrapper_class(self):
html = self.render_form()
self.assertIn('<div class="c-sf-container">', html)
def test_render_form_count_field(self):
html = self.render_form()
self.assertIn('<input type="hidden" name="myarticle-count" id="myarticle-count" value="3">', html)
def test_render_form_delete_field(self):
html = self.render_form()
self.assertIn('<input type="hidden" id="myarticle-0-deleted" name="myarticle-0-deleted" value="">', html)
def test_render_form_order_fields(self):
html = self.render_form()
self.assertIn('<input type="hidden" id="myarticle-0-order" name="myarticle-0-order" value="0">', html)
self.assertIn('<input type="hidden" id="myarticle-1-order" name="myarticle-1-order" value="1">', html)
self.assertIn('<input type="hidden" id="myarticle-2-order" name="myarticle-2-order" value="2">', html)
def test_render_form_id_fields(self):
html = self.render_form()
self.assertIn('<input type="hidden" id="myarticle-0-id" name="myarticle-0-id" value="123123123">', html)
self.assertIn('<input type="hidden" id="myarticle-1-id" name="myarticle-1-id" value="">', html)
self.assertIn('<input type="hidden" id="myarticle-2-id" name="myarticle-2-id" value="">', html)
def test_render_form_type_fields(self):
html = self.render_form()
self.assertIn('<input type="hidden" id="myarticle-0-type" name="myarticle-0-type" value="heading">', html)
self.assertIn('<input type="hidden" id="myarticle-1-type" name="myarticle-1-type" value="paragraph">', html)
self.assertIn('<input type="hidden" id="myarticle-2-type" name="myarticle-2-type" value="paragraph">', html)
def test_render_form_value_fields(self):
html = self.render_form()
self.assertInHTML(
(
'<input id="myarticle-0-value" name="myarticle-0-value" placeholder="Heading"'
' type="text" value="My title" />'
),
html
)
self.assertInHTML(
(
'<input id="myarticle-1-value" name="myarticle-1-value" placeholder="Paragraph"'
' type="text" value="My first paragraph" />'
),
html
)
self.assertInHTML(
(
'<input id="myarticle-2-value" name="myarticle-2-value" placeholder="Paragraph"'
' type="text" value="My second paragraph" />'
),
html
)
def test_value_omitted_from_data(self):
block = blocks.StreamBlock([
('heading', blocks.CharBlock()),
])
# overall value is considered present in the form if the 'count' field is present
self.assertFalse(block.value_omitted_from_data({'mystream-count': '0'}, {}, 'mystream'))
self.assertFalse(block.value_omitted_from_data({
'mystream-count': '1',
'mystream-0-type': 'heading', 'mystream-0-value': 'hello',
'mystream-0-deleted': '', 'mystream-0-order': '0'
}, {}, 'mystream'))
self.assertTrue(block.value_omitted_from_data({'nothing-here': 'nope'}, {}, 'mystream'))
def test_validation_errors(self):
class ValidatedBlock(blocks.StreamBlock):
char = blocks.CharBlock()
url = blocks.URLBlock()
block = ValidatedBlock()
value = blocks.StreamValue(block, [
('char', ''),
('char', 'foo'),
('url', 'http://example.com/'),
('url', 'not a url'),
])
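        # per-child errors are collected on the exception's params dict, keyed by block index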
with self.assertRaises(ValidationError) as catcher:
block.clean(value)
self.assertEqual(catcher.exception.params, {
0: ['This field is required.'],
3: ['Enter a valid URL.'],
})
def test_min_num_validation_errors(self):
class ValidatedBlock(blocks.StreamBlock):
char = blocks.CharBlock()
url = blocks.URLBlock()
block = ValidatedBlock(min_num=1)
value = blocks.StreamValue(block, [])
with self.assertRaises(ValidationError) as catcher:
block.clean(value)
self.assertEqual(catcher.exception.params, {
'__all__': ['The minimum number of items is 1']
})
# a value with >= 1 blocks should pass validation
value = blocks.StreamValue(block, [('char', 'foo')])
self.assertTrue(block.clean(value))
def test_max_num_validation_errors(self):
class ValidatedBlock(blocks.StreamBlock):
char = blocks.CharBlock()
url = blocks.URLBlock()
block = ValidatedBlock(max_num=1)
value = blocks.StreamValue(block, [
('char', 'foo'),
('char', 'foo'),
('url', 'http://example.com/'),
('url', 'http://example.com/'),
])
with self.assertRaises(ValidationError) as catcher:
block.clean(value)
self.assertEqual(catcher.exception.params, {
'__all__': ['The maximum number of items is 1']
})
# a value with 1 block should pass validation
value = blocks.StreamValue(block, [('char', 'foo')])
self.assertTrue(block.clean(value))
def test_block_counts_min_validation_errors(self):
class ValidatedBlock(blocks.StreamBlock):
char = blocks.CharBlock()
url = blocks.URLBlock()
block = ValidatedBlock(block_counts={'char': {'min_num': 1}})
value = blocks.StreamValue(block, [
('url', 'http://example.com/'),
('url', 'http://example.com/'),
])
with self.assertRaises(ValidationError) as catcher:
block.clean(value)
self.assertEqual(catcher.exception.params, {
'__all__': ['Char: The minimum number of items is 1']
})
# a value with 1 char block should pass validation
value = blocks.StreamValue(block, [
('url', 'http://example.com/'),
('char', 'foo'),
('url', 'http://example.com/'),
])
self.assertTrue(block.clean(value))
def test_block_counts_max_validation_errors(self):
class ValidatedBlock(blocks.StreamBlock):
char = blocks.CharBlock()
url = blocks.URLBlock()
block = ValidatedBlock(block_counts={'char': {'max_num': 1}})
value = blocks.StreamValue(block, [
('char', 'foo'),
('char', 'foo'),
('url', 'http://example.com/'),
('url', 'http://example.com/'),
])
with self.assertRaises(ValidationError) as catcher:
block.clean(value)
self.assertEqual(catcher.exception.params, {
'__all__': ['Char: The maximum number of items is 1']
})
# a value with 1 char block should pass validation
value = blocks.StreamValue(block, [
('char', 'foo'),
('url', 'http://example.com/'),
('url', 'http://example.com/'),
])
self.assertTrue(block.clean(value))
def test_block_level_validation_renders_errors(self):
block = FooStreamBlock()
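        # FooStreamBlock is defined elsewhere in this module; its block-level clean() is expected to fail for this data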
post_data = {'stream-count': '2'}
for i, value in enumerate(['bar', 'baz']):
post_data.update({
'stream-%d-deleted' % i: '',
'stream-%d-order' % i: str(i),
'stream-%d-type' % i: 'text',
'stream-%d-value' % i: value,
})
block_value = block.value_from_datadict(post_data, {}, 'stream')
with self.assertRaises(ValidationError) as catcher:
block.clean(block_value)
errors = ErrorList([
catcher.exception
])
self.assertInHTML(
format_html('<div class="help-block help-critical">{}</div>', FooStreamBlock.error),
block.render_form(block_value, prefix='stream', errors=errors))
def test_block_level_validation_render_no_errors(self):
block = FooStreamBlock()
post_data = {'stream-count': '3'}
for i, value in enumerate(['foo', 'bar', 'baz']):
post_data.update({
'stream-%d-deleted' % i: '',
'stream-%d-order' % i: str(i),
'stream-%d-type' % i: 'text',
'stream-%d-value' % i: value,
})
block_value = block.value_from_datadict(post_data, {}, 'stream')
try:
block.clean(block_value)
except ValidationError:
self.fail('Should have passed validation')
self.assertInHTML(
format_html('<div class="help-block help-critical">{}</div>', FooStreamBlock.error),
block.render_form(block_value, prefix='stream'),
count=0)
def test_html_declarations(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
html = block.html_declarations()
self.assertTagInTemplateScript('<input type="hidden" id="__PREFIX__-id" name="__PREFIX__-id" value="" />', html)
self.assertTagInTemplateScript('<input type="hidden" id="__PREFIX__-type" name="__PREFIX__-type" value="heading" />', html)
self.assertTagInTemplateScript('<input id="__PREFIX__-value" name="__PREFIX__-value" placeholder="Heading" type="text" />', html)
self.assertTagInTemplateScript(
'<input id="__PREFIX__-value" name="__PREFIX__-value" placeholder="Paragraph" type="text" />',
html
)
def test_html_declarations_uses_default(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock(default="Fish found on moon")
paragraph = blocks.CharBlock(default="Lorem ipsum dolor sit amet")
block = ArticleBlock()
html = block.html_declarations()
self.assertTagInTemplateScript(
(
'<input id="__PREFIX__-value" name="__PREFIX__-value" placeholder="Heading"'
' type="text" value="Fish found on moon" />'
),
html
)
self.assertTagInTemplateScript(
(
'<input id="__PREFIX__-value" name="__PREFIX__-value" placeholder="Paragraph" type="text"'
' value="Lorem ipsum dolor sit amet" />'
),
html
)
def test_media_inheritance(self):
class ScriptedCharBlock(blocks.CharBlock):
media = forms.Media(js=['scripted_char_block.js'])
class ArticleBlock(blocks.StreamBlock):
heading = ScriptedCharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
self.assertIn('scripted_char_block.js', ''.join(block.all_media().render_js()))
def test_html_declaration_inheritance(self):
class CharBlockWithDeclarations(blocks.CharBlock):
def html_declarations(self):
return '<script type="text/x-html-template">hello world</script>'
class ArticleBlock(blocks.StreamBlock):
heading = CharBlockWithDeclarations(default="Torchbox")
paragraph = blocks.CharBlock()
block = ArticleBlock()
self.assertIn('<script type="text/x-html-template">hello world</script>', block.all_html_declarations())
def test_ordering_in_form_submission_uses_order_field(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
# check that items are ordered by the 'order' field, not the order they appear in the form
post_data = {'article-count': '3'}
for i in range(0, 3):
post_data.update({
'article-%d-deleted' % i: '',
'article-%d-order' % i: str(2 - i),
'article-%d-type' % i: 'heading',
'article-%d-value' % i: "heading %d" % i,
'article-%d-id' % i: "000%d" % i,
})
block_value = block.value_from_datadict(post_data, {}, 'article')
self.assertEqual(block_value[2].value, "heading 0")
self.assertEqual(block_value[2].id, "0000")
def test_ordering_in_form_submission_is_numeric(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
# check that items are ordered by 'order' numerically, not alphabetically
post_data = {'article-count': '12'}
for i in range(0, 12):
post_data.update({
'article-%d-deleted' % i: '',
'article-%d-order' % i: str(i),
'article-%d-type' % i: 'heading',
'article-%d-value' % i: "heading %d" % i
})
block_value = block.value_from_datadict(post_data, {}, 'article')
self.assertEqual(block_value[2].value, "heading 2")
def test_searchable_content(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
value = block.to_python([
{
'type': 'heading',
'value': "My title",
},
{
'type': 'paragraph',
'value': 'My first paragraph',
},
{
'type': 'paragraph',
'value': 'My second paragraph',
},
])
content = block.get_searchable_content(value)
self.assertEqual(content, [
"My title",
"My first paragraph",
"My second paragraph",
])
def test_meta_default(self):
"""Test that we can specify a default value in the Meta of a StreamBlock"""
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
class Meta:
default = [('heading', 'A default heading')]
# to access the default value, we retrieve it through a StructBlock
# from a struct value that's missing that key
class ArticleContainerBlock(blocks.StructBlock):
author = blocks.CharBlock()
article = ArticleBlock()
block = ArticleContainerBlock()
struct_value = block.to_python({'author': 'Bob'})
stream_value = struct_value['article']
self.assertTrue(isinstance(stream_value, blocks.StreamValue))
self.assertEqual(len(stream_value), 1)
self.assertEqual(stream_value[0].block_type, 'heading')
self.assertEqual(stream_value[0].value, 'A default heading')
def test_constructor_default(self):
"""Test that we can specify a default value in the constructor of a StreamBlock"""
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
class Meta:
default = [('heading', 'A default heading')]
# to access the default value, we retrieve it through a StructBlock
# from a struct value that's missing that key
class ArticleContainerBlock(blocks.StructBlock):
author = blocks.CharBlock()
article = ArticleBlock(default=[('heading', 'A different default heading')])
block = ArticleContainerBlock()
struct_value = block.to_python({'author': 'Bob'})
stream_value = struct_value['article']
self.assertTrue(isinstance(stream_value, blocks.StreamValue))
self.assertEqual(len(stream_value), 1)
self.assertEqual(stream_value[0].block_type, 'heading')
self.assertEqual(stream_value[0].value, 'A different default heading')
def test_stream_value_equality(self):
block = blocks.StreamBlock([
('text', blocks.CharBlock()),
])
value1 = block.to_python([{'type': 'text', 'value': 'hello'}])
value2 = block.to_python([{'type': 'text', 'value': 'hello'}])
value3 = block.to_python([{'type': 'text', 'value': 'goodbye'}])
self.assertTrue(value1 == value2)
self.assertFalse(value1 != value2)
self.assertFalse(value1 == value3)
self.assertTrue(value1 != value3)
def test_render_considers_group_attribute(self):
"""If group attributes are set in Block Meta classes, render a <h4> for each different block"""
class Group1Block1(blocks.CharBlock):
class Meta:
group = 'group1'
class Group1Block2(blocks.CharBlock):
class Meta:
group = 'group1'
class Group2Block1(blocks.CharBlock):
class Meta:
group = 'group2'
class Group2Block2(blocks.CharBlock):
class Meta:
group = 'group2'
class NoGroupBlock(blocks.CharBlock):
pass
block = blocks.StreamBlock([
('b1', Group1Block1()),
('b2', Group1Block2()),
('b3', Group2Block1()),
('b4', Group2Block2()),
('ngb', NoGroupBlock()),
])
        html = block.render_form('')
        self.assertNotIn('<h4 class="c-sf-add-panel__group-title"></h4>', html)
self.assertIn('<h4 class="c-sf-add-panel__group-title">group1</h4>', html)
self.assertIn('<h4 class="c-sf-add-panel__group-title">group2</h4>', html)
def test_value_from_datadict(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
value = block.value_from_datadict({
'foo-count': '3',
'foo-0-deleted': '',
'foo-0-order': '2',
'foo-0-type': 'heading',
'foo-0-id': '0000',
'foo-0-value': 'this is my heading',
'foo-1-deleted': '1',
'foo-1-order': '1',
'foo-1-type': 'heading',
'foo-1-id': '0001',
'foo-1-value': 'a deleted heading',
'foo-2-deleted': '',
'foo-2-order': '0',
'foo-2-type': 'paragraph',
'foo-2-id': '',
'foo-2-value': '<p>this is a paragraph</p>',
}, {}, prefix='foo')
self.assertEqual(len(value), 2)
self.assertEqual(value[0].block_type, 'paragraph')
self.assertEqual(value[0].id, '')
self.assertEqual(value[0].value, '<p>this is a paragraph</p>')
self.assertEqual(value[1].block_type, 'heading')
self.assertEqual(value[1].id, '0000')
self.assertEqual(value[1].value, 'this is my heading')
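    # shared assertions for get_prep_value, exercised below with both lazy and non-lazy stream data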
def check_get_prep_value(self, stream_data, is_lazy):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
value = blocks.StreamValue(block, stream_data, is_lazy=is_lazy)
jsonish_value = block.get_prep_value(value)
self.assertEqual(len(jsonish_value), 2)
self.assertEqual(jsonish_value[0], {'type': 'heading', 'value': 'this is my heading', 'id': '0000'})
self.assertEqual(jsonish_value[1]['type'], 'paragraph')
self.assertEqual(jsonish_value[1]['value'], '<p>this is a paragraph</p>')
# get_prep_value should assign a new (random and non-empty)
# ID to this block, as it didn't have one already.
self.assertTrue(jsonish_value[1]['id'])
# Calling get_prep_value again should preserve existing IDs, including the one
# just assigned to block 1
jsonish_value_again = block.get_prep_value(value)
self.assertEqual(jsonish_value[0]['id'], jsonish_value_again[0]['id'])
self.assertEqual(jsonish_value[1]['id'], jsonish_value_again[1]['id'])
def test_get_prep_value_not_lazy(self):
stream_data = [
('heading', 'this is my heading', '0000'),
('paragraph', '<p>this is a paragraph</p>')
]
self.check_get_prep_value(stream_data, is_lazy=False)
def test_get_prep_value_is_lazy(self):
stream_data = [
{'type': 'heading', 'value': 'this is my heading', 'id': '0000'},
{'type': 'paragraph', 'value': '<p>this is a paragraph</p>'},
]
self.check_get_prep_value(stream_data, is_lazy=True)
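    # shared assertions for get_prep_value on StreamBlocks nested inside a StructBlock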
def check_get_prep_value_nested_streamblocks(self, stream_data, is_lazy):
class TwoColumnBlock(blocks.StructBlock):
left = blocks.StreamBlock([('text', blocks.CharBlock())])
right = blocks.StreamBlock([('text', blocks.CharBlock())])
block = TwoColumnBlock()
value = {
k: blocks.StreamValue(block.child_blocks[k], v, is_lazy=is_lazy)
for k, v in stream_data.items()
}
jsonish_value = block.get_prep_value(value)
self.assertEqual(len(jsonish_value), 2)
self.assertEqual(
jsonish_value['left'],
[{'type': 'text', 'value': 'some text', 'id': '0000'}]
)
self.assertEqual(len(jsonish_value['right']), 1)
right_block = jsonish_value['right'][0]
self.assertEqual(right_block['type'], 'text')
self.assertEqual(right_block['value'], 'some other text')
# get_prep_value should assign a new (random and non-empty)
# ID to this block, as it didn't have one already.
self.assertTrue(right_block['id'])
def test_get_prep_value_nested_streamblocks_not_lazy(self):
stream_data = {
'left': [('text', 'some text', '0000')],
'right': [('text', 'some other text')],
}
self.check_get_prep_value_nested_streamblocks(stream_data, is_lazy=False)
def test_get_prep_value_nested_streamblocks_is_lazy(self):
stream_data = {
'left': [{
'type': 'text',
'value': 'some text',
'id': '0000',
}],
'right': [{
'type': 'text',
'value': 'some other text',
}],
}
self.check_get_prep_value_nested_streamblocks(stream_data, is_lazy=True)
def test_modifications_to_stream_child_id_are_saved(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
stream = block.to_python([
{'type': 'heading', 'value': 'hello', 'id': '0001'},
{'type': 'paragraph', 'value': 'world', 'id': '0002'},
])
stream[1].id = '0003'
raw_data = block.get_prep_value(stream)
self.assertEqual(raw_data, [
{'type': 'heading', 'value': 'hello', 'id': '0001'},
{'type': 'paragraph', 'value': 'world', 'id': '0003'},
])
def test_modifications_to_stream_child_value_are_saved(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
stream = block.to_python([
{'type': 'heading', 'value': 'hello', 'id': '0001'},
{'type': 'paragraph', 'value': 'world', 'id': '0002'},
])
stream[1].value = 'earth'
raw_data = block.get_prep_value(stream)
self.assertEqual(raw_data, [
{'type': 'heading', 'value': 'hello', 'id': '0001'},
{'type': 'paragraph', 'value': 'earth', 'id': '0002'},
])
def test_set_streamvalue_item(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
stream = block.to_python([
{'type': 'heading', 'value': 'hello', 'id': '0001'},
{'type': 'paragraph', 'value': 'world', 'id': '0002'},
])
stream[1] = ('heading', 'goodbye', '0003')
raw_data = block.get_prep_value(stream)
self.assertEqual(raw_data, [
{'type': 'heading', 'value': 'hello', 'id': '0001'},
{'type': 'heading', 'value': 'goodbye', 'id': '0003'},
])
def test_delete_streamvalue_item(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
stream = block.to_python([
{'type': 'heading', 'value': 'hello', 'id': '0001'},
{'type': 'paragraph', 'value': 'world', 'id': '0002'},
])
del stream[0]
raw_data = block.get_prep_value(stream)
self.assertEqual(raw_data, [
{'type': 'paragraph', 'value': 'world', 'id': '0002'},
])
def test_insert_streamvalue_item(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
stream = block.to_python([
{'type': 'heading', 'value': 'hello', 'id': '0001'},
{'type': 'paragraph', 'value': 'world', 'id': '0002'},
])
stream.insert(1, ('paragraph', 'mutable', '0003'))
raw_data = block.get_prep_value(stream)
self.assertEqual(raw_data, [
{'type': 'heading', 'value': 'hello', 'id': '0001'},
{'type': 'paragraph', 'value': 'mutable', 'id': '0003'},
{'type': 'paragraph', 'value': 'world', 'id': '0002'},
])
def test_append_streamvalue_item(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
stream = block.to_python([
{'type': 'heading', 'value': 'hello', 'id': '0001'},
{'type': 'paragraph', 'value': 'world', 'id': '0002'},
])
stream.append(('paragraph', 'of warcraft', '0003'))
raw_data = block.get_prep_value(stream)
self.assertEqual(raw_data, [
{'type': 'heading', 'value': 'hello', 'id': '0001'},
{'type': 'paragraph', 'value': 'world', 'id': '0002'},
{'type': 'paragraph', 'value': 'of warcraft', 'id': '0003'},
])
def test_streamvalue_raw_data(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
stream = block.to_python([
{'type': 'heading', 'value': 'hello', 'id': '0001'},
{'type': 'paragraph', 'value': 'world', 'id': '0002'},
])
self.assertEqual(stream.raw_data[0], {'type': 'heading', 'value': 'hello', 'id': '0001'})
stream.raw_data[0]['value'] = 'bonjour'
self.assertEqual(stream.raw_data[0], {'type': 'heading', 'value': 'bonjour', 'id': '0001'})
# changes to raw_data will be written back via get_prep_value...
raw_data = block.get_prep_value(stream)
self.assertEqual(raw_data, [
{'type': 'heading', 'value': 'bonjour', 'id': '0001'},
{'type': 'paragraph', 'value': 'world', 'id': '0002'},
])
# ...but once the bound-block representation has been accessed, that takes precedence
self.assertEqual(stream[0].value, 'bonjour')
stream.raw_data[0]['value'] = 'guten tag'
self.assertEqual(stream.raw_data[0]['value'], 'guten tag')
self.assertEqual(stream[0].value, 'bonjour')
raw_data = block.get_prep_value(stream)
self.assertEqual(raw_data, [
{'type': 'heading', 'value': 'bonjour', 'id': '0001'},
{'type': 'paragraph', 'value': 'world', 'id': '0002'},
])
# Replacing a raw_data entry outright will propagate to the bound block, though
stream.raw_data[0] = {'type': 'heading', 'value': 'konnichiwa', 'id': '0003'}
raw_data = block.get_prep_value(stream)
self.assertEqual(raw_data, [
{'type': 'heading', 'value': 'konnichiwa', 'id': '0003'},
{'type': 'paragraph', 'value': 'world', 'id': '0002'},
])
self.assertEqual(stream[0].value, 'konnichiwa')
# deletions / insertions on raw_data will also propagate to the bound block representation
del stream.raw_data[1]
stream.raw_data.insert(0, {'type': 'paragraph', 'value': 'hello kitty says', 'id': '0004'})
raw_data = block.get_prep_value(stream)
self.assertEqual(raw_data, [
{'type': 'paragraph', 'value': 'hello kitty says', 'id': '0004'},
{'type': 'heading', 'value': 'konnichiwa', 'id': '0003'},
])
def test_render_with_classname_via_kwarg(self):
"""form_classname from kwargs to be used as an additional class when rendering stream block"""
block = blocks.StreamBlock([
(b'heading', blocks.CharBlock()),
(b'paragraph', blocks.CharBlock()),
], form_classname='rocket-section')
value = block.to_python([
{
'type': 'heading',
'value': "Falcon Heavy",
'id': '2',
},
{
'type': 'paragraph',
'value': "Ultra heavy launch capability",
'id': '3',
}
])
html = block.render_form(value)
# including leading space to ensure class name gets added correctly
self.assertEqual(html.count(' rocket-section'), 1)
def test_render_with_classname_via_class_meta(self):
"""form_classname from meta to be used as an additional class when rendering stream block"""
class ProfileBlock(blocks.StreamBlock):
username = blocks.CharBlock()
class Meta:
form_classname = 'profile-block-large'
block = ProfileBlock()
value = block.to_python([
{
'type': 'username',
'value': "renegadeM@ster",
'id': '789',
}
])
html = block.render_form(value, prefix='profiles')
# including leading space to ensure class name gets added correctly
self.assertEqual(html.count(' profile-block-large'), 1)
class TestStructBlockWithFixtures(TestCase):
fixtures = ['test.json']
def test_bulk_to_python(self):
page_link_block = blocks.StructBlock([
('page', blocks.PageChooserBlock(required=False)),
('link_text', blocks.CharBlock(default="missing title")),
])
with self.assertNumQueries(1):
result = page_link_block.bulk_to_python([
{'page': 2, 'link_text': 'page two'},
{'page': 3, 'link_text': 'page three'},
{'page': None, 'link_text': 'no page'},
{'page': 4},
])
result_types = [type(val) for val in result]
self.assertEqual(result_types, [blocks.StructValue] * 4)
result_titles = [val['link_text'] for val in result]
self.assertEqual(result_titles, ['page two', 'page three', 'no page', 'missing title'])
result_pages = [val['page'] for val in result]
self.assertEqual(result_pages, [
Page.objects.get(id=2), Page.objects.get(id=3), None, Page.objects.get(id=4)
])
class TestStreamBlockWithFixtures(TestCase):
fixtures = ['test.json']
def test_bulk_to_python(self):
stream_block = blocks.StreamBlock([
('page', blocks.PageChooserBlock()),
('heading', blocks.CharBlock()),
])
# The naive implementation of bulk_to_python (calling to_python on each item) would perform
# NO queries, as StreamBlock.to_python returns a lazy StreamValue that only starts calling
# to_python on its children (and thus triggering DB queries) when its items are accessed.
# This is a good thing for a standalone to_python call, because loading a model instance
# with a StreamField in it will immediately call StreamField.to_python which in turn calls
# to_python on the top-level StreamBlock, and we really don't want
# SomeModelWithAStreamField.objects.get(id=1) to immediately trigger a cascading fetch of
# all objects referenced in the StreamField.
#
# However, for bulk_to_python that's bad, as it means each stream in the list would end up
# doing its own object lookups in isolation, missing the opportunity to group them together
# into a single call to the child block's bulk_to_python. Therefore, the ideal outcome is
# that we perform one query now (covering all PageChooserBlocks across all streams),
# returning a list of non-lazy StreamValues.
with self.assertNumQueries(1):
results = stream_block.bulk_to_python([
[{'type': 'heading', 'value': 'interesting pages'}, {'type': 'page', 'value': 2}, {'type': 'page', 'value': 3}],
[{'type': 'heading', 'value': 'pages written by dogs'}, {'type': 'woof', 'value': 'woof woof'}],
[{'type': 'heading', 'value': 'boring pages'}, {'type': 'page', 'value': 4}],
])
# If bulk_to_python has indeed given us non-lazy StreamValues, then no further queries
# should be performed when iterating over its child blocks.
with self.assertNumQueries(0):
block_types = [
[block.block_type for block in stream]
for stream in results
]
self.assertEqual(block_types, [
['heading', 'page', 'page'],
['heading'],
['heading', 'page'],
])
with self.assertNumQueries(0):
block_values = [
[block.value for block in stream]
for stream in results
]
self.assertEqual(block_values, [
['interesting pages', Page.objects.get(id=2), Page.objects.get(id=3)],
['pages written by dogs'],
['boring pages', Page.objects.get(id=4)],
])
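# Illustrative sketch only -- this is NOT Wagtail's actual implementation. It simply
# makes the reasoning in the comments of the test above concrete: group raw child
# values by block type, make one bulk_to_python call per child block, then stitch the
# converted values back into place so the caller can build non-lazy StreamValues.
def _bulk_to_python_sketch(stream_block, raw_streams):
    by_type = {}
    for stream_index, raw_stream in enumerate(raw_streams):
        for item_index, item in enumerate(raw_stream):
            if item['type'] in stream_block.child_blocks:
                by_type.setdefault(item['type'], []).append(
                    (stream_index, item_index, item['value'])
                )

    converted = {}
    for block_type, entries in by_type.items():
        child_block = stream_block.child_blocks[block_type]
        values = child_block.bulk_to_python([value for _, _, value in entries])
        for (stream_index, item_index, _), value in zip(entries, values):
            converted[(stream_index, item_index)] = value

    # unrecognised block types (like 'woof' above) are simply absent from the result
    return converted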
class TestPageChooserBlock(TestCase):
fixtures = ['test.json']
def test_serialize(self):
"""The value of a PageChooserBlock (a Page object) should serialize to an ID"""
block = blocks.PageChooserBlock()
christmas_page = Page.objects.get(slug='christmas')
self.assertEqual(block.get_prep_value(christmas_page), christmas_page.id)
# None should serialize to None
self.assertEqual(block.get_prep_value(None), None)
def test_deserialize(self):
"""The serialized value of a PageChooserBlock (an ID) should deserialize to a Page object"""
block = blocks.PageChooserBlock()
christmas_page = Page.objects.get(slug='christmas')
self.assertEqual(block.to_python(christmas_page.id), christmas_page)
# None should deserialize to None
self.assertEqual(block.to_python(None), None)
def test_form_render(self):
block = blocks.PageChooserBlock(help_text="pick a page, any page")
empty_form_html = block.render_form(None, 'page')
self.assertInHTML('<input id="page" name="page" placeholder="" type="hidden" />', empty_form_html)
self.assertIn('createPageChooser("page", null, {"model_names": ["wagtailcore.page"], "can_choose_root": false, "user_perms": null});', empty_form_html)
christmas_page = Page.objects.get(slug='christmas')
christmas_form_html = block.render_form(christmas_page, 'page')
expected_html = '<input id="page" name="page" placeholder="" type="hidden" value="%d" />' % christmas_page.id
self.assertInHTML(expected_html, christmas_form_html)
self.assertIn("pick a page, any page", christmas_form_html)
def test_form_render_with_target_model_default(self):
block = blocks.PageChooserBlock()
empty_form_html = block.render_form(None, 'page')
self.assertIn('createPageChooser("page", null, {"model_names": ["wagtailcore.page"], "can_choose_root": false, "user_perms": null});', empty_form_html)
def test_form_render_with_target_model_string(self):
block = blocks.PageChooserBlock(help_text="pick a page, any page", page_type='tests.SimplePage')
empty_form_html = block.render_form(None, 'page')
self.assertIn('createPageChooser("page", null, {"model_names": ["tests.simplepage"], "can_choose_root": false, "user_perms": null});', empty_form_html)
def test_form_render_with_target_model_literal(self):
block = blocks.PageChooserBlock(help_text="pick a page, any page", page_type=SimplePage)
empty_form_html = block.render_form(None, 'page')
self.assertIn('createPageChooser("page", null, {"model_names": ["tests.simplepage"], "can_choose_root": false, "user_perms": null});', empty_form_html)
def test_form_render_with_target_model_multiple_strings(self):
block = blocks.PageChooserBlock(help_text="pick a page, any page", page_type=['tests.SimplePage', 'tests.EventPage'])
empty_form_html = block.render_form(None, 'page')
self.assertIn('createPageChooser("page", null, {"model_names": ["tests.simplepage", "tests.eventpage"], "can_choose_root": false, "user_perms": null});', empty_form_html)
def test_form_render_with_target_model_multiple_literals(self):
block = blocks.PageChooserBlock(help_text="pick a page, any page", page_type=[SimplePage, EventPage])
empty_form_html = block.render_form(None, 'page')
self.assertIn('createPageChooser("page", null, {"model_names": ["tests.simplepage", "tests.eventpage"], "can_choose_root": false, "user_perms": null});', empty_form_html)
def test_form_render_with_can_choose_root(self):
block = blocks.PageChooserBlock(help_text="pick a page, any page", can_choose_root=True)
empty_form_html = block.render_form(None, 'page')
self.assertIn('createPageChooser("page", null, {"model_names": ["wagtailcore.page"], "can_choose_root": true, "user_perms": null});', empty_form_html)
def test_form_response(self):
block = blocks.PageChooserBlock()
christmas_page = Page.objects.get(slug='christmas')
value = block.value_from_datadict({'page': str(christmas_page.id)}, {}, 'page')
self.assertEqual(value, christmas_page)
empty_value = block.value_from_datadict({'page': ''}, {}, 'page')
self.assertEqual(empty_value, None)
def test_clean(self):
required_block = blocks.PageChooserBlock()
nonrequired_block = blocks.PageChooserBlock(required=False)
christmas_page = Page.objects.get(slug='christmas')
self.assertEqual(required_block.clean(christmas_page), christmas_page)
with self.assertRaises(ValidationError):
required_block.clean(None)
self.assertEqual(nonrequired_block.clean(christmas_page), christmas_page)
self.assertEqual(nonrequired_block.clean(None), None)
def test_target_model_default(self):
block = blocks.PageChooserBlock()
self.assertEqual(block.target_model, Page)
def test_target_model_string(self):
block = blocks.PageChooserBlock(page_type='tests.SimplePage')
self.assertEqual(block.target_model, SimplePage)
def test_target_model_literal(self):
block = blocks.PageChooserBlock(page_type=SimplePage)
self.assertEqual(block.target_model, SimplePage)
def test_target_model_multiple_strings(self):
block = blocks.PageChooserBlock(page_type=['tests.SimplePage', 'tests.EventPage'])
self.assertEqual(block.target_model, Page)
def test_target_model_multiple_literals(self):
block = blocks.PageChooserBlock(page_type=[SimplePage, EventPage])
self.assertEqual(block.target_model, Page)
def test_deconstruct_target_model_default(self):
block = blocks.PageChooserBlock()
self.assertEqual(block.deconstruct(), (
'wagtail.core.blocks.PageChooserBlock',
(), {}))
def test_deconstruct_target_model_string(self):
block = blocks.PageChooserBlock(page_type='tests.SimplePage')
self.assertEqual(block.deconstruct(), (
'wagtail.core.blocks.PageChooserBlock',
(), {'page_type': ['tests.SimplePage']}))
def test_deconstruct_target_model_literal(self):
block = blocks.PageChooserBlock(page_type=SimplePage)
self.assertEqual(block.deconstruct(), (
'wagtail.core.blocks.PageChooserBlock',
(), {'page_type': ['tests.SimplePage']}))
def test_deconstruct_target_model_multiple_strings(self):
block = blocks.PageChooserBlock(page_type=['tests.SimplePage', 'tests.EventPage'])
self.assertEqual(block.deconstruct(), (
'wagtail.core.blocks.PageChooserBlock',
(), {'page_type': ['tests.SimplePage', 'tests.EventPage']}))
def test_deconstruct_target_model_multiple_literals(self):
block = blocks.PageChooserBlock(page_type=[SimplePage, EventPage])
self.assertEqual(block.deconstruct(), (
'wagtail.core.blocks.PageChooserBlock',
(), {'page_type': ['tests.SimplePage', 'tests.EventPage']}))
def test_bulk_to_python(self):
page_ids = [2, 3, 4, 5]
expected_pages = Page.objects.filter(pk__in=page_ids)
block = blocks.PageChooserBlock()
with self.assertNumQueries(1):
pages = block.bulk_to_python(page_ids)
self.assertSequenceEqual(pages, expected_pages)
class TestStaticBlock(unittest.TestCase):
def test_render_form_with_constructor(self):
block = blocks.StaticBlock(
admin_text="Latest posts - This block doesn't need to be configured, it will be displayed automatically",
template='tests/blocks/posts_static_block.html')
rendered_html = block.render_form(None)
self.assertEqual(rendered_html, "Latest posts - This block doesn't need to be configured, it will be displayed automatically")
def test_render_form_with_subclass(self):
class PostsStaticBlock(blocks.StaticBlock):
class Meta:
admin_text = "Latest posts - This block doesn't need to be configured, it will be displayed automatically"
template = "tests/blocks/posts_static_block.html"
block = PostsStaticBlock()
rendered_html = block.render_form(None)
self.assertEqual(rendered_html, "Latest posts - This block doesn't need to be configured, it will be displayed automatically")
def test_render_form_with_subclass_displays_default_text_if_no_admin_text(self):
class LabelOnlyStaticBlock(blocks.StaticBlock):
class Meta:
label = "Latest posts"
block = LabelOnlyStaticBlock()
rendered_html = block.render_form(None)
self.assertEqual(rendered_html, "Latest posts: this block has no options.")
def test_render_form_with_subclass_displays_default_text_if_no_admin_text_and_no_label(self):
class NoMetaStaticBlock(blocks.StaticBlock):
pass
block = NoMetaStaticBlock()
rendered_html = block.render_form(None)
self.assertEqual(rendered_html, "This block has no options.")
def test_render_form_works_with_mark_safe(self):
block = blocks.StaticBlock(
admin_text=mark_safe("<b>Latest posts</b> - This block doesn't need to be configured, it will be displayed automatically"),
template='tests/blocks/posts_static_block.html')
rendered_html = block.render_form(None)
self.assertEqual(rendered_html, "<b>Latest posts</b> - This block doesn't need to be configured, it will be displayed automatically")
def test_get_default(self):
block = blocks.StaticBlock()
default_value = block.get_default()
self.assertEqual(default_value, None)
def test_render(self):
block = blocks.StaticBlock(template='tests/blocks/posts_static_block.html')
result = block.render(None)
self.assertEqual(result, '<p>PostsStaticBlock template</p>')
def test_serialize(self):
block = blocks.StaticBlock()
result = block.get_prep_value(None)
self.assertEqual(result, None)
def test_deserialize(self):
block = blocks.StaticBlock()
result = block.to_python(None)
self.assertEqual(result, None)
class TestDateBlock(TestCase):
def test_render_form(self):
block = blocks.DateBlock()
value = date(2015, 8, 13)
result = block.render_form(value, prefix='dateblock')
# we should see the JS initialiser code:
# <script>initDateChooser("dateblock", {"dayOfWeekStart": 0, "format": "Y-m-d"});</script>
# except that we can't predict the order of the config options
self.assertIn('<script>initDateChooser("dateblock", {', result)
self.assertIn('"dayOfWeekStart": 0', result)
self.assertIn('"format": "Y-m-d"', result)
self.assertInHTML(
'<input id="dateblock" name="dateblock" placeholder="" type="text" value="2015-08-13" autocomplete="off" />',
result
)
def test_render_form_with_format(self):
block = blocks.DateBlock(format='%d.%m.%Y')
value = date(2015, 8, 13)
result = block.render_form(value, prefix='dateblock')
self.assertIn('<script>initDateChooser("dateblock", {', result)
self.assertIn('"dayOfWeekStart": 0', result)
self.assertIn('"format": "d.m.Y"', result)
self.assertInHTML(
'<input id="dateblock" name="dateblock" placeholder="" type="text" value="13.08.2015" autocomplete="off" />',
result
)
class TestDateTimeBlock(TestCase):
def test_render_form_with_format(self):
block = blocks.DateTimeBlock(format='%d.%m.%Y %H:%M')
value = datetime(2015, 8, 13, 10, 0)
result = block.render_form(value, prefix='datetimeblock')
self.assertIn(
'"format": "d.m.Y H:i"',
result
)
self.assertInHTML(
'<input id="datetimeblock" name="datetimeblock" placeholder="" type="text" value="13.08.2015 10:00" autocomplete="off" />',
result
)
class TestSystemCheck(TestCase):
def test_name_cannot_contain_non_alphanumeric(self):
block = blocks.StreamBlock([
('heading', blocks.CharBlock()),
('rich+text', blocks.RichTextBlock()),
])
errors = block.check()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].id, 'wagtailcore.E001')
self.assertEqual(errors[0].hint, "Block names should follow standard Python conventions for variable names: alpha-numeric and underscores, and cannot begin with a digit")
self.assertEqual(errors[0].obj, block.child_blocks['rich+text'])
def test_name_must_be_nonempty(self):
block = blocks.StreamBlock([
('heading', blocks.CharBlock()),
('', blocks.RichTextBlock()),
])
errors = block.check()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].id, 'wagtailcore.E001')
self.assertEqual(errors[0].hint, "Block name cannot be empty")
self.assertEqual(errors[0].obj, block.child_blocks[''])
def test_name_cannot_contain_spaces(self):
block = blocks.StreamBlock([
('heading', blocks.CharBlock()),
('rich text', blocks.RichTextBlock()),
])
errors = block.check()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].id, 'wagtailcore.E001')
self.assertEqual(errors[0].hint, "Block names cannot contain spaces")
self.assertEqual(errors[0].obj, block.child_blocks['rich text'])
def test_name_cannot_contain_dashes(self):
block = blocks.StreamBlock([
('heading', blocks.CharBlock()),
('rich-text', blocks.RichTextBlock()),
])
errors = block.check()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].id, 'wagtailcore.E001')
self.assertEqual(errors[0].hint, "Block names cannot contain dashes")
self.assertEqual(errors[0].obj, block.child_blocks['rich-text'])
def test_name_cannot_begin_with_digit(self):
block = blocks.StreamBlock([
('heading', blocks.CharBlock()),
('99richtext', blocks.RichTextBlock()),
])
errors = block.check()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].id, 'wagtailcore.E001')
self.assertEqual(errors[0].hint, "Block names cannot begin with a digit")
self.assertEqual(errors[0].obj, block.child_blocks['99richtext'])
def test_system_checks_recurse_into_lists(self):
failing_block = blocks.RichTextBlock()
block = blocks.StreamBlock([
('paragraph_list', blocks.ListBlock(
blocks.StructBlock([
('heading', blocks.CharBlock()),
('rich text', failing_block),
])
))
])
errors = block.check()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].id, 'wagtailcore.E001')
self.assertEqual(errors[0].hint, "Block names cannot contain spaces")
self.assertEqual(errors[0].obj, failing_block)
def test_system_checks_recurse_into_streams(self):
failing_block = blocks.RichTextBlock()
block = blocks.StreamBlock([
('carousel', blocks.StreamBlock([
('text', blocks.StructBlock([
('heading', blocks.CharBlock()),
('rich text', failing_block),
]))
]))
])
errors = block.check()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].id, 'wagtailcore.E001')
self.assertEqual(errors[0].hint, "Block names cannot contain spaces")
self.assertEqual(errors[0].obj, failing_block)
def test_system_checks_recurse_into_structs(self):
failing_block_1 = blocks.RichTextBlock()
failing_block_2 = blocks.RichTextBlock()
block = blocks.StreamBlock([
('two_column', blocks.StructBlock([
('left', blocks.StructBlock([
('heading', blocks.CharBlock()),
('rich text', failing_block_1),
])),
('right', blocks.StructBlock([
('heading', blocks.CharBlock()),
('rich text', failing_block_2),
]))
]))
])
errors = block.check()
self.assertEqual(len(errors), 2)
self.assertEqual(errors[0].id, 'wagtailcore.E001')
self.assertEqual(errors[0].hint, "Block names cannot contain spaces")
self.assertEqual(errors[0].obj, failing_block_1)
self.assertEqual(errors[1].id, 'wagtailcore.E001')
self.assertEqual(errors[1].hint, "Block names cannot contain spaces")
self.assertEqual(errors[1].obj, failing_block_2)
class TestTemplateRendering(TestCase):
def test_render_with_custom_context(self):
block = CustomLinkBlock()
value = block.to_python({'title': 'Torchbox', 'url': 'http://torchbox.com/'})
context = {'classname': 'important'}
result = block.render(value, context)
self.assertEqual(result, '<a href="http://torchbox.com/" class="important">Torchbox</a>')
def test_render_with_custom_form_context(self):
block = CustomLinkBlock()
value = block.to_python({'title': 'Torchbox', 'url': 'http://torchbox.com/'})
result = block.render_form(value, prefix='my-link-block')
self.assertIn('data-prefix="my-link-block"', result)
self.assertIn('<p>Hello from get_form_context!</p>', result)
class TestIncludeBlockTag(TestCase):
def test_include_block_tag_with_boundblock(self):
"""
The include_block tag should be able to render a BoundBlock's template
while keeping the parent template's context
"""
block = blocks.CharBlock(template='tests/blocks/heading_block.html')
bound_block = block.bind('bonjour')
result = render_to_string('tests/blocks/include_block_test.html', {
'test_block': bound_block,
'language': 'fr',
})
self.assertIn('<body><h1 lang="fr">bonjour</h1></body>', result)
def test_include_block_tag_with_structvalue(self):
"""
The include_block tag should be able to render a StructValue's template
while keeping the parent template's context
"""
block = SectionBlock()
struct_value = block.to_python({'title': 'Bonjour', 'body': 'monde <i>italique</i>'})
result = render_to_string('tests/blocks/include_block_test.html', {
'test_block': struct_value,
'language': 'fr',
})
self.assertIn(
"""<body><h1 lang="fr">Bonjour</h1>monde <i>italique</i></body>""",
result
)
def test_include_block_tag_with_streamvalue(self):
"""
The include_block tag should be able to render a StreamValue's template
while keeping the parent template's context
"""
block = blocks.StreamBlock([
('heading', blocks.CharBlock(template='tests/blocks/heading_block.html')),
('paragraph', blocks.CharBlock()),
], template='tests/blocks/stream_with_language.html')
stream_value = block.to_python([
{'type': 'heading', 'value': 'Bonjour'}
])
result = render_to_string('tests/blocks/include_block_test.html', {
'test_block': stream_value,
'language': 'fr',
})
self.assertIn('<div class="heading" lang="fr"><h1 lang="fr">Bonjour</h1></div>', result)
def test_include_block_tag_with_plain_value(self):
"""
The include_block tag should be able to render a value without a render_as_block method
by just rendering it as a string
"""
result = render_to_string('tests/blocks/include_block_test.html', {
'test_block': 42,
})
self.assertIn('<body>42</body>', result)
def test_include_block_tag_with_filtered_value(self):
"""
The block parameter on include_block tag should support complex values including filters,
e.g. {% include_block foo|default:123 %}
"""
block = blocks.CharBlock(template='tests/blocks/heading_block.html')
bound_block = block.bind('bonjour')
result = render_to_string('tests/blocks/include_block_test_with_filter.html', {
'test_block': bound_block,
'language': 'fr',
})
self.assertIn('<body><h1 lang="fr">bonjour</h1></body>', result)
result = render_to_string('tests/blocks/include_block_test_with_filter.html', {
'test_block': None,
'language': 'fr',
})
self.assertIn('<body>999</body>', result)
def test_include_block_tag_with_extra_context(self):
"""
Test that it's possible to pass extra context on an include_block tag using
{% include_block foo with classname="bar" %}
"""
block = blocks.CharBlock(template='tests/blocks/heading_block.html')
bound_block = block.bind('bonjour')
result = render_to_string('tests/blocks/include_block_with_test.html', {
'test_block': bound_block,
'language': 'fr',
})
self.assertIn('<body><h1 lang="fr" class="important">bonjour</h1></body>', result)
def test_include_block_tag_with_only_flag(self):
"""
A tag such as {% include_block foo with classname="bar" only %}
should not inherit the parent context
"""
block = blocks.CharBlock(template='tests/blocks/heading_block.html')
bound_block = block.bind('bonjour')
result = render_to_string('tests/blocks/include_block_only_test.html', {
'test_block': bound_block,
'language': 'fr',
})
self.assertIn('<body><h1 class="important">bonjour</h1></body>', result)
class BlockUsingGetTemplateMethod(blocks.Block):
my_new_template = "my_super_awesome_dynamic_template.html"
def get_template(self):
return self.my_new_template
class TestOverriddenGetTemplateBlockTag(TestCase):
def test_template_is_overriden_by_get_template(self):
block = BlockUsingGetTemplateMethod(template='tests/blocks/this_shouldnt_be_used.html')
template = block.get_template()
self.assertEqual(template, block.my_new_template)
| bsd-3-clause | 9,188,236,261,612,717,000 | 38.250902 | 199 | 0.58864 | false |
nanjj/softlayer-python | SoftLayer/CLI/file/detail.py | 1 | 5131 | """Display details for a specified volume."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
from SoftLayer import utils
@click.command()
@click.argument('volume_id')
@environment.pass_env
def cli(env, volume_id):
"""Display details for a specified volume."""
file_manager = SoftLayer.FileStorageManager(env.client)
file_volume = file_manager.get_file_volume_details(volume_id)
file_volume = utils.NestedDict(file_volume)
table = formatting.KeyValueTable(['Name', 'Value'])
table.align['Name'] = 'r'
table.align['Value'] = 'l'
storage_type = file_volume['storageType']['keyName'].split('_').pop(0)
table.add_row(['ID', file_volume['id']])
table.add_row(['Username', file_volume['username']])
table.add_row(['Type', storage_type])
table.add_row(['Capacity (GB)', "%iGB" % file_volume['capacityGb']])
used_space = int(file_volume['bytesUsed'])\
if file_volume['bytesUsed'] else 0
if used_space < (1 << 10):
table.add_row(['Used Space', "%dB" % used_space])
elif used_space < (1 << 20):
table.add_row(['Used Space', "%dKB" % (used_space / (1 << 10))])
elif used_space < (1 << 30):
table.add_row(['Used Space', "%dMB" % (used_space / (1 << 20))])
else:
table.add_row(['Used Space', "%dGB" % (used_space / (1 << 30))])
if file_volume.get('provisionedIops'):
table.add_row(['IOPs', int(file_volume['provisionedIops'])])
if file_volume.get('storageTierLevel'):
table.add_row([
'Endurance Tier',
file_volume['storageTierLevel'],
])
table.add_row([
'Data Center',
file_volume['serviceResource']['datacenter']['name'],
])
table.add_row([
'Target IP',
file_volume['serviceResourceBackendIpAddress'],
])
if file_volume['fileNetworkMountAddress']:
table.add_row([
'Mount Address',
file_volume['fileNetworkMountAddress'],
])
if file_volume['snapshotCapacityGb']:
table.add_row([
'Snapshot Capacity (GB)',
file_volume['snapshotCapacityGb'],
])
if 'snapshotSizeBytes' in file_volume['parentVolume']:
table.add_row([
'Snapshot Used (Bytes)',
file_volume['parentVolume']['snapshotSizeBytes'],
])
table.add_row(['# of Active Transactions', "%i"
% file_volume['activeTransactionCount']])
if file_volume['activeTransactions']:
for trans in file_volume['activeTransactions']:
if 'transactionStatus' in trans and 'friendlyName' in trans['transactionStatus']:
table.add_row(['Ongoing Transaction', trans['transactionStatus']['friendlyName']])
table.add_row(['Replicant Count', "%u" % file_volume.get('replicationPartnerCount', 0)])
if file_volume['replicationPartnerCount'] > 0:
# This if/else temporarily handles a bug in which the SL API
# returns a string or object for 'replicationStatus'; it seems that
# the type is string for File volumes and object for Block volumes
if 'message' in file_volume['replicationStatus']:
table.add_row(['Replication Status', "%s"
% file_volume['replicationStatus']['message']])
else:
table.add_row(['Replication Status', "%s"
% file_volume['replicationStatus']])
replicant_list = []
for replicant in file_volume['replicationPartners']:
replicant_table = formatting.Table(['Replicant ID',
replicant['id']])
replicant_table.add_row([
'Volume Name',
utils.lookup(replicant, 'username')])
replicant_table.add_row([
'Target IP',
utils.lookup(replicant, 'serviceResourceBackendIpAddress')])
replicant_table.add_row([
'Data Center',
utils.lookup(replicant,
'serviceResource', 'datacenter', 'name')])
replicant_table.add_row([
'Schedule',
utils.lookup(replicant,
'replicationSchedule', 'type', 'keyname')])
replicant_list.append(replicant_table)
table.add_row(['Replicant Volumes', replicant_list])
if file_volume.get('originalVolumeSize'):
original_volume_info = formatting.Table(['Property', 'Value'])
original_volume_info.add_row(['Original Volume Size', file_volume['originalVolumeSize']])
if file_volume.get('originalVolumeName'):
original_volume_info.add_row(['Original Volume Name', file_volume['originalVolumeName']])
if file_volume.get('originalSnapshotName'):
original_volume_info.add_row(['Original Snapshot Name', file_volume['originalSnapshotName']])
table.add_row(['Original Volume Properties', original_volume_info])
env.fout(table)
| mit | -6,818,909,302,590,211,000 | 39.401575 | 105 | 0.592477 | false |
wood-galaxy/FreeCAD | src/Mod/Path/PathCommands.py | 3 | 3451 | # -*- coding: utf-8 -*-
# ***************************************************************************
# * *
# * Copyright (c) 2016 sliptonic <[email protected]> *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
import FreeCAD
from PathScripts.PathUtils import loopdetect
if FreeCAD.GuiUp:
import FreeCADGui
from PySide import QtCore
from DraftTools import translate
else:
def translate(ctxt,txt):
return txt
__title__="FreeCAD Path Commands"
__author__ = "sliptonic"
__url__ = "http://www.freecadweb.org"
class _CommandSelectLoop:
"the Arch RemoveShape command definition"
def GetResources(self):
return {'Pixmap' : 'Path-SelectLoop',
'MenuText': QtCore.QT_TRANSLATE_NOOP("Path_SelectLoop","Finish Selecting Loop"),
'ToolTip': QtCore.QT_TRANSLATE_NOOP("Path_SelectLoop","Complete loop selection from two edges")}
def IsActive(self):
if bool(FreeCADGui.Selection.getSelection()) is False:
return False
try:
sel = FreeCADGui.Selection.getSelectionEx()[0]
sub1 = sel.SubElementNames[0]
if sub1[0:4] != 'Edge':
return False
sub2 = sel.SubElementNames[1]
if sub2[0:4] != 'Edge':
return False
return True
except:
return False
def Activated(self):
sel = FreeCADGui.Selection.getSelectionEx()[0]
obj = sel.Object
edge1 = sel.SubObjects[0]
edge2 = sel.SubObjects[1]
loopwire = loopdetect(obj, edge1, edge2)
if loopwire is not None:
FreeCADGui.Selection.clearSelection()
elist = obj.Shape.Edges
for e in elist:
for i in loopwire.Edges:
if e.hashCode() == i.hashCode():
FreeCADGui.Selection.addSelection(obj, "Edge"+str(elist.index(e)+1))
if FreeCAD.GuiUp:
FreeCADGui.addCommand('Path_SelectLoop',_CommandSelectLoop())
| lgpl-2.1 | -4,393,383,547,174,822,400 | 42.1375 | 112 | 0.496957 | false |
disenone/zsync | test/zsync_client.py | 1 | 2617 | # -*- coding: utf-8 -*-
import zmq
import os
import time
from threading import Thread
from zhelpers import socket_set_hwm, zpipe
from zsync_server import CHUNK_SIZE, PIPELINE, ports, ip
dst_path = 'sync_files_dst'
def client_thread(ctx, port):
dealer = ctx.socket(zmq.DEALER)
socket_set_hwm(dealer, PIPELINE)
tcp = 'tcp://%s:%d' % (ip, port)
dealer.connect(tcp)
print 'connecting %s \n' % tcp
credit = PIPELINE # Up to PIPELINE chunks in transit
total = 0 # Total bytes received
chunks = 0 # Total chunks received
offset = 0 # Offset of next chunk request
dealer.send_multipart([b'fetch'])
try:
fname = dealer.recv()
except zmq.ZMQError as e:
if e.errno == zmq.ETERM:
return # shutting down, quit
else:
raise
outf = open(os.path.join(dst_path, fname), 'w')
print 'fetching %s \n' % fname
    recvd = {}
    last_size = CHUNK_SIZE  # keeps the end-of-file check below defined even if the first reply arrives out of order
while True:
while credit:
# ask for next chunk
dealer.send_multipart([
b"fetch",
b"%i" % offset,
b"%i" % CHUNK_SIZE,
])
offset += CHUNK_SIZE
credit -= 1
try:
msg = dealer.recv_multipart()
except zmq.ZMQError as e:
if e.errno == zmq.ETERM:
return # shutting down, quit
else:
raise
offset_str, chunk = msg
chunks += 1
credit += 1
roffset = int(offset_str)
if total != roffset:
recvd[roffset] = chunk
print 'total %d save offset %d' % (total, roffset)
else:
outf.write(chunk)
last_size = len(chunk)
total += last_size
for roff in sorted(recvd.keys()):
if roff == total:
chunk = recvd.pop(roff)
outf.write(chunk)
last_size = len(chunk)
total += last_size
else:
break
if last_size < CHUNK_SIZE:
break # Last chunk received; exit
outf.close()
dealer.send_multipart([b'close', b'0', b'0'])
print ("%i chunks received, %i bytes" % (chunks, total))
return
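# Illustrative sketch only: zsync_server.py is imported above but not shown here, so
# this assumed minimal ROUTER-side handler just mirrors the frames this client sends
# ([b'fetch'] -> filename, [b'fetch', offset, size] -> [offset, chunk], and
# [b'close', ...] to finish). Names and framing are assumptions, not the real server.
def example_server_loop(ctx, port, fname):
    router = ctx.socket(zmq.ROUTER)
    router.bind('tcp://*:%d' % port)
    src = open(fname, 'rb')
    while True:
        frames = router.recv_multipart()
        identity, command, rest = frames[0], frames[1], frames[2:]
        if command == b'fetch' and not rest:
            # initial request: reply with the name of the file being served
            router.send_multipart([identity, os.path.basename(fname)])
        elif command == b'fetch':
            # chunk request: [offset, size] -> [offset, data]
            offset, size = int(rest[0]), int(rest[1])
            src.seek(offset)
            router.send_multipart([identity, rest[0], src.read(size)])
        else:  # b'close'
            break
    src.close()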
if __name__ == '__main__':
begint = time.time()
ctx = zmq.Context()
clients = [Thread(target=client_thread, args=(ctx, port,)) for port in ports]
[client.start() for client in clients]
[client.join() for client in clients]
endt = time.time()
print 'finish: %ss' % (endt - begint) | mit | -1,719,389,939,645,223,000 | 25.444444 | 81 | 0.512419 | false |
peter765/pineapple | plugins/base/Help.py | 1 | 3564 | from util import Events
class Plugin(object):
def __init__(self, pm):
self.pm = pm
self.name = "Help"
@staticmethod
def register_events():
return [Events.Command("help", desc="Usage: help [module]|all, shows help text for a plugin, or a list of "
"plugins if no plugin is specified."),
Events.Command("hello"),
Events.Command("info")]
async def handle_command(self, message_object, command, args):
if command == "help":
if "all" in args[1]:
await self.all_help(message_object)
            elif args[1] != "":
await self.show_help(message_object, args[1].lower())
else:
await self.show_help_assigned(message_object)
if command == "info":
await self.info(message_object)
if command == "hello":
await self.hello(message_object)
async def all_help(self, message_object):
hstr = "Complete Command List\n"
for name, commands in self.pm.comlist.items():
if len(commands) > 0:
hstr += "\n**{0}**\n".format(name[:-3])
for c, d in commands:
                    if d != "":
hstr += "`" + self.pm.botPreferences.commandPrefix + c + "`: \n_" + d + "_\n"
else:
hstr += "`" + self.pm.botPreferences.commandPrefix + c + "`\n"
        # Split text into chunks of at most 1000 chars (keeping the final partial chunk)
        help_strings = [hstr[i:i + 1000] for i in range(0, len(hstr), 1000)]
for string in help_strings:
await self.pm.client.send_message(message_object.author, string)
if not message_object.channel.is_private:
await self.pm.client.delete_message(message_object)
async def info(self, message_object):
await self.pm.clientWrap.send_message(self.name, message_object.channel,
'**Pineapple**\nSource code available at: https://github.com/Dynista/pineapple')
async def hello(self, message_object):
msg = 'Hello {0.author.mention}'.format(message_object)
await self.pm.clientWrap.send_message(self.name, message_object.channel, msg)
async def show_help(self, message_object, args):
try:
hstr = "**{0}**:\n".format(args)
for c, d in self.pm.comlist[args + ".py"]:
hstr = hstr + "`" + self.pm.botPreferences.commandPrefix + c + "`: " + d + "\n"
await self.pm.clientWrap.send_message(self.name, message_object.author, hstr)
except KeyError:
await self.pm.clientWrap.send_message(self.name, message_object.author,
":exclamation: That\'s not a valid plugin name")
if not message_object.channel.is_private:
await self.pm.client.delete_message(message_object)
async def show_help_assigned(self, message_object):
x = "Bot Help\n```"
for name, commands in self.pm.comlist.items():
if len(commands) > 0:
x = x + name[:-3] + " "
x += "```\n`" + self.pm.botPreferences.commandPrefix + "help [help_topic]` to evoke a help topic.\n`" + \
self.pm.botPreferences.commandPrefix + "help all` for all commands."
await self.pm.clientWrap.send_message(self.name, message_object.author, x)
if not message_object.channel.is_private:
await self.pm.client.delete_message(message_object)
| mit | 2,830,629,725,129,842,000 | 44.692308 | 126 | 0.5578 | false |
georgemarselis/homeworkdb | getDisgenetData.py | 1 | 1991 | #!/usr/bin/env python3.4
import sys
import urllib.request, urllib.error, urllib.parse
import pandas
import numpy
import csv
from clint.textui import colored
# c1 (diseaseId, name, hpoName, STY, MESH, diseaseClassName, doName, type, OMIM ),
query="""
DEFINE
c0='/data/gene_disease_summary',
c1='/data/diseases',
c2='/data/genes',
c3='/data/gene_to_associated_diseases',
c4='/data/sources'
ON
'http://www.disgenet.org/web/DisGeNET'
SELECT
c1 (diseaseId, OMIM ),
c2 (symbol, geneId, uniprotId, description, pantherName ),
c0 (score, Npmids, Nsnps ),
c3 (Ndiseases)
FROM
c0
WHERE
(
c1 = 'C0030567'
AND
c4 = 'ALL'
AND
c0.score > '0.25'
)
ORDER BY
c0.score DESC"""
binary_data = query.encode("utf-8")
req = urllib.request.Request("http://www.disgenet.org/oql")
res = urllib.request.urlopen(req, binary_data)
csvresults = res.read().decode( 'utf-8' )
print( colored.green( csvresults ) )
disgenetDataFile = 'disgenet/disgenet_data.tsv'
with open( disgenetDataFile, 'w' ) as file:
for row in csvresults:
file.write( row )
## disgenet
###########################################
disgenetDataFile = 'disgenet/disgenet_data.tsv'
disgenetFieldNames = [ 'c1.diseaseId', 'c1.OMIM', 'c2.symbol', 'c2.geneId', 'c2.uniprotId', 'c2.description', 'c2.pantherName', 'c0.score', 'c0.Npmids', 'c0.Nsnps', 'c3.Ndiseases' ]
restkey = 'unknownkey'
restval = 'unknownvalue'
dialect = 'excel-tab'
# read payload
###########################################
disgenetCsvfile = open( disgenetDataFile )
disgenetReader = csv.DictReader( disgenetCsvfile, disgenetFieldNames, restkey, restval, dialect );
array = []
kot = 0 # magic to skip the first header row
for row in disgenetReader:
if kot == 0 :
kot = 1
continue
if row['c2.symbol'] not in array:
array.append( row['c2.symbol'] )
print( "Array of genes to be writen to disk: " + colored.yellow( array ) )
listOfGenes = 'listOfGenes.tsv'
with open( listOfGenes, 'w' ) as file:
file.write( '\n'.join( array ) )
| gpl-3.0 | -370,553,118,357,286,200 | 24.525641 | 181 | 0.663988 | false |
jag1g13/pycgtool | test/test_util.py | 1 | 7866 | import unittest
import os
import logging
import numpy as np
import numpy.testing
from pycgtool.util import tuple_equivalent, extend_graph_chain, stat_moments, transpose_and_sample
from pycgtool.util import dir_up, backup_file, sliding, r_squared, dist_with_pbc
from pycgtool.util import SimpleEnum, FixedFormatUnpacker
class UtilTest(unittest.TestCase):
def test_tuple_equivalent(self):
t1 = (0, 1, 2)
t2 = (0, 1, 2)
self.assertTrue(tuple_equivalent(t1, t2))
t2 = (2, 1, 0)
self.assertTrue(tuple_equivalent(t1, t2))
t2 = (2, 1, 3)
self.assertFalse(tuple_equivalent(t1, t2))
def test_dist_with_pbc(self):
pos_a = np.array([1., 1., 1.])
pos_b = np.array([9., 9., 9.])
numpy.testing.assert_equal(np.array([8., 8., 8.]),
dist_with_pbc(pos_a, pos_b, np.array([0., 0., 0.])))
numpy.testing.assert_equal(np.array([8., 8., 8.]),
dist_with_pbc(pos_a, pos_b, np.array([20., 20., 20.])))
numpy.testing.assert_equal(np.array([-2., -2., -2.]),
dist_with_pbc(pos_a, pos_b, np.array([10., 10., 10.])))
def test_triplets_from_pairs(self):
pairs = [(0, 1), (1, 2), (2, 3)]
result = [(0, 1, 2), (1, 2, 3)]
self.assertEqual(result, sorted(extend_graph_chain(pairs, pairs)))
pairs = [(0, 1), (1, 2), (2, 3), (3, 0)]
result = [(0, 1, 2), (1, 0, 3), (1, 2, 3), (2, 3, 0)]
self.assertEqual(result, sorted(extend_graph_chain(pairs, pairs)))
def test_triplets_from_pairs_multires(self):
pairs = [("a", "b"), ("b", "c"), ("c", "d"), ("d", "+a")]
result = [("a", "b", "c"), ("b", "c", "d"), ("c", "d", "+a"), ("d", "+a", "+b")]
self.assertEqual(result, sorted(extend_graph_chain(pairs, pairs)))
def test_quadruplets_from_pairs(self):
pairs = [(0, 1), (1, 2), (2, 3)]
result = [(0, 1, 2, 3)]
triplets = extend_graph_chain(pairs, pairs)
self.assertEqual(result, sorted(extend_graph_chain(triplets, pairs)))
pairs = [(0, 1), (1, 2), (2, 3), (3, 0)]
triplets = extend_graph_chain(pairs, pairs)
result = [(0, 1, 2, 3), (1, 0, 3, 2), (1, 2, 3, 0), (2, 1, 0, 3)]
self.assertEqual(result, sorted(extend_graph_chain(triplets, pairs)))
def test_stat_moments(self):
t1 = [3, 3, 3, 3, 3]
t2 = [1, 2, 3, 4, 5]
np.testing.assert_allclose(np.array([3, 0]), stat_moments(t1))
np.testing.assert_allclose(np.array([3, 2]), stat_moments(t2))
def test_dir_up(self):
path = os.path.realpath(__file__)
self.assertEqual(path, dir_up(path, 0))
self.assertEqual(os.path.dirname(path), dir_up(path))
self.assertEqual(os.path.dirname(os.path.dirname(path)), dir_up(path, 2))
def test_backup_file(self):
try:
os.remove("testfile")
os.remove("#testfile.1#")
os.remove("#testfile.2#")
except OSError:
pass
logging.disable(logging.WARNING)
open("testfile", "a").close()
self.assertEqual("#testfile.1#", backup_file("testfile"))
open("testfile", "a").close()
self.assertTrue(os.path.exists("#testfile.1#"))
self.assertEqual("#testfile.2#", backup_file("testfile"))
open("testfile", "a").close()
self.assertTrue(os.path.exists("#testfile.2#"))
logging.disable(logging.NOTSET)
os.remove("testfile")
os.remove("#testfile.1#")
os.remove("#testfile.2#")
def test_sliding(self):
l = [0, 1, 2, 3, 4]
res = [(None, 0, 1), (0, 1, 2), (1, 2, 3), (2, 3, 4), (3, 4, None)]
for res, pair in zip(res, sliding(l)):
self.assertEqual(res, pair)
def test_r_squared(self):
ref = [i for i in range(5)]
fit = ref
self.assertEqual(1, r_squared(ref, fit))
fit = [2 for _ in range(5)]
self.assertEqual(0, r_squared(ref, fit))
fit = [i for i in range(1, 6)]
self.assertEqual(0.5, r_squared(ref, fit))
def test_transpose_and_sample_no_sample(self):
l = [(1, 2), (3, 4), (5, 6)]
l_t = [(1, 3, 5), (2, 4, 6)]
self.assertEqual(l_t, transpose_and_sample(l, None))
def test_transpose_and_sample(self):
l = [(1, 2), (3, 4), (5, 6)]
l_t = [(1, 3, 5), (2, 4, 6)]
l_t_test = transpose_and_sample(l, n=1)
self.assertEqual(1, len(l_t_test))
self.assertIn(l_t_test[0], l_t)
def test_simple_enum(self):
enum = SimpleEnum.enum("enum", ["one", "two", "three"])
self.assertTrue(enum.one == enum.one)
self.assertTrue(enum.one.compare_value(enum.one))
self.assertFalse(enum.two == enum.three)
self.assertFalse(enum.two.compare_value(enum.three))
with self.assertRaises(AttributeError):
_ = enum.four
with self.assertRaises(AttributeError):
enum.one = 2
enum2 = SimpleEnum.enum("enum2", ["one", "two", "three"])
with self.assertRaises(TypeError):
assert enum2.one == enum.one
self.assertTrue("one" in enum)
self.assertFalse("four" in enum)
def test_simple_enum_values(self):
enum = SimpleEnum.enum_from_dict("enum", {"one": 111,
"two": 111,
"three": 333})
self.assertTrue(enum.one == enum.one)
self.assertTrue(enum.one.compare_value(enum.one))
self.assertFalse(enum.one == enum.two)
self.assertTrue(enum.one.compare_value(enum.two))
self.assertFalse(enum.two == enum.three)
self.assertFalse(enum.two.compare_value(enum.three))
with self.assertRaises(AttributeError):
_ = enum.four
with self.assertRaises(AttributeError):
enum.one = 2
enum2 = SimpleEnum.enum("enum2", ["one", "two", "three"])
with self.assertRaises(TypeError):
assert enum2.one == enum.one
self.assertTrue("one" in enum)
self.assertEqual(111, enum.one.value)
self.assertFalse("four" in enum)
def test_fixed_format_unpacker_c(self):
unpacker = FixedFormatUnpacker("%-4d%5s%4.1f")
toks = unpacker.unpack("1234hello12.3")
self.assertEqual(3, len(toks))
self.assertEqual(1234, toks[0])
self.assertEqual("hello", toks[1])
self.assertAlmostEqual(12.3, toks[2])
def test_fixed_format_unpacker_fortran(self):
unpacker = FixedFormatUnpacker("I4,A5,F4.1",
FixedFormatUnpacker.FormatStyle.Fortran)
toks = unpacker.unpack("1234hello12.3")
self.assertEqual(3, len(toks))
self.assertEqual(1234, toks[0])
self.assertEqual("hello", toks[1])
self.assertAlmostEqual(12.3, toks[2])
def test_fixed_format_unpacker_fortran_space(self):
unpacker = FixedFormatUnpacker("I4,X3,A5,X2,F4.1",
FixedFormatUnpacker.FormatStyle.Fortran)
toks = unpacker.unpack("1234 x hello x12.3")
self.assertEqual(3, len(toks))
self.assertEqual(1234, toks[0])
self.assertEqual("hello", toks[1])
self.assertAlmostEqual(12.3, toks[2])
def test_fixed_format_unpacker_fortran_repeat(self):
unpacker = FixedFormatUnpacker("2I2,X3,A5,2X,F4.1",
FixedFormatUnpacker.FormatStyle.Fortran)
toks = unpacker.unpack("1234 x hello x12.3")
self.assertEqual(4, len(toks))
self.assertEqual(12, toks[0])
self.assertEqual(34, toks[1])
self.assertEqual("hello", toks[2])
self.assertAlmostEqual(12.3, toks[3])
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 6,681,905,722,751,483,000 | 37.748768 | 98 | 0.552377 | false |
NationalSecurityAgency/ghidra | GhidraBuild/IDAPro/Python/7xx/plugins/xml_exporter.py | 1 | 2785 | ## ###
# IP: GHIDRA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
#---------------------------------------------------------------------
# xmlexp.py - IDA XML Exporter plugin
#---------------------------------------------------------------------
"""
Plugin for IDA which exports a XML PROGRAM document file from a database.
This file must be placed in the IDA plugins directory.
The file idaxml.py must be placed in the IDA python directory.
"""
import ida_auto
import ida_idaapi
import ida_kernwin
import idaxml
import idc
import sys
class XmlExporterPlugin(ida_idaapi.plugin_t):
"""
XML Exporter plugin class
"""
flags = 0
comment = "Export database as XML file"
help = "Export database as XML <PROGRAM> document"
wanted_name = "XML Exporter"
wanted_hotkey = "Ctrl-Shift-x"
def init(self):
"""
init function for XML Exporter plugin.
Returns:
Constant PLUGIN_OK if this IDA version supports the plugin,
else returns PLUGIN_SKIP if this IDA is older than the supported
baseline version.
"""
if idaxml.is_ida_version_supported():
return ida_idaapi.PLUGIN_OK
else:
return ida_idaapi.PLUGIN_SKIP
def run(self, arg):
"""
run function for XML Exporter plugin.
Args:
arg: Integer, non-zero value enables auto-run feature for
IDA batch (no gui) processing mode. Default is 0.
"""
st = idc.set_ida_state(idc.IDA_STATUS_WORK)
xml = idaxml.XmlExporter(arg)
try:
try:
xml.export_xml()
except idaxml.Cancelled:
ida_kernwin.hide_wait_box()
msg = "XML Export cancelled!"
print "\n" + msg
idc.warning(msg)
except:
ida_kernwin.hide_wait_box()
msg = "***** Exception occurred: XML Exporter failed! *****"
print "\n" + msg + "\n", sys.exc_type, sys.exc_value
idc.warning(msg)
finally:
xml.cleanup()
ida_auto.set_ida_state(st)
def term(self):
pass
def PLUGIN_ENTRY():
return XmlExporterPlugin()
| apache-2.0 | -2,619,736,467,100,112,000 | 29.271739 | 76 | 0.571275 | false |
elin-moco/metrics | metrics/etl/tools/pd_transform.py | 1 | 1526 | # -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import re
from urlparse import urlparse
fx_path_pattern = re.compile('(/firefox/)([0-9a-z\.]+)/(whatsnew|firstrun|releasenotes)')
blog_path_pattern = re.compile('(/posts/[0-9]+)(/.*)')
def actual_path(url):
#print url
path = urlparse(url).path
while path.endswith('/'):
path = path[:-1]
fxPathMatch = fx_path_pattern.search(path)
if fxPathMatch:
path = fxPathMatch.group(1) + fxPathMatch.group(3)
blogPathMatch = blog_path_pattern.search(path)
if blogPathMatch:
path = blogPathMatch.group(1)
print path
if path == '':
path = '/'
return path
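# Worked examples of the mapping above (URLs are made up for illustration):
#   actual_path('http://www.mozilla.org/firefox/45.0.2/whatsnew/')   -> '/firefox/whatsnew'
#   actual_path('http://blog.example.org/posts/123/some-post-title') -> '/posts/123'
#   actual_path('http://www.mozilla.org/')                           -> '/'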
def main(argv = []):
df = pd.read_hdf('mocotw.h5', 'fx_download')
actualPathSeries = df['previousPagePath'].apply(actual_path)
print actualPathSeries
df['actualPagePath'] = actualPathSeries
df.to_hdf('mocotw.h5', 'fx_download')
df_sum = df[['actualPagePath', 'pageviews']].groupby('actualPagePath').sum().sort('pageviews', ascending=False)
print df_sum
df_sum.to_hdf('mocotw.h5', 'fx_download_sum')
df_stack = df.groupby(['actualPagePath', 'date']).sum()
df_stack = df_stack.reset_index()
df_stack = df_stack[df_stack.actualPagePath.isin(df_sum[:10].index)]
df_stack = df_stack.pivot(index='date', columns='actualPagePath', values='pageviews')
df_stack = df_stack.fillna(0)
df_stack = df_stack.reset_index()
print df_stack
df_stack.to_hdf('mocotw.h5', 'fx_download_stack')
| bsd-3-clause | 526,834,806,051,038,400 | 28.346154 | 115 | 0.644168 | false |
warp1337/xdemo | xdemo/utilities/generics.py | 1 | 1214 | """
This file is part of XDEMO
Copyright(c) <Florian Lier>
https://github.com/warp1337/xdemo
This file may be licensed under the terms of the
GNU Lesser General Public License Version 3 (the ``LGPL''),
or (at your option) any later version.
Software distributed under the License is distributed
on an ``AS IS'' basis, WITHOUT WARRANTY OF ANY KIND, either
express or implied. See the LGPL for the specific language
governing rights and limitations.
You should have received a copy of the LGPL along with this
program. If not, go to http://www.gnu.org/licenses/lgpl.html
or write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
The development of this software was supported by the
Excellence Cluster EXC 277 Cognitive Interaction Technology.
The Excellence Cluster EXC 277 is a grant of the Deutsche
Forschungsgemeinschaft (DFG) in the context of the German
Excellence Initiative.
Authors: Florian Lier
<flier>@techfak.uni-bielefeld.de
"""
# STD
import os
def represents_int(_input):
try:
int(_input)
return True
except ValueError:
return False
def clear_console():
os.system('cls' if os.name == 'nt' else 'clear') | gpl-3.0 | -7,011,370,291,598,344,000 | 25.413043 | 60 | 0.746293 | false |
KonstantinShemyak/python-javatools | tests/distinfo.py | 2 | 1430 | # This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see
# <http://www.gnu.org/licenses/>.
"""
unit tests for javatools/distinfo.py
author: Konstantin Shemyak <[email protected]>
license: LGPL v.3
"""
import os
from unittest import TestCase
from . import get_data_fn
from javatools.distinfo import main
class DistinfoTest(TestCase):
dist = get_data_fn(os.path.join("test_distinfo", "dist1"))
# classinfo-specific option is accepted:
def test_classinfo_options(self):
self.assertEqual(0, main(["argv0", "-p", self.dist]))
# jarinfo-specific option is accepted:
def test_jarinfo_options(self):
self.assertEqual(0, main(["argv0", "--jar-classes", self.dist]))
# distinfo-specific option is accepted:
def test_distinfo_options(self):
self.assertEqual(0, main(["argv0", "--dist-provides", self.dist]))
| lgpl-3.0 | 6,098,395,069,267,159,000 | 32.255814 | 74 | 0.724476 | false |
SystemRage/py-kms | py-kms/pykms_RpcBind.py | 1 | 7955 | #!/usr/bin/env python3
import logging
import binascii
import uuid
import pykms_RpcBase
from pykms_Dcerpc import MSRPCHeader, MSRPCBindAck
from pykms_Structure import Structure
from pykms_Format import justify, byterize, enco, deco, pretty_printer
#--------------------------------------------------------------------------------------------------------------------------------------------------------
loggersrv = logging.getLogger('logsrv')
uuidNDR32 = uuid.UUID('8a885d04-1ceb-11c9-9fe8-08002b104860')
uuidNDR64 = uuid.UUID('71710533-beba-4937-8319-b5dbef9ccc36')
uuidTime = uuid.UUID('6cb71c2c-9812-4540-0300-000000000000')
uuidEmpty = uuid.UUID('00000000-0000-0000-0000-000000000000')
class CtxItem(Structure):
structure = (
('ContextID', '<H=0'),
('TransItems', 'B=0'),
('Pad', 'B=0'),
('AbstractSyntaxUUID', '16s=""'),
('AbstractSyntaxVer', '<I=0'),
('TransferSyntaxUUID', '16s=""'),
('TransferSyntaxVer', '<I=0'),
)
def ts(self):
return uuid.UUID(bytes_le = enco(self['TransferSyntaxUUID'], 'latin-1'))
class CtxItemResult(Structure):
structure = (
('Result', '<H=0'),
('Reason', '<H=0'),
('TransferSyntaxUUID', '16s=""'),
('TransferSyntaxVer', '<I=0'),
)
def __init__(self, result, reason, tsUUID, tsVer):
Structure.__init__(self)
self['Result'] = result
self['Reason'] = reason
self['TransferSyntaxUUID'] = tsUUID.bytes_le
self['TransferSyntaxVer'] = tsVer
class MSRPCBind(Structure):
class CtxItemArray:
def __init__(self, data):
self.data = data
def __len__(self):
return len(self.data)
def __str__(self):
return self.data
def __getitem__(self, i):
return CtxItem(self.data[(len(CtxItem()) * i):])
_CTX_ITEM_LEN = len(CtxItem())
structure = (
('max_tfrag', '<H=4280'),
('max_rfrag', '<H=4280'),
('assoc_group', '<L=0'),
('ctx_num', 'B=0'),
('Reserved', 'B=0'),
('Reserved2', '<H=0'),
('_ctx_items', '_-ctx_items', 'self["ctx_num"]*self._CTX_ITEM_LEN'),
('ctx_items', ':', CtxItemArray),
)
class handler(pykms_RpcBase.rpcBase):
def parseRequest(self):
request = MSRPCHeader(self.data)
pretty_printer(num_text = 3, where = "srv")
request = byterize(request)
loggersrv.debug("RPC Bind Request Bytes: \n%s\n" % justify(deco(binascii.b2a_hex(self.data), 'utf-8')))
loggersrv.debug("RPC Bind Request: \n%s\n%s\n" % (justify(request.dump(print_to_stdout = False)),
justify(MSRPCBind(request['pduData']).dump(print_to_stdout = False))))
return request
def generateResponse(self, request):
response = MSRPCBindAck()
bind = MSRPCBind(request['pduData'])
response['ver_major'] = request['ver_major']
response['ver_minor'] = request['ver_minor']
response['type'] = self.packetType['bindAck']
response['flags'] = self.packetFlags['firstFrag'] | self.packetFlags['lastFrag'] | self.packetFlags['multiplex']
response['representation'] = request['representation']
response['frag_len'] = 36 + bind['ctx_num'] * 24
response['auth_len'] = request['auth_len']
response['call_id'] = request['call_id']
response['max_tfrag'] = bind['max_tfrag']
response['max_rfrag'] = bind['max_rfrag']
response['assoc_group'] = 0x1063bf3f
port = str(self.srv_config['port'])
response['SecondaryAddrLen'] = len(port) + 1
response['SecondaryAddr'] = port
pad = (4 - ((response["SecondaryAddrLen"] + MSRPCBindAck._SIZE) % 4)) % 4
response['Pad'] = '\0' * pad
response['ctx_num'] = bind['ctx_num']
preparedResponses = {}
preparedResponses[uuidNDR32] = CtxItemResult(0, 0, uuidNDR32, 2)
preparedResponses[uuidNDR64] = CtxItemResult(2, 2, uuidEmpty, 0)
preparedResponses[uuidTime] = CtxItemResult(3, 3, uuidEmpty, 0)
response['ctx_items'] = ''
for i in range (0, bind['ctx_num']):
ts_uuid = bind['ctx_items'][i].ts()
resp = preparedResponses[ts_uuid]
response['ctx_items'] += str(resp)
pretty_printer(num_text = 4, where = "srv")
response = byterize(response)
loggersrv.debug("RPC Bind Response: \n%s\n" % justify(response.dump(print_to_stdout = False)))
loggersrv.debug("RPC Bind Response Bytes: \n%s\n" % justify(deco(binascii.b2a_hex(enco(str(response), 'latin-1')), 'utf-8')))
return response
def generateRequest(self):
firstCtxItem = CtxItem()
firstCtxItem['ContextID'] = 0
firstCtxItem['TransItems'] = 1
firstCtxItem['Pad'] = 0
firstCtxItem['AbstractSyntaxUUID'] = uuid.UUID('51c82175-844e-4750-b0d8-ec255555bc06').bytes_le
firstCtxItem['AbstractSyntaxVer'] = 1
firstCtxItem['TransferSyntaxUUID'] = uuidNDR32.bytes_le
firstCtxItem['TransferSyntaxVer'] = 2
secondCtxItem = CtxItem()
secondCtxItem['ContextID'] = 1
secondCtxItem['TransItems'] = 1
secondCtxItem['Pad'] = 0
secondCtxItem['AbstractSyntaxUUID'] = uuid.UUID('51c82175-844e-4750-b0d8-ec255555bc06').bytes_le
secondCtxItem['AbstractSyntaxVer'] = 1
secondCtxItem['TransferSyntaxUUID'] = uuidTime.bytes_le
secondCtxItem['TransferSyntaxVer'] = 1
bind = MSRPCBind()
bind['max_tfrag'] = 5840
bind['max_rfrag'] = 5840
bind['assoc_group'] = 0
bind['ctx_num'] = 2
bind['ctx_items'] = str(bind.CtxItemArray(str(firstCtxItem) + str(secondCtxItem)))
request = MSRPCHeader()
request['ver_major'] = 5
request['ver_minor'] = 0
request['type'] = self.packetType['bindReq']
request['flags'] = self.packetFlags['firstFrag'] | self.packetFlags['lastFrag'] | self.packetFlags['multiplex']
request['call_id'] = self.srv_config['call_id']
request['pduData'] = str(bind)
pretty_printer(num_text = 0, where = "clt")
bind = byterize(bind)
request = byterize(request)
loggersrv.debug("RPC Bind Request: \n%s\n%s\n" % (justify(request.dump(print_to_stdout = False)),
justify(MSRPCBind(request['pduData']).dump(print_to_stdout = False))))
loggersrv.debug("RPC Bind Request Bytes: \n%s\n" % justify(deco(binascii.b2a_hex(enco(str(request), 'latin-1')), 'utf-8')))
return request
    def parseResponse(self):
        # Note: 'response' is not defined in this scope, so this method would
        # raise a NameError if it were ever called.
        return response
| unlicense | 2,967,289,741,253,012,000 | 44.457143 | 153 | 0.488121 | false |
vadim-ivlev/STUDY | coding/drawtree.py | 1 | 1325 | # VISUALIZATION ----------------------
import networkx as nx
from networkx.drawing.nx_agraph import write_dot, graphviz_layout
import matplotlib.pyplot as plt
def draw_graph(G):
plt.rcParams["figure.figsize"] = [10., 5.]
pos = graphviz_layout(G, prog='dot')
node_labels = nx.get_node_attributes(G, 'name')
nx.draw(G, pos, with_labels=True, labels=node_labels, width=2,
node_size=1000, node_color="orange", alpha=1.0)
lbls = nx.get_edge_attributes(G, 'label')
nx.draw_networkx_edge_labels(G, pos, edge_labels=lbls)
# nx.draw_networkx_nodes(G,pos,node_size=2000, nodelist=['x'])
# nx.draw_networkx_edges(G, pos, alpha=0.9, width=6, edge_color="orange", edgelist=[(1, 'Petya')])
# plt.figure(1)
plt.show()
import uuid
# import random
def build_graph(g, parent_g_node, t, edge_label=None):
# global count
if not t:
return
node = next(uid) # str(uuid.uuid4()) #random.random()
g.add_node(node, name=t.get_value())
if parent_g_node:
g.add_edge(parent_g_node, node, label=edge_label)
left = t.get_left()
right = t.get_right()
if left:
build_graph(g, node, left, 'L')
if right:
build_graph(g, node, right, 'R')
return node
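# --- Added sketch: the original snippet assumes `uid` and `t` are defined ---
# --- elsewhere (e.g. earlier in the notebook). The minimal stand-ins below ---
# --- are guesses that satisfy the interface build_graph() expects. ---
import itertools

class _Node(object):
    """Tiny binary-tree node with the get_value/get_left/get_right interface."""
    def __init__(self, value, left=None, right=None):
        self.value, self.left, self.right = value, left, right
    def get_value(self):
        return self.value
    def get_left(self):
        return self.left
    def get_right(self):
        return self.right

uid = itertools.count()  # unique ids for graph nodes (see next(uid) above)
t = _Node(1, _Node(2, _Node(4), _Node(5)), _Node(3))  # example tree to draw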
G = nx.DiGraph()
root = build_graph(G, None, t)
draw_graph(G)
| mit | 235,742,579,492,465,060 | 23.537037 | 102 | 0.613585 | false |
danielhjames/Booktype | lib/booktype/convert/epub/converter.py | 1 | 18751 | # This file is part of Booktype.
# Copyright (c) 2013 Borko Jandras <[email protected]>
#
# Booktype is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Booktype is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Booktype. If not, see <http://www.gnu.org/licenses/>.
import os
import uuid
import json
import logging
import urlparse
import ebooklib
import datetime
from copy import deepcopy
from lxml import etree
from django.template.base import Template
from django.template.context import Context
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ImproperlyConfigured
from booktype.apps.themes.utils import (
read_theme_style, read_theme_assets, read_theme_asset_content)
from booktype.apps.convert.templatetags.convert_tags import (
get_refines, get_metadata)
from booktype.apps.convert import plugin
from booktype.convert.image_editor_conversion import ImageEditorConversion
from .writer import Epub3Writer, Epub2Writer
from .writerplugins import WriterPlugin, ImageEditorWriterPlugin, CleanupTagsWriterPlugin
from .cover import add_cover, COVER_FILE_NAME
from .constants import (
IMAGES_DIR, STYLES_DIR, FONTS_DIR,
DOCUMENTS_DIR, DEFAULT_LANG, EPUB_DOCUMENT_WIDTH
)
from ..base import BaseConverter
from ..utils.epub import parse_toc_nav
logger = logging.getLogger("booktype.convert.epub")
class Epub3Converter(BaseConverter):
name = 'epub3'
verbose_name = _('EPUB3')
support_section_settings = True
images_color_model = "RGB"
toc_title = 'toc'
default_style = 'style1'
default_lang = DEFAULT_LANG
writer_plugin_class = WriterPlugin
css_dir = os.path.join(os.path.dirname(__file__), 'styles/')
_theme_suffix = 'epub'
_images_dir = 'images/'
# valid extensions to assign right mimetype
WOFF_FONTS = ['.woff']
OPENTYPE_FONTS = ['.otf', '.otc', '.ttf', '.ttc']
def __init__(self, *args, **kwargs):
super(Epub3Converter, self).__init__(*args, **kwargs)
self.images_path = os.path.join(self.sandbox_path, self._images_dir)
self.theme_name = ''
self.theme_plugin = None
self._bk_image_editor_conversion = None
def _get_theme_plugin(self):
return plugin.load_theme_plugin(self._theme_suffix, self.theme_name)
def _init_theme_plugin(self):
if 'theme' in self.config:
self.theme_name = self.config['theme'].get('id', '')
tp = self._get_theme_plugin()
if tp:
self.theme_plugin = tp(self)
else:
self.theme_name = None
def pre_convert(self, original_book, book):
super(Epub3Converter, self).pre_convert(original_book)
if self.theme_plugin:
try:
self.theme_plugin.pre_convert(original_book, book)
except NotImplementedError:
pass
        # TODO: move this to a more proper place in the future, and create a plugin for it
self._bk_image_editor_conversion = ImageEditorConversion(
original_book, EPUB_DOCUMENT_WIDTH, self
)
def post_convert(self, original_book, book, output_path):
if self.theme_plugin:
try:
self.theme_plugin.post_convert(original_book, book, output_path)
except NotImplementedError:
pass
def convert(self, original_book, output_path):
convert_start = datetime.datetime.now()
logger.debug('[EPUB] {}.convert'.format(self.__class__.__name__))
self._init_theme_plugin()
epub_book = ebooklib.epub.EpubBook()
epub_book.FOLDER_NAME = 'OEBPS'
self.pre_convert(original_book, epub_book)
epub_book.uid = original_book.uid
epub_book.title = original_book.title
# we should define better uri for this
epub_book.add_prefix('bkterms', 'http://booktype.org/')
epub_book.metadata = deepcopy(original_book.metadata)
epub_book.toc = []
self.direction = self._get_dir(epub_book)
logger.debug('[EPUB] Edit metadata')
self._edit_metadata(epub_book)
logger.debug('[EPUB] Copy items')
self._copy_items(epub_book, original_book)
logger.debug('[EPUB] Make navigation')
self._make_nav(epub_book, original_book)
logger.debug('[EPUB] Add cover')
self._add_cover(epub_book)
if self.theme_name:
self._add_theme_assets(epub_book)
self.post_convert(original_book, epub_book, output_path)
logger.debug('[EPUB] Setting writer plugins and options')
writer_options = {'plugins': self._get_plugins(epub_book, original_book)}
logger.debug('[EPUB] Writer')
writer_class = self._get_writer_class()
epub_writer = writer_class(output_path, epub_book, options=writer_options)
logger.debug('[EPUB] Process')
epub_writer.process()
logger.debug('[EPUB] Write')
epub_writer.write()
logger.debug('[END] {}.convert'.format(self.__class__.__name__))
convert_end = datetime.datetime.now()
logger.info('Conversion lasted %s.', convert_end - convert_start)
return {"size": os.path.getsize(output_path)}
def _get_dir(self, epub_book):
m = epub_book.metadata[ebooklib.epub.NAMESPACES["OPF"]]
def _check(x):
return x[1] and x[1].get('property', '') == 'bkterms:dir'
values = filter(_check, m[None])
if len(values) > 0 and len(values[0]) > 0:
return values[0][0].lower()
return 'ltr'
def _get_writer_plugin_class(self):
"""Returns the writer plugin class to used by writer"""
if self.writer_plugin_class:
return self.writer_plugin_class
raise ImproperlyConfigured
def _get_writer_plugin(self, epub_book, original_book):
"""Returns the writer plugin instance with some default options already set up"""
writer_plugin = self._get_writer_plugin_class()()
opts = {
'css': self._add_css_styles(epub_book),
'style': self.config.get('style', self.default_style),
'lang': self._get_language(original_book),
'preview': self.config.get('preview', True)
}
writer_plugin.options.update(opts)
return writer_plugin
def _get_plugins(self, epub_book, original_book):
"""Returns the plugins to be used by writer instance"""
writer_plugin = self._get_writer_plugin(epub_book, original_book)
image_editor_writer_plugin = ImageEditorWriterPlugin(converter=self)
cleanup_tags_writerplugin = CleanupTagsWriterPlugin()
return [writer_plugin, image_editor_writer_plugin, cleanup_tags_writerplugin]
def _get_writer_class(self):
"""Simply returns the default writer class to be used by the converter"""
return Epub3Writer
def _get_language(self, original_book):
"""
        Returns the book language. If no language is found in the metadata
        (from settings), the default language set on the class is used.
"""
metadata = self._get_data(original_book)
default = metadata.get('language', self.default_lang)
return self.config.get('lang', default)
def _edit_metadata(self, epub_book):
"""Modifies original metadata."""
# delete existing 'modified' tag
m = epub_book.metadata[ebooklib.epub.NAMESPACES["OPF"]]
m[None] = filter(lambda (_, x): not (isinstance(x, dict) and x.get("property") == "dcterms:modified"), m[None]) # noqa
# we also need to remove the `additional metadata` which here is just garbage
m[None] = filter(lambda (_, x): not (isinstance(x, dict) and x.get("property").startswith("add_meta_terms:")), m[None]) # noqa
# NOTE: probably going to extend this function in future
def _make_nav(self, epub_book, original_book):
"""Creates navigational stuff (guide, ncx, nav) by copying the original."""
# maps TOC items to sections and links
self._num_of_text = 0
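        # Shape of parse_toc_nav() items as handled by the mapper below
        # (inferred from this code, not a formal spec):
        #   ('Chapter title', 'chapter.xhtml')                       -> link
        #   ('Section title', [('Child title', 'child.xhtml'), ...]) -> section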
def mapper(toc_item):
add_to_guide = True
if isinstance(toc_item[1], list):
section_title, chapters = toc_item
section = ebooklib.epub.Section(section_title)
links = map(mapper, chapters)
return (section, links)
else:
chapter_title, chapter_href = toc_item
chapter_href = "{}/{}".format(DOCUMENTS_DIR, chapter_href)
chapter_path = urlparse.urlparse(chapter_href).path
book_item = self.items_by_path[chapter_path]
book_item.title = chapter_title
if self._num_of_text > 0:
add_to_guide = False
self._num_of_text += 1
if add_to_guide:
epub_book.guide.append({
'type': 'text',
'href': chapter_href,
'title': chapter_title,
})
return ebooklib.epub.Link(
href=chapter_href, title=chapter_title, uid=book_item.id)
# filters-out empty sections
def _empty_sec(item):
if isinstance(item, tuple) and len(item[1]) == 0:
return False
else:
return True
# filters-out existing cover
def _skip_cover(item):
if type(item[1]) in (str, unicode):
if os.path.basename(item[1]) == COVER_FILE_NAME:
return False
return True
toc = filter(_skip_cover, parse_toc_nav(original_book))
toc = map(mapper, toc)
# we don't allow empty sections just because epubcheck will
# raise an error at the moment of evaluating the toc.ncx file
toc = filter(_empty_sec, toc)
epub_book.toc = toc
def _copy_items(self, epub_book, original_book):
"""Populates the book by copying items from the original book"""
self.items_by_path = {}
for orig_item in original_book.items:
item = deepcopy(orig_item)
item_type = item.get_type()
file_name = os.path.basename(item.file_name)
# do not copy cover
if self._is_cover_item(item):
continue
if item_type == ebooklib.ITEM_IMAGE:
item.file_name = '{}/{}'.format(IMAGES_DIR, file_name)
elif item_type == ebooklib.ITEM_STYLE:
item.file_name = '{}/{}'.format(STYLES_DIR, file_name)
elif item_type == ebooklib.ITEM_DOCUMENT:
item.file_name = '{}/{}'.format(DOCUMENTS_DIR, file_name)
if isinstance(item, ebooklib.epub.EpubNav):
epub_book.spine.insert(0, item)
epub_book.guide.insert(0, {
'type': 'toc',
'href': file_name,
'title': self.config.get('toc_title', self.toc_title)
})
item.file_name = file_name
else:
epub_book.spine.append(item)
if self.theme_plugin:
try:
content = ebooklib.utils.parse_html_string(item.content)
cnt = self.theme_plugin.fix_content(content)
item.content = etree.tostring(cnt, method='html', encoding='utf-8', pretty_print=True)
except NotImplementedError:
pass
            # TODO: move this to a more proper place in the future, and create a plugin for it
if self._bk_image_editor_conversion:
try:
content = ebooklib.utils.parse_html_string(item.content)
cnt = self._bk_image_editor_conversion.convert(content)
item.content = etree.tostring(cnt, method='html', encoding='utf-8', pretty_print=True)
except:
logger.exception("epub ImageEditorConversion failed")
if isinstance(item, ebooklib.epub.EpubNcx):
item = ebooklib.epub.EpubNcx()
epub_book.add_item(item)
self.items_by_path[item.file_name] = item
def _add_cover(self, epub_book):
"""Adds cover image if present in config to the resulting EPUB"""
if 'cover_image' in self.config.keys():
cover_asset = self.get_asset(self.config['cover_image'])
add_cover(
epub_book, cover_asset, self.config.get('lang', DEFAULT_LANG))
def _get_theme_style(self):
return read_theme_style(self.theme_name, self._theme_suffix)
def _get_default_style(self):
return render_to_string('themes/style_{}.css'.format(self._theme_suffix), {'dir': self.direction})
def _add_css_styles(self, epub_book):
"""Adds default css styles and custom css text if exists in config"""
book_css = []
try:
epub_book.add_item(
ebooklib.epub.EpubItem(
uid='default.css',
content=self._get_default_style(),
file_name='{}/{}'.format(STYLES_DIR, 'default.css'),
media_type='text/css'
)
)
book_css.append('default.css')
except Exception as e:
logger.info('Default style was not added %s.', e)
if self.theme_name:
content = self._get_theme_style()
if self.theme_name == 'custom':
try:
data = json.loads(self.config['theme']['custom'].encode('utf8'))
tmpl = Template(content)
ctx = Context(data)
content = tmpl.render(ctx)
except:
logger.exception("Fails with custom theme.")
item = ebooklib.epub.EpubItem(
uid='theme.css',
content=content,
file_name='{}/{}'.format(STYLES_DIR, 'theme.css'),
media_type='text/css'
)
epub_book.add_item(item)
book_css.append('theme.css')
# we need to add css from publishing settings screen
settings_style = self.config.get('settings', {}).get('styling', None)
if settings_style:
item = ebooklib.epub.EpubItem(
uid='custom_style.css',
content=settings_style,
file_name='{}/{}'.format(STYLES_DIR, 'custom_style.css'),
media_type='text/css'
)
epub_book.add_item(item)
book_css.append('custom_style.css')
return book_css
def _get_theme_assets(self):
return read_theme_assets(self.theme_name, self._theme_suffix)
def _add_theme_assets(self, epub_book):
assets = self._get_theme_assets()
for asset_type, asset_list in assets.iteritems():
if asset_type == 'images':
for image_name in asset_list:
name = os.path.basename(image_name)
content = read_theme_asset_content(self.theme_name, image_name)
if content:
image = ebooklib.epub.EpubImage()
image.file_name = "{}/{}".format(IMAGES_DIR, name)
image.id = 'theme_image_%s' % uuid.uuid4().hex[:5]
image.set_content(content)
epub_book.add_item(image)
elif asset_type == 'fonts':
for font_name in asset_list:
name = os.path.basename(font_name)
extension = os.path.splitext(font_name)[-1].lower()
content = read_theme_asset_content(self.theme_name, font_name)
if content:
font = ebooklib.epub.EpubItem()
font.file_name = "{}/{}".format(FONTS_DIR, name)
font.set_content(content)
# try to set the right font media type
# http://www.idpf.org/epub/301/spec/epub-publications.html#sec-core-media-types
if extension in self.OPENTYPE_FONTS:
font.media_type = 'application/vnd.ms-opentype'
elif extension in self.WOFF_FONTS:
font.media_type = 'application/font-woff'
epub_book.add_item(font)
def _get_data(self, book):
"""Returns default data for the front and end matter templates.
It mainly has default metadata from the book.
:Returns:
- Dictionary with default data for the templates
"""
return {
"title": get_refines(book.metadata, 'title-type', 'main'),
"subtitle": get_refines(book.metadata, 'title-type', 'subtitle'),
"shorttitle": get_refines(book.metadata, 'title-type', 'short'),
"author": get_refines(book.metadata, 'role', 'aut'),
"publisher": get_metadata(book.metadata, 'publisher'),
"isbn": get_metadata(book.metadata, 'identifier'),
"language": get_metadata(book.metadata, 'language'),
"metadata": book.metadata
}
def _is_cover_item(self, item):
"""Determines if an given item is cover type"""
file_name = os.path.basename(item.file_name)
cover_types = [
ebooklib.epub.EpubCover,
ebooklib.epub.EpubCoverHtml
]
return (type(item) in cover_types or file_name == 'cover.xhtml')
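# Rough usage sketch (added for illustration; converters are normally created
# and driven by Booktype's conversion framework, so constructor arguments are
# elided here rather than guessed):
#
#   converter = Epub3Converter(...)   # args defined by BaseConverter
#   result = converter.convert(original_book, '/tmp/book.epub')
#   result['size']                    # bytes written, per convert() above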
class Epub2Converter(Epub3Converter):
name = 'epub2'
verbose_name = _('EPUB2')
support_section_settings = True
images_color_model = "RGB"
writer_plugin_class = WriterPlugin
def __init__(self, *args, **kwargs):
super(Epub2Converter, self).__init__(*args, **kwargs)
def _get_writer_class(self):
return Epub2Writer
| agpl-3.0 | 5,059,919,701,820,894,000 | 34.921456 | 135 | 0.575169 | false |
ojarva/home-info-display | display_control_consumer/run.py | 1 | 1488 | from setproctitle import setproctitle
import json
import os
import redis
import subprocess
import time
class DisplayControlConsumer(object):
def __init__(self, redis_host, redis_port):
self.redis_instance = redis.StrictRedis(host=redis_host, port=redis_port)
self.env = {"DISPLAY": ":0"}
def get_brightness(self):
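        # "xrandr --verbose" output contains a per-output line of the form
        # "        Brightness: 1.0"; the loop below scans for it.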
p = subprocess.Popen(["xrandr", "--verbose"],
env=self.env, stdout=subprocess.PIPE)
(stdout, _) = p.communicate()
for line in stdout.split("\n"):
if "Brightness" in line:
return float(line.strip().split(": ")[1])
def set_brightness(self, brightness):
p = subprocess.Popen(["xrandr", "--q1", "--output", "HDMI-0", "--brightness", brightness], env=self.env)
p.wait()
self.redis_instance.publish("home:broadcast:generic", json.dumps({"key": "display_brightness", "content": brightness}))
def run(self):
pubsub = self.redis_instance.pubsub(ignore_subscribe_messages=True)
pubsub.subscribe("display-control-set-brightness")
        for item in pubsub.listen():
            # React only when a brightness message arrives via pubsub
            self.set_brightness(item["data"])
def main():
setproctitle("display-control-consumer: run")
redis_host = os.environ["REDIS_HOST"]
redis_port = os.environ["REDIS_PORT"]
dcc = DisplayControlConsumer(redis_host, redis_port)
dcc.run()
if __name__ == '__main__':
main()
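# Manual trigger sketch (added; the channel name is taken from run() above):
#   redis-cli PUBLISH display-control-set-brightness 0.7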
| bsd-3-clause | 4,833,940,609,596,993,000 | 32.818182 | 127 | 0.62164 | false |
3dfxsoftware/cbss-addons | account_aged_partner_balance_vw/wizard/wizard_open_move_line.py | 1 | 4342 | # -*- encoding: utf-8 -*-
###########################################################################
# Module Written to OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Vauxoo - http://www.vauxoo.com/
# All Rights Reserved.
# info Vauxoo ([email protected])
############################################################################
# Coded by: moylop260 ([email protected])
############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import pooler
import wizard
class wizard_open_move_line(wizard.interface):
def _open_window(self, cr, uid, data, context={}):
if not context:
context = {}
mod_obj = pooler.get_pool(cr.dbname).get('ir.model.data')
act_obj = pooler.get_pool(cr.dbname).get('ir.actions.act_window')
aged_partner_balance_vw_obj = pooler.get_pool(
cr.dbname).get('account.aged.partner.balance.vw')
partner_ids = [aged_partner_balance_vw.partner_id and aged_partner_balance_vw.partner_id.id or False for aged_partner_balance_vw in aged_partner_balance_vw_obj.browse(
cr, uid, data['ids'], context=context)]
# result = mod_obj._get_id(cr, uid, 'account',
# 'action_account_moves_all_a')
result = mod_obj._get_id(cr, uid, 'account', 'action_move_line_select')
id = mod_obj.read(cr, uid, [result], ['res_id'])[0]['res_id']
result = act_obj.read(cr, uid, [id])[0]
# result['context'] = {'partner_id': partner_ids}
# result['domain'] = [('partner_id','in',partner_ids),
# ('account_id.type','=','receivable')]
where_query = []
days_due_start = context.get('days_due_start', False)
if not days_due_start is False:
where_query.append('days_due >= %d' % (days_due_start))
days_due_end = context.get('days_due_end', False)
if not days_due_end is False:
where_query.append('days_due <= %d' % (days_due_end))
# where_query_str = (where_query and ' WHERE ' or '') + ' AND '.join(
# where_query )
where_query_str = (
where_query and ' AND ' or '') + ' AND '.join(where_query)
query = """SELECT l.id as id--, l.partner_id, l.company_id
FROM account_move_line l
INNER JOIN
(
SELECT id, EXTRACT(DAY FROM (now() - COALESCE(lt.date_maturity,lt.date))) AS days_due
FROM account_move_line lt
) l2
ON l2.id = l.id
INNER JOIN account_account
ON account_account.id = l.account_id
INNER JOIN res_company
ON account_account.company_id = res_company.id
INNER JOIN account_move
ON account_move.id = l.move_id
WHERE account_account.active
AND (account_account.type IN ('receivable'))
AND (l.reconcile_id IS NULL)
AND account_move.state = 'posted'
AND l.reconcile_id is null --and l.currency_id is null
"""+where_query_str
cr.execute(query)
res = cr.fetchall()
move_ids = [r[0] for r in res]
result['domain'] = [('partner_id', 'in', partner_ids), (
'id', 'in', move_ids)]
return result
states = {
'init': {
'actions': [],
'result': {'type': 'action', 'action': _open_window, 'state': 'end'}
}
}
wizard_open_move_line('wizard.open.move.line')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| gpl-2.0 | 8,739,498,346,271,963,000 | 45.191489 | 175 | 0.546292 | false |
mihaip/intersquares | app/third_party/pytz/tests/test_tzinfo.py | 1 | 25046 | # -*- coding: ascii -*-
import sys, os, os.path
import unittest, doctest
try:
import cPickle as pickle
except ImportError:
import pickle
from datetime import datetime, time, timedelta, tzinfo
import warnings
if __name__ == '__main__':
# Only munge path if invoked as a script. Testrunners should have setup
# the paths already
sys.path.insert(0, os.path.abspath(os.path.join(os.pardir, os.pardir)))
import pytz
from pytz import reference
from pytz.tzfile import _byte_string
from pytz.tzinfo import StaticTzInfo
# I test for expected version to ensure the correct version of pytz is
# actually being tested.
EXPECTED_VERSION='2011h'
fmt = '%Y-%m-%d %H:%M:%S %Z%z'
NOTIME = timedelta(0)
# GMT is a tzinfo.StaticTzInfo--the class we primarily want to test--while
# UTC is reference implementation. They both have the same timezone meaning.
UTC = pytz.timezone('UTC')
GMT = pytz.timezone('GMT')
assert isinstance(GMT, StaticTzInfo), 'GMT is no longer a StaticTzInfo'
def prettydt(dt):
"""datetime as a string using a known format.
We don't use strftime as it doesn't handle years earlier than 1900
per http://bugs.python.org/issue1777412
"""
if dt.utcoffset() >= timedelta(0):
offset = '+%s' % (dt.utcoffset(),)
else:
offset = '-%s' % (-1 * dt.utcoffset(),)
return '%04d-%02d-%02d %02d:%02d:%02d %s %s' % (
dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.tzname(), offset)
try:
unicode
except NameError:
# Python 3.x doesn't have unicode(), making writing code
# for Python 2.3 and Python 3.x a pain.
unicode = str
class BasicTest(unittest.TestCase):
def testVersion(self):
# Ensuring the correct version of pytz has been loaded
self.assertEqual(EXPECTED_VERSION, pytz.__version__,
'Incorrect pytz version loaded. Import path is stuffed '
'or this test needs updating. (Wanted %s, got %s)'
% (EXPECTED_VERSION, pytz.__version__)
)
def testGMT(self):
now = datetime.now(tz=GMT)
self.assertTrue(now.utcoffset() == NOTIME)
self.assertTrue(now.dst() == NOTIME)
self.assertTrue(now.timetuple() == now.utctimetuple())
self.assertTrue(now==now.replace(tzinfo=UTC))
def testReferenceUTC(self):
now = datetime.now(tz=UTC)
self.assertTrue(now.utcoffset() == NOTIME)
self.assertTrue(now.dst() == NOTIME)
self.assertTrue(now.timetuple() == now.utctimetuple())
def testUnknownOffsets(self):
# This tzinfo behavior is required to make
# datetime.time.{utcoffset, dst, tzname} work as documented.
dst_tz = pytz.timezone('US/Eastern')
# This information is not known when we don't have a date,
# so return None per API.
self.assertTrue(dst_tz.utcoffset(None) is None)
self.assertTrue(dst_tz.dst(None) is None)
# We don't know the abbreviation, but this is still a valid
# tzname per the Python documentation.
self.assertEqual(dst_tz.tzname(None), 'US/Eastern')
def clearCache(self):
pytz._tzinfo_cache.clear()
def testUnicodeTimezone(self):
# We need to ensure that cold lookups work for both Unicode
# and traditional strings, and that the desired singleton is
# returned.
self.clearCache()
eastern = pytz.timezone(unicode('US/Eastern'))
self.assertTrue(eastern is pytz.timezone('US/Eastern'))
self.clearCache()
eastern = pytz.timezone('US/Eastern')
self.assertTrue(eastern is pytz.timezone(unicode('US/Eastern')))
class PicklingTest(unittest.TestCase):
def _roundtrip_tzinfo(self, tz):
p = pickle.dumps(tz)
unpickled_tz = pickle.loads(p)
self.assertTrue(tz is unpickled_tz, '%s did not roundtrip' % tz.zone)
def _roundtrip_datetime(self, dt):
# Ensure that the tzinfo attached to a datetime instance
# is identical to the one returned. This is important for
# DST timezones, as some state is stored in the tzinfo.
tz = dt.tzinfo
p = pickle.dumps(dt)
unpickled_dt = pickle.loads(p)
unpickled_tz = unpickled_dt.tzinfo
self.assertTrue(tz is unpickled_tz, '%s did not roundtrip' % tz.zone)
def testDst(self):
tz = pytz.timezone('Europe/Amsterdam')
dt = datetime(2004, 2, 1, 0, 0, 0)
for localized_tz in tz._tzinfos.values():
self._roundtrip_tzinfo(localized_tz)
self._roundtrip_datetime(dt.replace(tzinfo=localized_tz))
def testRoundtrip(self):
dt = datetime(2004, 2, 1, 0, 0, 0)
for zone in pytz.all_timezones:
tz = pytz.timezone(zone)
self._roundtrip_tzinfo(tz)
def testDatabaseFixes(self):
# Hack the pickle to make it refer to a timezone abbreviation
# that does not match anything. The unpickler should be able
# to repair this case
tz = pytz.timezone('Australia/Melbourne')
p = pickle.dumps(tz)
tzname = tz._tzname
hacked_p = p.replace(_byte_string(tzname), _byte_string('???'))
self.assertNotEqual(p, hacked_p)
unpickled_tz = pickle.loads(hacked_p)
self.assertTrue(tz is unpickled_tz)
# Simulate a database correction. In this case, the incorrect
# data will continue to be used.
p = pickle.dumps(tz)
new_utcoffset = tz._utcoffset.seconds + 42
# Python 3 introduced a new pickle protocol where numbers are stored in
# hexadecimal representation. Here we extract the pickle
# representation of the number for the current Python version.
old_pickle_pattern = pickle.dumps(tz._utcoffset.seconds)[3:-1]
new_pickle_pattern = pickle.dumps(new_utcoffset)[3:-1]
hacked_p = p.replace(old_pickle_pattern, new_pickle_pattern)
self.assertNotEqual(p, hacked_p)
unpickled_tz = pickle.loads(hacked_p)
self.assertEqual(unpickled_tz._utcoffset.seconds, new_utcoffset)
self.assertTrue(tz is not unpickled_tz)
def testOldPickles(self):
# Ensure that applications serializing pytz instances as pickles
# have no troubles upgrading to a new pytz release. These pickles
# where created with pytz2006j
east1 = pickle.loads(_byte_string(
"cpytz\n_p\np1\n(S'US/Eastern'\np2\nI-18000\n"
"I0\nS'EST'\np3\ntRp4\n."
))
east2 = pytz.timezone('US/Eastern')
self.assertTrue(east1 is east2)
# Confirm changes in name munging between 2006j and 2007c cause
# no problems.
pap1 = pickle.loads(_byte_string(
"cpytz\n_p\np1\n(S'America/Port_minus_au_minus_Prince'"
"\np2\nI-17340\nI0\nS'PPMT'\np3\ntRp4\n."))
pap2 = pytz.timezone('America/Port-au-Prince')
self.assertTrue(pap1 is pap2)
gmt1 = pickle.loads(_byte_string(
"cpytz\n_p\np1\n(S'Etc/GMT_plus_10'\np2\ntRp3\n."))
gmt2 = pytz.timezone('Etc/GMT+10')
self.assertTrue(gmt1 is gmt2)
class USEasternDSTStartTestCase(unittest.TestCase):
tzinfo = pytz.timezone('US/Eastern')
    # The instant of the DST changeover (2:00 EST -> 3:00 EDT on 2002-04-07), in UTC
transition_time = datetime(2002, 4, 7, 7, 0, 0, tzinfo=UTC)
# Increase for 'flexible' DST transitions due to 1 minute granularity
# of Python's datetime library
instant = timedelta(seconds=1)
# before transition
before = {
'tzname': 'EST',
'utcoffset': timedelta(hours = -5),
'dst': timedelta(hours = 0),
}
# after transition
after = {
'tzname': 'EDT',
'utcoffset': timedelta(hours = -4),
'dst': timedelta(hours = 1),
}
def _test_tzname(self, utc_dt, wanted):
tzname = wanted['tzname']
dt = utc_dt.astimezone(self.tzinfo)
self.assertEqual(dt.tzname(), tzname,
'Expected %s as tzname for %s. Got %s' % (
tzname, str(utc_dt), dt.tzname()
)
)
def _test_utcoffset(self, utc_dt, wanted):
utcoffset = wanted['utcoffset']
dt = utc_dt.astimezone(self.tzinfo)
self.assertEqual(
dt.utcoffset(), wanted['utcoffset'],
'Expected %s as utcoffset for %s. Got %s' % (
utcoffset, utc_dt, dt.utcoffset()
)
)
def _test_dst(self, utc_dt, wanted):
dst = wanted['dst']
dt = utc_dt.astimezone(self.tzinfo)
self.assertEqual(dt.dst(),dst,
'Expected %s as dst for %s. Got %s' % (
dst, utc_dt, dt.dst()
)
)
def test_arithmetic(self):
utc_dt = self.transition_time
for days in range(-420, 720, 20):
delta = timedelta(days=days)
# Make sure we can get back where we started
dt = utc_dt.astimezone(self.tzinfo)
dt2 = dt + delta
dt2 = dt2 - delta
self.assertEqual(dt, dt2)
# Make sure arithmetic crossing DST boundaries ends
# up in the correct timezone after normalization
utc_plus_delta = (utc_dt + delta).astimezone(self.tzinfo)
local_plus_delta = self.tzinfo.normalize(dt + delta)
self.assertEqual(
prettydt(utc_plus_delta),
prettydt(local_plus_delta),
'Incorrect result for delta==%d days. Wanted %r. Got %r'%(
days,
prettydt(utc_plus_delta),
prettydt(local_plus_delta),
)
)
def _test_all(self, utc_dt, wanted):
self._test_utcoffset(utc_dt, wanted)
self._test_tzname(utc_dt, wanted)
self._test_dst(utc_dt, wanted)
def testDayBefore(self):
self._test_all(
self.transition_time - timedelta(days=1), self.before
)
def testTwoHoursBefore(self):
self._test_all(
self.transition_time - timedelta(hours=2), self.before
)
def testHourBefore(self):
self._test_all(
self.transition_time - timedelta(hours=1), self.before
)
def testInstantBefore(self):
self._test_all(
self.transition_time - self.instant, self.before
)
def testTransition(self):
self._test_all(
self.transition_time, self.after
)
def testInstantAfter(self):
self._test_all(
self.transition_time + self.instant, self.after
)
def testHourAfter(self):
self._test_all(
self.transition_time + timedelta(hours=1), self.after
)
def testTwoHoursAfter(self):
self._test_all(
            self.transition_time + timedelta(hours=2), self.after
)
def testDayAfter(self):
self._test_all(
self.transition_time + timedelta(days=1), self.after
)
class USEasternDSTEndTestCase(USEasternDSTStartTestCase):
tzinfo = pytz.timezone('US/Eastern')
transition_time = datetime(2002, 10, 27, 6, 0, 0, tzinfo=UTC)
before = {
'tzname': 'EDT',
'utcoffset': timedelta(hours = -4),
'dst': timedelta(hours = 1),
}
after = {
'tzname': 'EST',
'utcoffset': timedelta(hours = -5),
'dst': timedelta(hours = 0),
}
class USEasternEPTStartTestCase(USEasternDSTStartTestCase):
transition_time = datetime(1945, 8, 14, 23, 0, 0, tzinfo=UTC)
before = {
'tzname': 'EWT',
'utcoffset': timedelta(hours = -4),
'dst': timedelta(hours = 1),
}
after = {
'tzname': 'EPT',
'utcoffset': timedelta(hours = -4),
'dst': timedelta(hours = 1),
}
class USEasternEPTEndTestCase(USEasternDSTStartTestCase):
transition_time = datetime(1945, 9, 30, 6, 0, 0, tzinfo=UTC)
before = {
'tzname': 'EPT',
'utcoffset': timedelta(hours = -4),
'dst': timedelta(hours = 1),
}
after = {
'tzname': 'EST',
'utcoffset': timedelta(hours = -5),
'dst': timedelta(hours = 0),
}
class WarsawWMTEndTestCase(USEasternDSTStartTestCase):
# In 1915, Warsaw changed from Warsaw to Central European time.
    # This involved the clocks being set backwards, causing an end-of-DST
# like situation without DST being involved.
tzinfo = pytz.timezone('Europe/Warsaw')
transition_time = datetime(1915, 8, 4, 22, 36, 0, tzinfo=UTC)
before = {
'tzname': 'WMT',
'utcoffset': timedelta(hours=1, minutes=24),
'dst': timedelta(0),
}
after = {
'tzname': 'CET',
'utcoffset': timedelta(hours=1),
'dst': timedelta(0),
}
class VilniusWMTEndTestCase(USEasternDSTStartTestCase):
# At the end of 1916, Vilnius changed timezones putting its clock
# forward by 11 minutes 35 seconds. Neither timezone was in DST mode.
tzinfo = pytz.timezone('Europe/Vilnius')
instant = timedelta(seconds=31)
transition_time = datetime(1916, 12, 31, 22, 36, 00, tzinfo=UTC)
before = {
'tzname': 'WMT',
'utcoffset': timedelta(hours=1, minutes=24),
'dst': timedelta(0),
}
after = {
'tzname': 'KMT',
'utcoffset': timedelta(hours=1, minutes=36), # Really 1:35:36
'dst': timedelta(0),
}
class VilniusCESTStartTestCase(USEasternDSTStartTestCase):
    # In 1941, Vilnius changed from MSK to CEST, switching to summer
# time while simultaneously reducing its UTC offset by two hours,
# causing the clocks to go backwards for this summer time
# switchover.
tzinfo = pytz.timezone('Europe/Vilnius')
transition_time = datetime(1941, 6, 23, 21, 00, 00, tzinfo=UTC)
before = {
'tzname': 'MSK',
'utcoffset': timedelta(hours=3),
'dst': timedelta(0),
}
after = {
'tzname': 'CEST',
'utcoffset': timedelta(hours=2),
'dst': timedelta(hours=1),
}
class LondonHistoryStartTestCase(USEasternDSTStartTestCase):
# The first known timezone transition in London was in 1847 when
    # clocks were synchronized to GMT. However, we currently only
    # understand v1 format tzfile(5) files, which do not handle years
# this far in the past, so our earliest known transition is in
# 1916.
tzinfo = pytz.timezone('Europe/London')
# transition_time = datetime(1847, 12, 1, 1, 15, 00, tzinfo=UTC)
# before = {
# 'tzname': 'LMT',
# 'utcoffset': timedelta(minutes=-75),
# 'dst': timedelta(0),
# }
# after = {
# 'tzname': 'GMT',
# 'utcoffset': timedelta(0),
# 'dst': timedelta(0),
# }
transition_time = datetime(1916, 5, 21, 2, 00, 00, tzinfo=UTC)
before = {
'tzname': 'GMT',
'utcoffset': timedelta(0),
'dst': timedelta(0),
}
after = {
'tzname': 'BST',
'utcoffset': timedelta(hours=1),
'dst': timedelta(hours=1),
}
class LondonHistoryEndTestCase(USEasternDSTStartTestCase):
# Timezone switchovers are projected into the future, even
# though no official statements exist or could be believed even
# if they did exist. We currently only check the last known
# transition in 2037, as we are still using v1 format tzfile(5)
# files.
tzinfo = pytz.timezone('Europe/London')
# transition_time = datetime(2499, 10, 25, 1, 0, 0, tzinfo=UTC)
transition_time = datetime(2037, 10, 25, 1, 0, 0, tzinfo=UTC)
before = {
'tzname': 'BST',
'utcoffset': timedelta(hours=1),
'dst': timedelta(hours=1),
}
after = {
'tzname': 'GMT',
'utcoffset': timedelta(0),
'dst': timedelta(0),
}
class NoumeaHistoryStartTestCase(USEasternDSTStartTestCase):
# Noumea adopted a whole hour offset in 1912. Previously
# it was 11 hours, 5 minutes and 48 seconds off UTC. However,
# due to limitations of the Python datetime library, we need
# to round that to 11 hours 6 minutes.
tzinfo = pytz.timezone('Pacific/Noumea')
transition_time = datetime(1912, 1, 12, 12, 54, 12, tzinfo=UTC)
before = {
'tzname': 'LMT',
'utcoffset': timedelta(hours=11, minutes=6),
'dst': timedelta(0),
}
after = {
'tzname': 'NCT',
'utcoffset': timedelta(hours=11),
'dst': timedelta(0),
}
class NoumeaDSTEndTestCase(USEasternDSTStartTestCase):
# Noumea dropped DST in 1997.
tzinfo = pytz.timezone('Pacific/Noumea')
transition_time = datetime(1997, 3, 1, 15, 00, 00, tzinfo=UTC)
before = {
'tzname': 'NCST',
'utcoffset': timedelta(hours=12),
'dst': timedelta(hours=1),
}
after = {
'tzname': 'NCT',
'utcoffset': timedelta(hours=11),
'dst': timedelta(0),
}
class NoumeaNoMoreDSTTestCase(NoumeaDSTEndTestCase):
    # Noumea dropped DST in 1997. Here we test that it stops occurring.
transition_time = (
NoumeaDSTEndTestCase.transition_time + timedelta(days=365*10))
before = NoumeaDSTEndTestCase.after
after = NoumeaDSTEndTestCase.after
class TahitiTestCase(USEasternDSTStartTestCase):
# Tahiti has had a single transition in its history.
tzinfo = pytz.timezone('Pacific/Tahiti')
transition_time = datetime(1912, 10, 1, 9, 58, 16, tzinfo=UTC)
before = {
'tzname': 'LMT',
'utcoffset': timedelta(hours=-9, minutes=-58),
'dst': timedelta(0),
}
after = {
'tzname': 'TAHT',
'utcoffset': timedelta(hours=-10),
'dst': timedelta(0),
}
class ReferenceUSEasternDSTStartTestCase(USEasternDSTStartTestCase):
tzinfo = reference.Eastern
def test_arithmetic(self):
# Reference implementation cannot handle this
pass
class ReferenceUSEasternDSTEndTestCase(USEasternDSTEndTestCase):
tzinfo = reference.Eastern
def testHourBefore(self):
# Python's datetime library has a bug, where the hour before
# a daylight savings transition is one hour out. For example,
# at the end of US/Eastern daylight savings time, 01:00 EST
# occurs twice (once at 05:00 UTC and once at 06:00 UTC),
# whereas the first should actually be 01:00 EDT.
# Note that this bug is by design - by accepting this ambiguity
        # for one hour per year, an is_dst flag on datetime.time
# became unnecessary.
self._test_all(
self.transition_time - timedelta(hours=1), self.after
)
def testInstantBefore(self):
self._test_all(
self.transition_time - timedelta(seconds=1), self.after
)
def test_arithmetic(self):
# Reference implementation cannot handle this
pass
class LocalTestCase(unittest.TestCase):
def testLocalize(self):
loc_tz = pytz.timezone('Europe/Amsterdam')
loc_time = loc_tz.localize(datetime(1930, 5, 10, 0, 0, 0))
# Actually +00:19:32, but Python datetime rounds this
self.assertEqual(loc_time.strftime('%Z%z'), 'AMT+0020')
loc_time = loc_tz.localize(datetime(1930, 5, 20, 0, 0, 0))
# Actually +00:19:32, but Python datetime rounds this
self.assertEqual(loc_time.strftime('%Z%z'), 'NST+0120')
loc_time = loc_tz.localize(datetime(1940, 5, 10, 0, 0, 0))
self.assertEqual(loc_time.strftime('%Z%z'), 'NET+0020')
loc_time = loc_tz.localize(datetime(1940, 5, 20, 0, 0, 0))
self.assertEqual(loc_time.strftime('%Z%z'), 'CEST+0200')
loc_time = loc_tz.localize(datetime(2004, 2, 1, 0, 0, 0))
self.assertEqual(loc_time.strftime('%Z%z'), 'CET+0100')
loc_time = loc_tz.localize(datetime(2004, 4, 1, 0, 0, 0))
self.assertEqual(loc_time.strftime('%Z%z'), 'CEST+0200')
tz = pytz.timezone('Europe/Amsterdam')
loc_time = loc_tz.localize(datetime(1943, 3, 29, 1, 59, 59))
self.assertEqual(loc_time.strftime('%Z%z'), 'CET+0100')
# Switch to US
loc_tz = pytz.timezone('US/Eastern')
# End of DST ambiguity check
loc_time = loc_tz.localize(datetime(1918, 10, 27, 1, 59, 59), is_dst=1)
self.assertEqual(loc_time.strftime('%Z%z'), 'EDT-0400')
loc_time = loc_tz.localize(datetime(1918, 10, 27, 1, 59, 59), is_dst=0)
self.assertEqual(loc_time.strftime('%Z%z'), 'EST-0500')
self.assertRaises(pytz.AmbiguousTimeError,
loc_tz.localize, datetime(1918, 10, 27, 1, 59, 59), is_dst=None
)
# Start of DST non-existent times
loc_time = loc_tz.localize(datetime(1918, 3, 31, 2, 0, 0), is_dst=0)
self.assertEqual(loc_time.strftime('%Z%z'), 'EST-0500')
loc_time = loc_tz.localize(datetime(1918, 3, 31, 2, 0, 0), is_dst=1)
self.assertEqual(loc_time.strftime('%Z%z'), 'EDT-0400')
self.assertRaises(pytz.NonExistentTimeError,
loc_tz.localize, datetime(1918, 3, 31, 2, 0, 0), is_dst=None
)
# Weird changes - war time and peace time both is_dst==True
loc_time = loc_tz.localize(datetime(1942, 2, 9, 3, 0, 0))
self.assertEqual(loc_time.strftime('%Z%z'), 'EWT-0400')
loc_time = loc_tz.localize(datetime(1945, 8, 14, 19, 0, 0))
self.assertEqual(loc_time.strftime('%Z%z'), 'EPT-0400')
loc_time = loc_tz.localize(datetime(1945, 9, 30, 1, 0, 0), is_dst=1)
self.assertEqual(loc_time.strftime('%Z%z'), 'EPT-0400')
loc_time = loc_tz.localize(datetime(1945, 9, 30, 1, 0, 0), is_dst=0)
self.assertEqual(loc_time.strftime('%Z%z'), 'EST-0500')
def testNormalize(self):
tz = pytz.timezone('US/Eastern')
dt = datetime(2004, 4, 4, 7, 0, 0, tzinfo=UTC).astimezone(tz)
dt2 = dt - timedelta(minutes=10)
self.assertEqual(
dt2.strftime('%Y-%m-%d %H:%M:%S %Z%z'),
'2004-04-04 02:50:00 EDT-0400'
)
dt2 = tz.normalize(dt2)
self.assertEqual(
dt2.strftime('%Y-%m-%d %H:%M:%S %Z%z'),
'2004-04-04 01:50:00 EST-0500'
)
def testPartialMinuteOffsets(self):
# utcoffset in Amsterdam was not a whole minute until 1937
# However, we fudge this by rounding them, as the Python
# datetime library
tz = pytz.timezone('Europe/Amsterdam')
utc_dt = datetime(1914, 1, 1, 13, 40, 28, tzinfo=UTC) # correct
utc_dt = utc_dt.replace(second=0) # But we need to fudge it
loc_dt = utc_dt.astimezone(tz)
self.assertEqual(
loc_dt.strftime('%Y-%m-%d %H:%M:%S %Z%z'),
'1914-01-01 14:00:00 AMT+0020'
)
# And get back...
utc_dt = loc_dt.astimezone(UTC)
self.assertEqual(
utc_dt.strftime('%Y-%m-%d %H:%M:%S %Z%z'),
'1914-01-01 13:40:00 UTC+0000'
)
def no_testCreateLocaltime(self):
# It would be nice if this worked, but it doesn't.
tz = pytz.timezone('Europe/Amsterdam')
dt = datetime(2004, 10, 31, 2, 0, 0, tzinfo=tz)
self.assertEqual(
dt.strftime(fmt),
'2004-10-31 02:00:00 CET+0100'
)
class CommonTimezonesTestCase(unittest.TestCase):
def test_bratislava(self):
# Bratislava is the default timezone for Slovakia, but our
        # heuristics were not adding it to common_timezones. Ideally,
# common_timezones should be populated from zone.tab at runtime,
# but I'm hesitant to pay the startup cost as loading the list
# on demand whilst remaining backwards compatible seems
# difficult.
self.assertTrue('Europe/Bratislava' in pytz.common_timezones)
self.assertTrue('Europe/Bratislava' in pytz.common_timezones_set)
def test_us_eastern(self):
self.assertTrue('US/Eastern' in pytz.common_timezones)
self.assertTrue('US/Eastern' in pytz.common_timezones_set)
def test_belfast(self):
# Belfast uses London time.
self.assertTrue('Europe/Belfast' in pytz.all_timezones_set)
self.assertFalse('Europe/Belfast' in pytz.common_timezones)
self.assertFalse('Europe/Belfast' in pytz.common_timezones_set)
def test_suite():
suite = unittest.TestSuite()
suite.addTest(doctest.DocTestSuite('pytz'))
suite.addTest(doctest.DocTestSuite('pytz.tzinfo'))
import test_tzinfo
suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(test_tzinfo))
return suite
if __name__ == '__main__':
warnings.simplefilter("error") # Warnings should be fatal in tests.
unittest.main(defaultTest='test_suite')
| apache-2.0 | 1,166,653,170,952,241,000 | 33.980447 | 79 | 0.598099 | false |
firebase/firebase-admin-python | tests/test_http_client.py | 1 | 5782 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for firebase_admin._http_client."""
import pytest
from pytest_localserver import http
import requests
from firebase_admin import _http_client
from tests import testutils
_TEST_URL = 'http://firebase.test.url/'
def test_http_client_default_session():
client = _http_client.HttpClient()
assert client.session is not None
assert client.base_url == ''
recorder = _instrument(client, 'body')
resp = client.request('get', _TEST_URL)
assert resp.status_code == 200
assert resp.text == 'body'
assert len(recorder) == 1
assert recorder[0].method == 'GET'
assert recorder[0].url == _TEST_URL
def test_http_client_custom_session():
session = requests.Session()
client = _http_client.HttpClient(session=session)
assert client.session is session
assert client.base_url == ''
recorder = _instrument(client, 'body')
resp = client.request('get', _TEST_URL)
assert resp.status_code == 200
assert resp.text == 'body'
assert len(recorder) == 1
assert recorder[0].method == 'GET'
assert recorder[0].url == _TEST_URL
def test_base_url():
client = _http_client.HttpClient(base_url=_TEST_URL)
assert client.session is not None
assert client.base_url == _TEST_URL
recorder = _instrument(client, 'body')
resp = client.request('get', 'foo')
assert resp.status_code == 200
assert resp.text == 'body'
assert len(recorder) == 1
assert recorder[0].method == 'GET'
assert recorder[0].url == _TEST_URL + 'foo'
def test_credential():
client = _http_client.HttpClient(
credential=testutils.MockGoogleCredential())
assert client.session is not None
recorder = _instrument(client, 'body')
resp = client.request('get', _TEST_URL)
assert resp.status_code == 200
assert resp.text == 'body'
assert len(recorder) == 1
assert recorder[0].method == 'GET'
assert recorder[0].url == _TEST_URL
assert recorder[0].headers['Authorization'] == 'Bearer mock-token'
@pytest.mark.parametrize('options, timeout', [
({}, _http_client.DEFAULT_TIMEOUT_SECONDS),
({'timeout': 7}, 7),
({'timeout': 0}, 0),
({'timeout': None}, None),
])
def test_timeout(options, timeout):
client = _http_client.HttpClient(**options)
assert client.timeout == timeout
recorder = _instrument(client, 'body')
client.request('get', _TEST_URL)
assert len(recorder) == 1
if timeout is None:
assert recorder[0]._extra_kwargs['timeout'] is None
else:
assert recorder[0]._extra_kwargs['timeout'] == pytest.approx(timeout, 0.001)
def _instrument(client, payload, status=200):
recorder = []
adapter = testutils.MockAdapter(payload, status, recorder)
client.session.mount(_TEST_URL, adapter)
return recorder
class TestHttpRetry:
"""Unit tests for the default HTTP retry configuration."""
ENTITY_ENCLOSING_METHODS = ['post', 'put', 'patch']
ALL_METHODS = ENTITY_ENCLOSING_METHODS + ['get', 'delete', 'head', 'options']
@classmethod
def setup_class(cls):
# Turn off exponential backoff for faster execution.
_http_client.DEFAULT_RETRY_CONFIG.backoff_factor = 0
# Start a test server instance scoped to the class.
server = http.ContentServer()
server.start()
cls.httpserver = server
@classmethod
def teardown_class(cls):
cls.httpserver.stop()
def setup_method(self):
# Clean up any state in the server before starting a new test case.
self.httpserver.requests = []
@pytest.mark.parametrize('method', ALL_METHODS)
def test_retry_on_503(self, method):
self.httpserver.serve_content({}, 503)
client = _http_client.JsonHttpClient(
credential=testutils.MockGoogleCredential(), base_url=self.httpserver.url)
body = None
if method in self.ENTITY_ENCLOSING_METHODS:
body = {'key': 'value'}
with pytest.raises(requests.exceptions.HTTPError) as excinfo:
client.request(method, '/', json=body)
assert excinfo.value.response.status_code == 503
assert len(self.httpserver.requests) == 5
@pytest.mark.parametrize('method', ALL_METHODS)
def test_retry_on_500(self, method):
self.httpserver.serve_content({}, 500)
client = _http_client.JsonHttpClient(
credential=testutils.MockGoogleCredential(), base_url=self.httpserver.url)
body = None
if method in self.ENTITY_ENCLOSING_METHODS:
body = {'key': 'value'}
with pytest.raises(requests.exceptions.HTTPError) as excinfo:
client.request(method, '/', json=body)
assert excinfo.value.response.status_code == 500
assert len(self.httpserver.requests) == 5
def test_no_retry_on_404(self):
self.httpserver.serve_content({}, 404)
client = _http_client.JsonHttpClient(
credential=testutils.MockGoogleCredential(), base_url=self.httpserver.url)
with pytest.raises(requests.exceptions.HTTPError) as excinfo:
client.request('get', '/')
assert excinfo.value.response.status_code == 404
assert len(self.httpserver.requests) == 1
| apache-2.0 | -4,307,229,820,997,090,300 | 35.36478 | 86 | 0.663611 | false |
phobson/statsmodels | statsmodels/regression/recursive_ls.py | 1 | 25492 | """
Recursive least squares model
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
from warnings import warn
from statsmodels.compat.collections import OrderedDict
import numpy as np
import pandas as pd
from statsmodels.regression.linear_model import OLS
from statsmodels.tools.data import _is_using_pandas
from statsmodels.tsa.statespace.mlemodel import (
MLEModel, MLEResults, MLEResultsWrapper)
from statsmodels.tools.tools import Bunch
from statsmodels.tools.decorators import cache_readonly, resettable_cache
import statsmodels.base.wrapper as wrap
# Columns are alpha = 0.1, 0.05, 0.025, 0.01, 0.005
_cusum_squares_scalars = np.array([
[1.0729830, 1.2238734, 1.3581015, 1.5174271, 1.6276236],
[-0.6698868, -0.6700069, -0.6701218, -0.6702672, -0.6703724],
[-0.5816458, -0.7351697, -0.8858694, -1.0847745, -1.2365861]
])
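# Descriptive note (added): each column corresponds to one of the alpha levels
# listed above; the rows are coefficients of an approximation in powers of
# 1/sqrt(n) to the CUSUM-of-squares critical values. How the scalars are
# combined is defined in the significance-bound helper later in this module
# (outside this excerpt).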
class RecursiveLS(MLEModel):
r"""
Recursive least squares
Parameters
----------
endog : array_like
The observed time-series process :math:`y`
exog : array_like
Array of exogenous regressors, shaped nobs x k.
Notes
-----
Recursive least squares (RLS) corresponds to expanding window ordinary
least squares (OLS).
This model applies the Kalman filter to compute recursive estimates of the
coefficients and recursive residuals.
References
----------
.. [1] Durbin, James, and Siem Jan Koopman. 2012.
Time Series Analysis by State Space Methods: Second Edition.
Oxford University Press.
"""
def __init__(self, endog, exog, **kwargs):
# Standardize data
if not _is_using_pandas(endog, None):
endog = np.asanyarray(endog)
exog_is_using_pandas = _is_using_pandas(exog, None)
if not exog_is_using_pandas:
exog = np.asarray(exog)
# Make sure we have 2-dimensional array
if exog.ndim == 1:
if not exog_is_using_pandas:
exog = exog[:, None]
else:
exog = pd.DataFrame(exog)
self.k_exog = exog.shape[1]
# Handle coefficient initialization
# By default, do not calculate likelihood while it is controlled by
# diffuse initial conditions.
kwargs.setdefault('loglikelihood_burn', self.k_exog)
kwargs.setdefault('initialization', 'approximate_diffuse')
kwargs.setdefault('initial_variance', 1e9)
# Initialize the state space representation
super(RecursiveLS, self).__init__(
endog, k_states=self.k_exog, exog=exog, **kwargs
)
# Setup the state space representation
self['design'] = self.exog[:, :, None].T
self['transition'] = np.eye(self.k_states)
# Notice that the filter output does not depend on the measurement
# variance, so we set it here to 1
self['obs_cov', 0, 0] = 1.
@classmethod
def from_formula(cls, formula, data, subset=None):
"""
Not implemented for state space models
"""
return super(MLEModel, cls).from_formula(formula, data, subset)
def fit(self):
"""
Fits the model by application of the Kalman filter
Returns
-------
RecursiveLSResults
"""
# Get the smoother results with an arbitrary measurement variance
smoother_results = self.smooth(return_ssm=True)
# Compute the MLE of sigma2 (see Harvey, 1989 equation 4.2.5)
resid = smoother_results.standardized_forecasts_error[0]
sigma2 = (np.inner(resid, resid) /
(self.nobs - self.loglikelihood_burn))
# Now construct a results class, where the params are the final
# estimates of the regression coefficients
self['obs_cov', 0, 0] = sigma2
return self.smooth()
def filter(self, return_ssm=False, **kwargs):
# Get the state space output
result = super(RecursiveLS, self).filter([], transformed=True,
cov_type='none',
return_ssm=True, **kwargs)
# Wrap in a results object
if not return_ssm:
params = result.filtered_state[:, -1]
cov_kwds = {
'custom_cov_type': 'nonrobust',
'custom_cov_params': result.filtered_state_cov[:, :, -1],
'custom_description': ('Parameters and covariance matrix'
' estimates are RLS estimates'
' conditional on the entire sample.')
}
result = RecursiveLSResultsWrapper(
RecursiveLSResults(self, params, result, cov_type='custom',
cov_kwds=cov_kwds)
)
return result
def smooth(self, return_ssm=False, **kwargs):
# Get the state space output
result = super(RecursiveLS, self).smooth([], transformed=True,
cov_type='none',
return_ssm=True, **kwargs)
# Wrap in a results object
if not return_ssm:
params = result.filtered_state[:, -1]
cov_kwds = {
'custom_cov_type': 'nonrobust',
'custom_cov_params': result.filtered_state_cov[:, :, -1],
'custom_description': ('Parameters and covariance matrix'
' estimates are RLS estimates'
' conditional on the entire sample.')
}
result = RecursiveLSResultsWrapper(
RecursiveLSResults(self, params, result, cov_type='custom',
cov_kwds=cov_kwds)
)
return result
@property
def param_names(self):
return self.exog_names
@property
def start_params(self):
        # The only parameter is the measurement disturbance standard deviation
return np.zeros(0)
def update(self, params, **kwargs):
"""
Update the parameters of the model
Updates the representation matrices to fill in the new parameter
values.
Parameters
----------
params : array_like
Array of new parameters.
transformed : boolean, optional
Whether or not `params` is already transformed. If set to False,
            `transform_params` is called. Default is True.
Returns
-------
params : array_like
Array of parameters.
"""
pass
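# Minimal usage sketch (added for illustration; variable names are examples,
# not part of this module):
#
#   import numpy as np
#   from statsmodels.regression.recursive_ls import RecursiveLS
#
#   nobs = 100
#   exog = np.column_stack([np.ones(nobs), np.random.normal(size=nobs)])
#   endog = np.dot(exog, [1.0, 0.5]) + np.random.normal(size=nobs)
#
#   res = RecursiveLS(endog, exog).fit()
#   res.params                     # full-sample estimates (final state)
#   res.recursive_coefficients     # expanding-window estimates per period
#   res.cusum, res.cusum_squares   # recursive-residual test statistics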
class RecursiveLSResults(MLEResults):
"""
Class to hold results from fitting a recursive least squares model.
Parameters
----------
model : RecursiveLS instance
The fitted model instance
Attributes
----------
specification : dictionary
Dictionary including all attributes from the recursive least squares
model instance.
See Also
--------
statsmodels.tsa.statespace.kalman_filter.FilterResults
statsmodels.tsa.statespace.mlemodel.MLEResults
"""
def __init__(self, model, params, filter_results, cov_type='opg',
**kwargs):
super(RecursiveLSResults, self).__init__(
model, params, filter_results, cov_type, **kwargs)
self.df_resid = np.inf # attribute required for wald tests
# Save _init_kwds
self._init_kwds = self.model._get_init_kwds()
# Save the model specification
self.specification = Bunch(**{
'k_exog': self.model.k_exog})
@property
def recursive_coefficients(self):
"""
Estimates of regression coefficients, recursively estimated
Returns
-------
out: Bunch
Has the following attributes:
- `filtered`: a time series array with the filtered estimate of
the component
- `filtered_cov`: a time series array with the filtered estimate of
the variance/covariance of the component
- `smoothed`: a time series array with the smoothed estimate of
the component
- `smoothed_cov`: a time series array with the smoothed estimate of
the variance/covariance of the component
- `offset`: an integer giving the offset in the state vector where
this component begins
"""
out = None
spec = self.specification
start = offset = 0
end = offset + spec.k_exog
out = Bunch(
filtered=self.filtered_state[start:end],
filtered_cov=self.filtered_state_cov[start:end, start:end],
smoothed=None, smoothed_cov=None,
offset=offset
)
if self.smoothed_state is not None:
out.smoothed = self.smoothed_state[start:end]
if self.smoothed_state_cov is not None:
out.smoothed_cov = (
self.smoothed_state_cov[start:end, start:end])
return out
@cache_readonly
def resid_recursive(self):
"""
Recursive residuals
Returns
-------
resid_recursive : array_like
An array of length `nobs` holding the recursive
residuals.
Notes
-----
The first `k_exog` residuals are typically unreliable due to
initialization.
"""
# See Harvey (1989) section 5.4; he defines the standardized
# innovations in 5.4.1, but they have non-unit variance, whereas
# the standardized forecast errors assume unit variance. To convert
# to Harvey's definition, we need to multiply by the standard
# deviation.
return (self.filter_results.standardized_forecasts_error.squeeze() *
self.filter_results.obs_cov[0, 0]**0.5)
@cache_readonly
def cusum(self):
r"""
Cumulative sum of standardized recursive residuals statistics
Returns
-------
cusum : array_like
An array of length `nobs - k_exog` holding the
CUSUM statistics.
Notes
-----
The CUSUM statistic takes the form:
.. math::
W_t = \frac{1}{\hat \sigma} \sum_{j=k+1}^t w_j
where :math:`w_j` is the recursive residual at time :math:`j` and
:math:`\hat \sigma` is the estimate of the standard deviation
from the full sample.
Excludes the first `k_exog` datapoints.
Due to differences in the way :math:`\hat \sigma` is calculated, the
output of this function differs slightly from the output in the
R package strucchange and the Stata contributed .ado file cusum6. The
calculation in this package is consistent with the description of
Brown et al. (1975)
References
----------
.. [1] Brown, R. L., J. Durbin, and J. M. Evans. 1975.
"Techniques for Testing the Constancy of
Regression Relationships over Time."
Journal of the Royal Statistical Society.
Series B (Methodological) 37 (2): 149-92.
"""
llb = self.loglikelihood_burn
return (np.cumsum(self.resid_recursive[self.loglikelihood_burn:]) /
np.std(self.resid_recursive[llb:], ddof=1))
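# A small sketch of a non-graphical stability check based on `cusum`
# (placeholder name `res`; relies on the bounds helper defined below):
#
#   llb = res.loglikelihood_burn
#   points = np.arange(llb, res.nobs)
#   lower, upper = res._cusum_significance_bounds(0.05, points=points)
#   unstable = np.any((res.cusum < lower) | (res.cusum > upper))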
@cache_readonly
def cusum_squares(self):
r"""
Cumulative sum of squares of standardized recursive residuals
statistics
Returns
-------
cusum_squares : array_like
An array of length `nobs - k_exog` holding the
CUSUM of squares statistics.
Notes
-----
The CUSUM of squares statistic takes the form:
.. math::
s_t = \left ( \sum_{j=k+1}^t w_j^2 \right ) \Bigg /
\left ( \sum_{j=k+1}^T w_j^2 \right )
where :math:`w_j` is the recursive residual at time :math:`j`.
Excludes the first `k_exog` datapoints.
References
----------
.. [1] Brown, R. L., J. Durbin, and J. M. Evans. 1975.
"Techniques for Testing the Constancy of
Regression Relationships over Time."
Journal of the Royal Statistical Society.
Series B (Methodological) 37 (2): 149-92.
"""
numer = np.cumsum(self.resid_recursive[self.loglikelihood_burn:]**2)
denom = numer[-1]
return numer / denom
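# Sanity check implied by the definition above (illustrative, `res` is a
# placeholder): the denominator is the full-sample sum of squared recursive
# residuals, so the path is non-decreasing and ends exactly at one.
#
#   assert abs(res.cusum_squares[-1] - 1.0) < 1e-12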
def plot_recursive_coefficient(self, variables=0, alpha=0.05,
legend_loc='upper left', fig=None,
figsize=None):
r"""
Plot the recursively estimated coefficients on a given variable
Parameters
----------
variables : int or str or iterable of int or string, optional
Integer index or string name of the variable whose coefficient will
be plotted. Can also be an iterable of integers or strings. Default
is the first variable.
alpha : float, optional
The confidence intervals for the coefficient are (1 - alpha) %
legend_loc : string, optional
The location of the legend in the plot. Default is upper left.
fig : Matplotlib Figure instance, optional
If given, subplots are created in this figure instead of in a new
figure. Note that the grid will be created in the provided
figure using `fig.add_subplot()`.
figsize : tuple, optional
If a figure is created, this argument allows specifying a size.
The tuple is (width, height).
Notes
-----
All plots contain (1 - `alpha`) % confidence intervals.
"""
# Get variables
if isinstance(variables, (int, str)):
variables = [variables]
k_variables = len(variables)
# If a string was given for `variable`, try to get it from exog names
exog_names = self.model.exog_names
for i in range(k_variables):
variable = variables[i]
if isinstance(variable, str):
variables[i] = exog_names.index(variable)
# Create the plot
from scipy.stats import norm
from statsmodels.graphics.utils import _import_mpl, create_mpl_fig
plt = _import_mpl()
fig = create_mpl_fig(fig, figsize)
for i in range(k_variables):
variable = variables[i]
ax = fig.add_subplot(k_variables, 1, i + 1)
# Get dates, if applicable
if hasattr(self.data, 'dates') and self.data.dates is not None:
dates = self.data.dates._mpl_repr()
else:
dates = np.arange(self.nobs)
llb = self.loglikelihood_burn
# Plot the coefficient
coef = self.recursive_coefficients
ax.plot(dates[llb:], coef.filtered[variable, llb:],
label='Recursive estimates: %s' % exog_names[variable])
# Legend
handles, labels = ax.get_legend_handles_labels()
# Get the critical value for confidence intervals
if alpha is not None:
critical_value = norm.ppf(1 - alpha / 2.)
# Plot confidence intervals
std_errors = np.sqrt(coef.filtered_cov[variable, variable, :])
ci_lower = (
coef.filtered[variable] - critical_value * std_errors)
ci_upper = (
coef.filtered[variable] + critical_value * std_errors)
ci_poly = ax.fill_between(
dates[llb:], ci_lower[llb:], ci_upper[llb:], alpha=0.2
)
ci_label = ('$%.3g \\%%$ confidence interval'
% ((1 - alpha)*100))
# Only add CI to legend for the first plot
if i == 0:
# Proxy artist for fill_between legend entry
# See http://matplotlib.org/1.3.1/users/legend_guide.html
p = plt.Rectangle((0, 0), 1, 1,
fc=ci_poly.get_facecolor()[0])
handles.append(p)
labels.append(ci_label)
ax.legend(handles, labels, loc=legend_loc)
# Remove xticks for all but the last plot
if i < k_variables - 1:
ax.xaxis.set_ticklabels([])
fig.tight_layout()
return fig
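# A short usage sketch (placeholder names; the strings passed as `variables`
# must match entries of res.model.exog_names):
#
#   fig = res.plot_recursive_coefficient(variables=['x1', 'const'],
#                                        alpha=0.05, figsize=(10, 6))
#   fig.savefig('recursive_coefficients.png')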
def _cusum_significance_bounds(self, alpha, ddof=0, points=None):
"""
Parameters
----------
alpha : float, optional
The significance bound is alpha %.
ddof : int, optional
The number of periods additional to `k_exog` to exclude in
constructing the bounds. Default is zero. This is usually used
only for testing purposes.
points : iterable, optional
The points at which to evaluate the significance bounds. Default is
two points, beginning and end of the sample.
Notes
-----
Comparing against the cusum6 package for Stata, this does not produce
exactly the same confidence bands (which are produced in cusum6 by
lw, uw) because they burn the first k_exog + 1 periods instead of the
first k_exog. If this change is performed
(so that `tmp = (self.nobs - llb - 1)**0.5`), then the output here
matches cusum6.
The cusum6 behavior does not seem to be consistent with
Brown et al. (1975); it is likely they did that because they needed
three initial observations to get the initial OLS estimates, whereas
we do not need to do that.
"""
# Get the constant associated with the significance level
if alpha == 0.01:
scalar = 1.143
elif alpha == 0.05:
scalar = 0.948
elif alpha == 0.10:
scalar = 0.950
else:
raise ValueError('Invalid significance level.')
# Get the points for the significance bound lines
llb = self.loglikelihood_burn
tmp = (self.nobs - llb - ddof)**0.5
upper_line = lambda x: scalar * tmp + 2 * scalar * (x - llb) / tmp
if points is None:
points = np.array([llb, self.nobs])
return -upper_line(points), upper_line(points)
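# Worked example of the bound line above: with alpha=0.05 the scalar is
# 0.948; for nobs=100 and llb=3 (so tmp=sqrt(97)), the upper bound runs
# from 0.948*sqrt(97) at t=llb up to 0.948*sqrt(97) + 2*0.948*97/sqrt(97)
# = 3*0.948*sqrt(97) at t=nobs, i.e. the Brown et al. (1975) lines through
# (k, a*sqrt(T-k)) and (T, 3a*sqrt(T-k)).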
def plot_cusum(self, alpha=0.05, legend_loc='upper left',
fig=None, figsize=None):
r"""
Plot the CUSUM statistic and significance bounds.
Parameters
----------
alpha : float, optional
The plotted significance bounds are alpha %.
legend_loc : string, optional
The location of the legend in the plot. Default is upper left.
fig : Matplotlib Figure instance, optional
If given, subplots are created in this figure instead of in a new
figure. Note that the grid will be created in the provided
figure using `fig.add_subplot()`.
figsize : tuple, optional
If a figure is created, this argument allows specifying a size.
The tuple is (width, height).
Notes
-----
Evidence of parameter instability may be found if the CUSUM statistic
moves out of the significance bounds.
References
----------
.. [1] Brown, R. L., J. Durbin, and J. M. Evans. 1975.
"Techniques for Testing the Constancy of
Regression Relationships over Time."
Journal of the Royal Statistical Society.
Series B (Methodological) 37 (2): 149-92.
"""
# Create the plot
from statsmodels.graphics.utils import _import_mpl, create_mpl_fig
plt = _import_mpl()
fig = create_mpl_fig(fig, figsize)
ax = fig.add_subplot(1, 1, 1)
# Get dates, if applicable
if hasattr(self.data, 'dates') and self.data.dates is not None:
dates = self.data.dates._mpl_repr()
else:
dates = np.arange(self.nobs)
llb = self.loglikelihood_burn
# Plot cusum series and reference line
ax.plot(dates[llb:], self.cusum, label='CUSUM')
ax.hlines(0, dates[llb], dates[-1], color='k', alpha=0.3)
# Plot significance bounds
lower_line, upper_line = self._cusum_significance_bounds(alpha)
ax.plot([dates[llb], dates[-1]], upper_line, 'k--',
label='%d%% significance' % (alpha * 100))
ax.plot([dates[llb], dates[-1]], lower_line, 'k--')
ax.legend(loc=legend_loc)
return fig
def _cusum_squares_significance_bounds(self, alpha, points=None):
"""
Notes
-----
Comparing against the cusum6 package for Stata, this does not produce
exactly the same confidence bands (which are produced in cusum6 by
lww, uww) because they use a different method for computing the
critical value; in particular, they use tabled values from
Table C, pp. 364-365 of "The Econometric Analysis of Time Series"
Harvey, (1990), and use the value given to 99 observations for any
larger number of observations. In contrast, we use the approximating
critical values suggested in Edgerton and Wells (1994) which allows
computing relatively good approximations for any number of
observations.
"""
# Get the approximate critical value associated with the significance
# level
llb = self.loglikelihood_burn
n = 0.5 * (self.nobs - llb) - 1
try:
ix = [0.1, 0.05, 0.025, 0.01, 0.005].index(alpha / 2)
except ValueError:
raise ValueError('Invalid significance level.')
scalars = _cusum_squares_scalars[:, ix]
crit = scalars[0] / n**0.5 + scalars[1] / n + scalars[2] / n**1.5
# Get the points for the significance bound lines
if points is None:
points = np.array([llb, self.nobs])
line = (points - llb) / (self.nobs - llb)
return line - crit, line + crit
def plot_cusum_squares(self, alpha=0.05, legend_loc='upper left',
fig=None, figsize=None):
r"""
Plot the CUSUM of squares statistic and significance bounds.
Parameters
----------
alpha : float, optional
The plotted significance bounds are alpha %.
legend_loc : string, optional
The location of the legend in the plot. Default is upper left.
fig : Matplotlib Figure instance, optional
If given, subplots are created in this figure instead of in a new
figure. Note that the grid will be created in the provided
figure using `fig.add_subplot()`.
figsize : tuple, optional
If a figure is created, this argument allows specifying a size.
The tuple is (width, height).
Notes
-----
Evidence of parameter instability may be found if the CUSUM of squares
statistic moves out of the significance bounds.
Critical values used in creating the significance bounds are computed
using the approximate formula of [2]_.
References
----------
.. [1] Brown, R. L., J. Durbin, and J. M. Evans. 1975.
"Techniques for Testing the Constancy of
Regression Relationships over Time."
Journal of the Royal Statistical Society.
Series B (Methodological) 37 (2): 149-92.
.. [2] Edgerton, David, and Curt Wells. 1994.
"Critical Values for the Cusumsq Statistic
in Medium and Large Sized Samples."
Oxford Bulletin of Economics and Statistics 56 (3): 355-65.
"""
# Create the plot
from statsmodels.graphics.utils import _import_mpl, create_mpl_fig
plt = _import_mpl()
fig = create_mpl_fig(fig, figsize)
ax = fig.add_subplot(1, 1, 1)
# Get dates, if applicable
if hasattr(self.data, 'dates') and self.data.dates is not None:
dates = self.data.dates._mpl_repr()
else:
dates = np.arange(self.nobs)
llb = self.loglikelihood_burn
# Plot cusum series and reference line
ax.plot(dates[llb:], self.cusum_squares, label='CUSUM of squares')
ref_line = (np.arange(llb, self.nobs) - llb) / (self.nobs - llb)
ax.plot(dates[llb:], ref_line, 'k', alpha=0.3)
# Plot significance bounds
lower_line, upper_line = self._cusum_squares_significance_bounds(alpha)
ax.plot([dates[llb], dates[-1]], upper_line, 'k--',
label='%d%% significance' % (alpha * 100))
ax.plot([dates[llb], dates[-1]], lower_line, 'k--')
ax.legend(loc=legend_loc)
return fig
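# A combined diagnostics sketch (placeholder name `res`): both plots are
# usually produced together when assessing parameter stability.
#
#   fig1 = res.plot_cusum(alpha=0.05)            # level stability
#   fig2 = res.plot_cusum_squares(alpha=0.05)    # variance stability
#   fig1.savefig('cusum.png')
#   fig2.savefig('cusum_squares.png')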
class RecursiveLSResultsWrapper(MLEResultsWrapper):
_attrs = {}
_wrap_attrs = wrap.union_dicts(MLEResultsWrapper._wrap_attrs,
_attrs)
_methods = {}
_wrap_methods = wrap.union_dicts(MLEResultsWrapper._wrap_methods,
_methods)
wrap.populate_wrapper(RecursiveLSResultsWrapper, RecursiveLSResults)
| bsd-3-clause | -7,399,047,095,831,074,000 | 35.573888 | 79 | 0.574494 | false |
nuxeh/morph | morphlib/buildcommand.py | 1 | 22770 | # Copyright (C) 2011-2015 Codethink Limited
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
import itertools
import os
import shutil
import logging
import tempfile
import datetime
import morphlib
import distbuild
class MultipleRootArtifactsError(morphlib.Error):
def __init__(self, artifacts):
self.msg = ('System build has multiple root artifacts: %r'
% [a.name for a in artifacts])
self.artifacts = artifacts
class BuildCommand(object):
'''High level logic for building.
This controls how the whole build process goes. This is a separate
class to enable easy experimentation with different approaches to
the various parts of the process.
'''
def __init__(self, app, build_env = None):
self.supports_local_build = True
self.app = app
self.lac, self.rac = self.new_artifact_caches()
self.lrc, self.rrc = self.new_repo_caches()
def build(self, repo_name, ref, filename, original_ref=None):
'''Build a given system morphology.'''
self.app.status(
msg='Building %(repo_name)s %(ref)s %(filename)s',
repo_name=repo_name, ref=ref, filename=filename)
self.app.status(msg='Deciding on task order')
srcpool = self.create_source_pool(
repo_name, ref, filename, original_ref)
self.validate_sources(srcpool)
root_artifact = self.resolve_artifacts(srcpool)
self.build_in_order(root_artifact)
self.app.status(
msg='Build of %(repo_name)s %(ref)s %(filename)s ended '
'successfully',
repo_name=repo_name, ref=ref, filename=filename)
def new_artifact_caches(self):
'''Create interfaces for the build artifact caches.
This includes creating the directories on disk if they are missing.
'''
return morphlib.util.new_artifact_caches(self.app.settings)
def new_repo_caches(self):
return morphlib.util.new_repo_caches(self.app)
def new_build_env(self, arch):
'''Create a new BuildEnvironment instance.'''
return morphlib.buildenvironment.BuildEnvironment(self.app.settings,
arch)
def create_source_pool(self, repo_name, ref, filename, original_ref=None):
'''Find the source objects required for building the given artifact
The SourcePool will contain every stratum and chunk dependency of the
given artifact (which must be a system) but will not take into account
any Git submodules which are required in the build.
'''
self.app.status(msg='Creating source pool', chatty=True)
srcpool = morphlib.sourceresolver.create_source_pool(
self.lrc, self.rrc, repo_name, ref, filename,
cachedir=self.app.settings['cachedir'],
original_ref=original_ref,
update_repos=not self.app.settings['no-git-update'],
status_cb=self.app.status)
return srcpool
def validate_sources(self, srcpool):
self.app.status(
msg='Validating cross-morphology references', chatty=True)
self._validate_cross_morphology_references(srcpool)
self.app.status(msg='Validating for there being non-bootstrap chunks',
chatty=True)
self._validate_has_non_bootstrap_chunks(srcpool)
def _validate_root_artifact(self, root_artifact):
self._validate_root_kind(root_artifact)
self._validate_architecture(root_artifact)
@staticmethod
def _validate_root_kind(root_artifact):
root_kind = root_artifact.source.morphology['kind']
if root_kind != 'system':
raise morphlib.Error(
'Building a %s directly is not supported' % root_kind)
def _validate_architecture(self, root_artifact):
'''Perform the validation between root and target architectures.'''
root_arch = root_artifact.source.morphology['arch']
host_arch = morphlib.util.get_host_architecture()
if root_arch == host_arch:
return
# Since the armv8 instruction set is nearly entirely armv7 compatible,
# and since the incompatibilities are appropriately trapped in the
# kernel, we can safely run any armv7 toolchain natively on armv8.
if host_arch == 'armv8l' and root_arch in ('armv7l', 'armv7lhf'):
return
if host_arch == 'armv8b' and root_arch in ('armv7b', 'armv7bhf'):
return
raise morphlib.Error(
'Are you trying to cross-build? Host architecture is %s but '
'target is %s' % (host_arch, root_arch))
@staticmethod
def _validate_has_non_bootstrap_chunks(srcpool):
stratum_sources = [src for src in srcpool
if src.morphology['kind'] == 'stratum']
# `not any(...)` evaluates to True for an empty iterable, which would
# give a false positive when there are no strata.
# This is an error by itself, but the source of this error can
# be better diagnosed later, so we abort validating here.
if not stratum_sources:
return
if not any(spec.get('build-mode', 'staging') != 'bootstrap'
for src in stratum_sources
for spec in src.morphology['chunks']):
raise morphlib.Error('No non-bootstrap chunks found.')
def _compute_cache_keys(self, root_artifact):
arch = root_artifact.source.morphology['arch']
self.app.status(msg='Creating build environment for %(arch)s',
arch=arch, chatty=True)
build_env = self.new_build_env(arch)
self.app.status(msg='Computing cache keys', chatty=True)
ckc = morphlib.cachekeycomputer.CacheKeyComputer(build_env)
for source in set(a.source for a in root_artifact.walk()):
source.cache_key = ckc.compute_key(source)
source.cache_id = ckc.get_cache_id(source)
root_artifact.build_env = build_env
def resolve_artifacts(self, srcpool):
'''Resolve the artifacts that will be built for a set of sources'''
self.app.status(msg='Creating artifact resolver', chatty=True)
ar = morphlib.artifactresolver.ArtifactResolver()
self.app.status(msg='Resolving artifacts', chatty=True)
root_artifacts = ar.resolve_root_artifacts(srcpool)
if len(root_artifacts) > 1:
# Validate root artifacts to give a more useful error message
for root_artifact in root_artifacts:
self._validate_root_artifact(root_artifact)
raise MultipleRootArtifactsError(root_artifacts)
root_artifact = root_artifacts[0]
self.app.status(msg='Validating root artifact', chatty=True)
self._validate_root_artifact(root_artifact)
self._compute_cache_keys(root_artifact)
return root_artifact
def _validate_cross_morphology_references(self, srcpool):
'''Perform validation across all morphologies involved in the build'''
stratum_names = {}
for src in srcpool:
kind = src.morphology['kind']
# Verify that chunks pointed to by strata really are chunks, etc.
method_name = '_validate_cross_refs_for_%s' % kind
if hasattr(self, method_name):
logging.debug('Calling %s' % method_name)
getattr(self, method_name)(src, srcpool)
else:
logging.warning('No %s' % method_name)
# Verify stratum build-depends agree with the system's contents.
# It is permissible for a stratum to build-depend on a stratum that
# isn't specified in the target system morphology.
# Multiple references to the same stratum are permitted. This is
# handled by the SourcePool deduplicating added Sources.
# It is forbidden to have two different strata with the same name.
# Hence if a Stratum is defined in the System, and in a Stratum as
# a build-dependency, then they must both have the same Repository
# and Ref specified.
if src.morphology['kind'] == 'stratum':
name = src.name
if name in stratum_names:
raise morphlib.Error(
"Multiple strata produce a '%s' artifact: %s and %s" %
(name, stratum_names[name].filename, src.filename))
stratum_names[name] = src
def _validate_cross_refs_for_system(self, src, srcpool):
self._validate_cross_refs_for_xxx(
src, srcpool, src.morphology['strata'], 'stratum')
def _validate_cross_refs_for_stratum(self, src, srcpool):
self._validate_cross_refs_for_xxx(
src, srcpool, src.morphology['chunks'], 'chunk')
def _validate_cross_refs_for_xxx(self, src, srcpool, specs, wanted):
for spec in specs:
repo_name = spec.get('repo') or src.repo_name
ref = spec.get('ref') or src.original_ref
filename = morphlib.util.sanitise_morphology_path(
spec.get('morph', spec.get('name')))
logging.debug(
'Validating cross ref to %s:%s:%s' %
(repo_name, ref, filename))
for other in srcpool.lookup(repo_name, ref, filename):
if other.morphology['kind'] != wanted:
raise morphlib.Error(
'%s %s references %s:%s:%s which is a %s, '
'instead of a %s' %
(src.morphology['kind'],
src.name,
repo_name,
ref,
filename,
other.morphology['kind'],
wanted))
@staticmethod
def get_ordered_sources(artifacts):
ordered_sources = []
known_sources = set()
for artifact in artifacts:
if artifact.source not in known_sources:
known_sources.add(artifact.source)
yield artifact.source
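# Behaviour sketch (hypothetical sources s1, s2, s3): for artifacts whose
# .source attributes occur as [s1, s1, s2, s1, s3], the generator yields
# s1, s2, s3 - each distinct source once, in first-seen order.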
def build_in_order(self, root_artifact):
'''Build everything specified in a build order.'''
self.app.status(msg='Building a set of sources')
build_env = root_artifact.build_env
ordered_sources = list(self.get_ordered_sources(root_artifact.walk()))
old_prefix = self.app.status_prefix
for i, s in enumerate(ordered_sources):
self.app.status_prefix = (
old_prefix + '[Build %(index)d/%(total)d] [%(name)s] ' % {
'index': (i+1),
'total': len(ordered_sources),
'name': s.name,
})
self.cache_or_build_source(s, build_env)
self.app.status_prefix = old_prefix
def cache_or_build_source(self, source, build_env):
'''Make artifacts of the built source available in the local cache.
This can be done by retrieving from a remote artifact cache, or if
that doesn't work for some reason, by building the source locally.
'''
artifacts = source.artifacts.values()
if self.rac is not None:
try:
self.cache_artifacts_locally(artifacts)
except morphlib.remoteartifactcache.GetError:
# Error is logged by the RemoteArtifactCache object.
pass
if any(not self.lac.has(artifact) for artifact in artifacts):
self.build_source(source, build_env)
for a in artifacts:
self.app.status(msg='%(kind)s %(name)s is cached at %(cachepath)s',
kind=source.morphology['kind'], name=a.name,
cachepath=self.lac.artifact_filename(a),
chatty=(source.morphology['kind'] != "system"))
def build_source(self, source, build_env):
'''Build all artifacts for one source.
All the dependencies are assumed to be built and available
in either the local or remote cache already.
'''
starttime = datetime.datetime.now()
self.app.status(msg='Building %(kind)s %(name)s',
name=source.name,
kind=source.morphology['kind'])
self.fetch_sources(source)
# TODO: Make an artifact.walk() that takes multiple root artifacts,
# as this does a walk for every artifact. This was the status
# quo before build logic was made to work per-source, but we can
# now do better.
deps = self.get_recursive_deps(source.artifacts.values())
self.cache_artifacts_locally(deps)
use_chroot = False
setup_mounts = False
if source.morphology['kind'] == 'chunk':
build_mode = source.build_mode
extra_env = {'PREFIX': source.prefix}
dep_prefix_set = set(a.source.prefix for a in deps
if a.source.morphology['kind'] == 'chunk')
extra_path = [os.path.join(d, 'bin') for d in dep_prefix_set]
if build_mode not in ['bootstrap', 'staging', 'test']:
logging.warning('Unknown build mode %s for chunk %s. '
'Defaulting to staging mode.' %
(build_mode, source.name))
build_mode = 'staging'
if build_mode == 'staging':
use_chroot = True
setup_mounts = True
staging_area = self.create_staging_area(build_env,
use_chroot,
extra_env=extra_env,
extra_path=extra_path)
try:
self.install_dependencies(staging_area, deps, source)
except BaseException:
staging_area.abort()
raise
else:
staging_area = self.create_staging_area(build_env, False)
self.build_and_cache(staging_area, source, setup_mounts)
self.remove_staging_area(staging_area)
td = datetime.datetime.now() - starttime
hours, remainder = divmod(int(td.total_seconds()), 60*60)
minutes, seconds = divmod(remainder, 60)
td_string = "%02d:%02d:%02d" % (hours, minutes, seconds)
self.app.status(msg="Elapsed time %(duration)s", duration=td_string)
def get_recursive_deps(self, artifacts):
deps = set()
ordered_deps = []
for artifact in artifacts:
for dep in artifact.walk():
if dep not in deps and dep not in artifacts:
deps.add(dep)
ordered_deps.append(dep)
return ordered_deps
def fetch_sources(self, source):
'''Update the local git repository cache with the sources.'''
repo_name = source.repo_name
source.repo = self.lrc.get_updated_repo(repo_name, ref=source.sha1)
self.lrc.ensure_submodules(source.repo, source.sha1)
def cache_artifacts_locally(self, artifacts):
'''Get artifacts missing from local cache from remote cache.'''
def fetch_files(to_fetch):
'''Fetch a set of files atomically.
If an error occurs during the transfer of any files, all downloaded
data is deleted, to ensure integrity of the local cache.
'''
try:
for remote, local in to_fetch:
shutil.copyfileobj(remote, local)
except BaseException:
for remote, local in to_fetch:
local.abort()
raise
else:
for remote, local in to_fetch:
remote.close()
local.close()
for artifact in artifacts:
# This block should fetch all artifact files in one go, using the
# 1.0/artifacts method of morph-cache-server. The code to do that
# needs bringing in from the distbuild.worker_build_connection
# module into morphlib.remoteartifactcache first.
to_fetch = []
if not self.lac.has(artifact):
to_fetch.append((self.rac.get(artifact),
self.lac.put(artifact)))
if artifact.source.morphology.needs_artifact_metadata_cached:
if not self.lac.has_artifact_metadata(artifact, 'meta'):
to_fetch.append((
self.rac.get_artifact_metadata(artifact, 'meta'),
self.lac.put_artifact_metadata(artifact, 'meta')))
if len(to_fetch) > 0:
self.app.status(
msg='Fetching to local cache: artifact %(name)s',
name=artifact.name)
fetch_files(to_fetch)
def create_staging_area(self, build_env, use_chroot=True, extra_env={},
extra_path=[]):
'''Create the staging area for building a single artifact.'''
self.app.status(msg='Creating staging area')
staging_dir = tempfile.mkdtemp(
dir=os.path.join(self.app.settings['tempdir'], 'staging'))
staging_area = morphlib.stagingarea.StagingArea(
self.app, staging_dir, build_env, use_chroot, extra_env,
extra_path)
return staging_area
def remove_staging_area(self, staging_area):
'''Remove the staging area.'''
self.app.status(msg='Removing staging area')
staging_area.remove()
# Nasty hack to avoid installing chunks built in 'bootstrap' mode in a
# different stratum when constructing staging areas.
# TODO: make nicer by having chunk morphs keep a reference to the
# stratum they were in
def in_same_stratum(self, s1, s2):
'''Checks whether two chunk sources are from the same stratum.
In the absence of morphologies tracking where they came from,
this checks whether both sources are depended on by artifacts
that belong to sources which have the same morphology.
'''
def dependent_stratum_morphs(source):
dependents = set(itertools.chain.from_iterable(
a.dependents for a in source.artifacts.itervalues()))
dependent_strata = set(s for s in dependents
if s.morphology['kind'] == 'stratum')
return set(s.morphology for s in dependent_strata)
return dependent_stratum_morphs(s1) == dependent_stratum_morphs(s2)
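# Illustrative example (hypothetical chunk/stratum names): if the only
# stratum morphology depending on both `gcc` and `glibc` artifacts is
# build-essential.morph, dependent_stratum_morphs() returns the same
# one-element set for both, so in_same_stratum(gcc, glibc) is True; a
# chunk referenced only from core.morph would compare unequal.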
def install_dependencies(self, staging_area, artifacts, target_source):
'''Install chunk artifacts into staging area.
We only ever care about chunk artifacts as build dependencies,
so this is not a generic artifact installer into staging area.
Any non-chunk artifacts are silently ignored.
All artifacts MUST be in the local artifact cache already.
'''
for artifact in artifacts:
if artifact.source.morphology['kind'] != 'chunk':
continue
if artifact.source.build_mode == 'bootstrap':
if not self.in_same_stratum(artifact.source, target_source):
continue
self.app.status(
msg='Installing chunk %(chunk_name)s from cache %(cache)s',
chunk_name=artifact.name,
cache=artifact.source.cache_key[:7],
chatty=True)
handle = self.lac.get(artifact)
staging_area.install_artifact(handle)
if target_source.build_mode == 'staging':
morphlib.builder.ldconfig(self.app.runcmd, staging_area.dirname)
def build_and_cache(self, staging_area, source, setup_mounts):
'''Build a source and put its artifacts into the local cache.'''
self.app.status(msg='Starting actual build: %(name)s '
'%(sha1)s',
name=source.name, sha1=source.sha1[:7])
builder = morphlib.builder.Builder(
self.app, staging_area, self.lac, self.rac, self.lrc,
self.app.settings['max-jobs'], setup_mounts)
return builder.build_and_cache(source)
class InitiatorBuildCommand(BuildCommand):
RECONNECT_INTERVAL = 30 # seconds
MAX_RETRIES = 1
def __init__(self, app, addr, port):
self.app = app
self.addr = addr
self.port = port
self.app.settings['push-build-branches'] = True
super(InitiatorBuildCommand, self).__init__(app)
def build(self, repo_name, ref, filename, original_ref=None):
'''Initiate a distributed build on a controller'''
distbuild.add_crash_conditions(self.app.settings['crash-condition'])
if self.addr == '':
raise morphlib.Error(
'Need address of controller to run a distbuild')
self.app.status(msg='Starting distributed build')
loop = distbuild.MainLoop()
args = [repo_name, ref, filename, original_ref or ref]
cm = distbuild.InitiatorConnectionMachine(self.app,
self.addr,
self.port,
distbuild.Initiator,
[self.app] + args,
self.RECONNECT_INTERVAL,
self.MAX_RETRIES)
loop.add_state_machine(cm)
try:
loop.run()
except KeyboardInterrupt:
# This will run if the user presses Ctrl+C or sends SIGINT during
# the build. It won't trigger on SIGTERM, SIGKILL or unhandled
# Python exceptions.
logging.info('Received KeyboardInterrupt, aborting.')
for initiator in loop.state_machines_of_type(distbuild.Initiator):
initiator.handle_cancel()
| gpl-2.0 | 7,502,372,622,402,206,000 | 40.101083 | 79 | 0.584629 | false |
ufal/neuralmonkey | neuralmonkey/model/feedable.py | 1 | 2178 | from abc import ABCMeta
from typing import Any, Dict, List
# pylint: disable=unused-import
from typing import Optional
# pylint: enable=unused-import
import tensorflow as tf
from neuralmonkey.dataset import Dataset
# pylint: disable=invalid-name
FeedDict = Dict[tf.Tensor, Any]
# pylint: enable=invalid-name
class Feedable(metaclass=ABCMeta):
"""Base class for feedable model parts.
In TensorFlow, data is provided to the model using placeholders. Neural
Monkey abstraction objects, such as encoders or decoders, can be members of
this class in order to be able to receive data inputs from the framework.
All feedable objects have a `feed_dict` method, which gets the current
dataset and returns a `FeedDict` dictionary which assigns values to
symbolic placeholders.
Additionally, each Feedable object has two placeholders which are fed
automatically by this superclass: `batch_size` and `train_mode`.
"""
def __init__(self) -> None:
self.train_mode = tf.placeholder(tf.bool, [], "train_mode")
self.batch_size = tf.placeholder(tf.int32, [], "batch_size")
self._dataset = None # type: Optional[Dict[str, tf.Tensor]]
def feed_dict(self, dataset: Dataset, train: bool = True) -> FeedDict:
"""Return a feed dictionary for the given feedable object.
Arguments:
dataset: A dataset instance from which to get the data.
train: Boolean indicating whether the model runs in training mode.
Returns:
A `FeedDict` dictionary object.
"""
fd = {} # type: FeedDict
fd[self.train_mode] = train
fd[self.batch_size] = len(dataset)
return fd
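# A minimal subclass sketch (assumed names `Sentences` and series "data";
# the exact Dataset accessor may differ): extending the feed dictionary
# built here with one extra placeholder.
#
#   class Sentences(Feedable):
#       def __init__(self) -> None:
#           super().__init__()
#           self.lengths = tf.placeholder(tf.int32, [None], "lengths")
#
#       def feed_dict(self, dataset: Dataset, train: bool = True) -> FeedDict:
#           fd = super().feed_dict(dataset, train)
#           fd[self.lengths] = [len(s) for s in dataset.get_series("data")]
#           return fd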
@property
def input_types(self) -> Dict[str, tf.DType]:
return {}
@property
def input_shapes(self) -> Dict[str, List[int]]:
return {}
@property
def dataset(self) -> Dict[str, tf.Tensor]:
if self._dataset is None:
raise RuntimeError("Getting dataset before registering it.")
return self._dataset
def register_input(self, dataset: Dict[str, tf.Tensor]) -> None:
self._dataset = dataset
| bsd-3-clause | -153,059,823,488,650,460 | 32 | 79 | 0.664371 | false |
shaunster0/object_recognition_service | recognition_server/recognition_server.py | 1 | 8875 | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 12 18:08:52 2017
@author: Shaun Werkhoven
@purpose: To create a image classification system for the Image Intelligence
TakeHome Assignment
Licensed under the Apache License, Version 2.0 (the "License");
Simple image classification with flask-based HTTP API, TensorFlow
and the Inception model (trained on ImageNet 2012 Challenge data
set).
The server maintains a list of images, with URLs, on which image inference can
be run, or has been run. It is a list of tasks to do, or that
have been done. Functions to add, delete or run inference on images are given
as HTTP addresses, using JSON arguments.
This program creates a graph from a saved GraphDef protocol buffer,
and runs inference on an input JPEG, GIF or PNG image. It outputs human readable
strings of the top 5 predictions along with their probabilities.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import json
from flask import Flask, jsonify, abort, make_response, request
from flask_httpauth import HTTPBasicAuth
try:
from recognition_server import tf_operations
except:
import tf_operations
# set up HTTP app
auth = HTTPBasicAuth()
app = Flask(__name__)
# initialise image list with some random images, not strictly necessary
images = [
{
'id': 1,
'title': u'Nikes',
'url': 'http://imgdirect.s3-website-us-west-2.amazonaws.com/nike.jpg',
'results': '',
'resize': False,
'size': ""
},
{
'id': 2,
'title': u'Altra',
'url': 'https://s3-us-west-2.amazonaws.com/imgdirect/altra.jpg',
'results': '',
'resize': False,
'size': ""
}
]
# set up some HTTP error handlers
@auth.error_handler
def unauthorized():
return make_response(jsonify({'error': 'unauthorized access'}), 403)
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify({'error': 'not found'}), 404)
@app.errorhandler(400)
def bad_request(error):
return make_response(jsonify({'error': 'missing json data'}), 400)
@app.errorhandler(410)
def missing_URL(error):
return make_response(jsonify({'error': 'missing URL field'}), 410)
# first API function, can be used for testing
@app.route('/')
@app.route('/index')
def index():
"""returns Hello, World!"""
return "Hello, World!"
# test string
# curl -i http://127.0.0.1:5000/img/api/v1.0/images
@app.route('/img/api/v1.0/images', methods=['GET'])
#@auth.login_required
def get_imgs():
"""
returns in JSON format all the images currently stored by the server.
Includes all fields, such as ID and URL.
"""
return jsonify({'images': images})
# test String
# curl -i http://127.0.0.1:5000/img/api/v1.0/images/2
@app.route('/img/api/v1.0/images/<int:img_id>', methods = ['GET'])
#@auth.login_required
def get_img(img_id):
"""
returns in JSON format a specific image currently stored by the server.
Requires the image ID to be included in the HTTP address
"""
img = [img for img in images if img['id'] == img_id]
if len(img) == 0:
abort(404)
return jsonify({'img': img[0]})
# test String
# curl -i -H "Content-Type: application/json" -X POST -d '{"new_imgs": [{"url":"http://imgdirect.s3-website-us-west-2.amazonaws.com/neither.jpg"}]}' http://127.0.0.1:5000/img/api/v1.0/images
@app.route('/img/api/v1.0/images', methods = ['POST'])
#@auth.login_required
def add_imgs():
"""
adds images to the server image list. The images must be provided as a list
encoded with JSON and sent with the HTTP post. A URL is required. Inference
is not automatically run on them.
"""
if not request.json:
abort(400)
missing_url = False
json_str = request.json
img_data = json_str['new_imgs']
new_images = []
for img in img_data:
# URL is required, other fields not
if img.get('url') == None:
missing_url = True
continue
if img.get('title') == None:
new_title = ""
else:
new_title = img.get('title')
if img.get('results') == None:
new_results = ""
else:
new_results = img.get('results')
image = {
# simple way to ensure a unique id
'id' : images[-1]['id'] + 1,
'title': new_title,
# url is required, otherwise return error
'url': img['url'],
'results': new_results,
'resize': False,
'size': ""
}
# add new image records to image list
images.append(image)
new_images.append(image)
if missing_url:
return_val = jsonify(new_images), 410
else:
return_val = jsonify(new_images), 201
return return_val
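# A client-side sketch (hypothetical host and image URL): the POST body
# must wrap image records in a "new_imgs" list, matching the parsing above.
#
#   import requests
#   payload = {"new_imgs": [{"title": "Nikes",
#                            "url": "http://example.com/nike.jpg"}]}
#   r = requests.post("http://127.0.0.1:5000/img/api/v1.0/images",
#                     json=payload)
#   print(r.status_code, r.json())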
# test string
# curl -X PUT -i -H "Content-Type: application/json" -d '{ \"id\": \"1\"}' http://127.0.0.1:5000/img/api/v1.0/infer/1
@app.route('/img/api/v1.0/infer/<int:img_id>', methods = ['PUT'])
#@auth.login_required
def infer(img_id):
"""
runs TensorFlow inference (recognition) on an image which is already in the images
list. The image ID must be included in the HTTP address and encoded with JSON.
Results are returned in JSON
"""
img = [img for img in images if img['id'] == img_id]
if len(img) == 0:
abort(404)
if not request.json:
abort(400)
url = img[0]['url']
# call TensorFlow
img[0]['results'] = tf_operations.run_inference_on_image(url)
return jsonify({'img': img[0]}), 200
# test string
# curl -X PUT -i http://127.0.0.1:5000/img/api/v1.0/inferundone
# calls TensorFlow, so can be slow if many images are undone
@app.route('/img/api/v1.0/inferundone', methods = ['PUT'])
#@auth.login_required
def infer_undone():
"""
runs TensorFlow inference (recognition) on all images which are in the images
list but for which inference has not already been run. Results are returned in JSON
"""
undone_imgs = [img for img in images if img['results'] == '']
if len(undone_imgs) == 0:
abort(404)
for img in undone_imgs:
# call TensorFlow
img['results'] = tf_operations.run_inference_on_image(img['url'])
return jsonify({'images': undone_imgs}), 200
# test String
# curl -i -H "Content-Type: application/json" -X POST -d '{"new_imgs": [{"url":"http://imgdirect.s3-website-us-west-2.amazonaws.com/neither.jpg"}]}' http://127.0.0.1:5000/img/api/v1.0/imagesinfer
# another TensorFlow function, again can be slow if many images are added
@app.route('/img/api/v1.0/imagesinfer', methods = ['POST'])
#@auth.login_required
def add_imgs_infer():
"""
adds new images to the image list and runs TensorFlow inference (recognition)
on them. New images must be provided with a URL, and given in JSON format.
Results are returned in JSON format.
"""
if not request.json:
abort(400)
missing_url = False
json_str = request.json
img_data = json_str['new_imgs']
new_images = []
for img in img_data:
# URL is required, other fields not
if img.get('url') == None:
missing_url = True
continue
if img.get('title') == None:
new_title = ""
else:
new_title = img.get('title')
# call TensorFlow
new_results = tf_operations.run_inference_on_image(img['url'])
image = {
# simple way to ensure a unique id
'id' : images[-1]['id'] + 1,
'title': new_title,
# url is required, otherwise return error
'url': img['url'],
'results': new_results,
'resize': False,
'size': ""
}
images.append(image)
new_images.append(image)
if missing_url:
return_val = jsonify(new_images), 410
else:
return_val = jsonify(new_images), 201
return return_val
# test String
# curl -i -H "Content-Type: application/json" -X DELETE http://127.0.0.1:5000/img/api/v1.0/images/3
@app.route('/img/api/v1.0/images/<int:img_id>', methods=['DELETE'])
#@auth.login_required
def delete_img(img_id):
"""
deletes an image from the server image list. The image ID must be given in the HTTP
address
"""
img = [img for img in images if img['id'] == img_id]
if len(img) == 0:
abort(404)
images.remove(img[0])
return jsonify({'result': True})
def main(_):
tf_operations.parse_args()
# checks if model data is downloaded. If not, does that
tf_operations.download_and_extract_model_if_needed()
app.run(host = '0.0.0.0')
if __name__ == '__main__':
tf_operations.tf.app.run(main = main, argv = [sys.argv[0]])
| apache-2.0 | -1,915,504,158,426,327,300 | 28.986486 | 179 | 0.61262 | false |
Copper-Head/RoboSanta | clingo_stats.py | 1 | 13466 |
#!/usr/bin/python
import os
import sys
import clingo
import json
#
# STATS
#
class Stats:
def __init__(self):
self.__width = 13
def __ratio(self,x,y):
return float(x)/float(y) if float(y)!=0 else 0
def __percent(self,x,y):
return 100*self.__ratio(x,y)
def __print_key(self,key):
return key + " "*(self.__width-len(key)) + ": "
def __print_key_value(self,key,value):
return self.__print_key(key) + value
# requires Control initialized with --stats
def summary(self,control,models=True):
out = ""
summary = control.statistics['summary']
moreStr = "+" if int(summary['exhausted'])==0 else ""
numEnum = int(summary['models']['enumerated'])
if models:
out += self.__print_key("Models")
out += "{}{}\n".format(numEnum,moreStr)
step = int(summary['call'])
out += self.__print_key_value("Calls","{}\n".format(step+1))
# return out if no stats
if not 'accu' in control.statistics: return out
times = control.statistics['accu']['times']
out += self.__print_key("Time")
totalTime = float(times['total'])
solveTime = float(times['solve'])
satTime = float(times['sat'])
unsatTime = float(times['unsat'])
cpuTime = float(times['cpu'])
out += "{:.3f}s (Solving: {:.2f}s 1st Model: {:.2f}s Unsat: {:.2f}s)\n".format(totalTime,solveTime,satTime,unsatTime)
out += self.__print_key_value("CPU Time","{:.3f}s".format(cpuTime))
concurrency = int(summary['concurrency'])
if concurrency > 1:
out += "\n" + self.__print_key_value("Threads","{:<8}".format(concurrency))
# when winner info becomes available: " (Winner: {})\n".format(winner)
return out
# requires Control initialized with --stats
def statistics(self,control):
# return "" if no stats
if not 'accu' in control.statistics: return ""
# choices...
solver = control.statistics['accu']['solving']['solvers']
extra = solver['extra']
choices = int(solver['choices'])
domChoices = int(extra['domain_choices'])
conflicts = int(solver['conflicts'])
backjumps = int(solver['conflicts_analyzed'])
restarts = int(solver['restarts'])
avgRestart = self.__ratio(backjumps,restarts)
lastRestart = int(solver['restarts_last'])
out = "\n" + self.__print_key_value("Choices","{:<8}".format(choices))
if domChoices: out += " (Domain: {})".format(domChoices)
out += "\n"
out += self.__print_key_value("Conflicts","{:<8}".format(conflicts))
out += " (Analyzed: {})\n".format(backjumps)
out += self.__print_key_value("Restarts","{:<8}".format(restarts))
if restarts>0: out += " (Average: {:.2f} Last: {})".format(avgRestart,lastRestart)
out += "\n"
# hccs
hccTests = int(extra['hcc_tests'])
hccPartial = int(extra['hcc_partial'])
if hccTests:
out += self.__print_key_value("Stab. Tests","{:<8}".format(hccTests))
out += " (Full: {} Partial: {})\n".format(hccTests-hccPartial,hccPartial)
# model level
models = extra['models']
modelLits = extra['models_level']
avgModel = self.__ratio(modelLits,models)
if models:
out += self.__print_key_value("Model-Level","{:<8.1f}\n".format(avgModel))
# lemmas
gps = int(extra['guiding_paths'])
gpLits = int(extra['guiding_paths_lits'])
avgGp = self.__ratio(gpLits, gps)
splits = int(extra['splits'])
sum = int(extra['lemmas'])
deleted = int(extra['lemmas_deleted'])
binary = int(extra['lemmas_binary'])
ternary = int(extra['lemmas_ternary'])
conflict = int(extra['lemmas_conflict'])
loop = int(extra['lemmas_loop'])
other = int(extra['lemmas_other'])
lits_conflict = int(extra['lits_conflict'])
lits_loop = int(extra['lits_loop'])
lits_other = int(extra['lits_other'])
out += self.__print_key_value("Problems","{:<8}".format(gps))
out += " (Average Length: {:.2f} Splits: {})\n".format(avgGp,splits)
out += self.__print_key_value("Lemmas","{:<8}".format(sum))
out += " (Deleted: {})\n".format(deleted)
out += self.__print_key_value(" Binary","{:<8}".format(binary))
out += " (Ratio: {:6.2f}%)\n".format(self.__percent(binary,sum))
out += self.__print_key_value(" Ternary","{:<8}".format(ternary))
out += " (Ratio: {:6.2f}%)\n".format(self.__percent(ternary,sum))
out += self.__print_key_value(" Conflict","{:<8}".format(conflict))
out += " (Average Length: {:6.1f} Ratio: {:6.2f}%) \n".format(self.__ratio(lits_conflict,conflict),self.__percent(conflict,sum))
out += self.__print_key_value(" Loop","{:<8}".format(loop))
out += " (Average Length: {:6.1f} Ratio: {:6.2f}%) \n".format(self.__ratio(lits_loop,loop),self.__percent(loop,sum))
out += self.__print_key_value(" Other","{:<8}".format(other))
out += " (Average Length: {:6.1f} Ratio: {:6.2f}%) \n".format(self.__ratio(lits_other,other),self.__percent(other,sum))
# distributed...
distributed = int(extra['distributed'])
integrated = int(extra['integrated'])
if distributed or integrated:
distRatio = self.__ratio(distributed,conflict+loop)
sumDistLbd = int(extra['distributed_sum_lbd'])
avgDistLbd = self.__ratio(sumDistLbd,distributed)
intRatio = self.__ratio(integrated,distributed)
intImps = int(extra['integrated_imps'])
intJumps = int(extra['integrated_jumps'])
avgIntJump = self.__ratio(intJumps,intImps)
out += self.__print_key_value(" Distributed","{:<8}".format(distributed))
out += " (Ratio: {:6.2f}% Average LBD: {:.2f}) \n".format(distRatio*100.0,avgDistLbd)
out += self.__print_key_value(" Integrated","{:<8}".format(integrated))
out += " (Ratio: {:6.2f}% ".format(intRatio*100.0) # for not accu: if not _accu: "("
out += "Unit: {} Average Jumps: {:.2f})\n".format(intImps,avgIntJump)
# jumps
jumps = extra['jumps']
_jumps = int(jumps['jumps'])
bounded = int(jumps['jumps_bounded'])
jumpSum = int(jumps['levels'])
boundSum = int(jumps['levels_bounded'])
maxJump = int(jumps['max'])
maxJumpEx = int(jumps['max_executed'])
maxBound = int(jumps['max_bounded'])
jumped = jumpSum - boundSum
jumpedRatio = self.__ratio(jumped,jumpSum)
avgBound = self.__ratio(boundSum,bounded)
avgJump = self.__ratio(jumpSum,_jumps)
avgJumpEx = self.__ratio(jumped,_jumps)
out += self.__print_key_value("Backjumps","{:<8}".format(_jumps))
out += " (Average: {:5.2f} Max: {:>3} Sum: {:>6})\n".format(avgJump,maxJump,jumpSum)
out += self.__print_key_value(" Executed","{:<8}".format(_jumps-bounded))
out += " (Average: {:5.2f} Max: {:>3} Sum: {:>6} Ratio: {:6.2f}%)\n".format(avgJumpEx,maxJumpEx,jumped,jumpedRatio*100.0)
out += self.__print_key_value(" Bounded","{:<8}".format(bounded))
out += " (Average: {:5.2f} Max: {:>3} Sum: {:>6} Ratio: {:6.2f}%)\n".format(avgBound,maxBound,boundSum,100.0 - (jumpedRatio*100.0))
out += "\n"
# logic program
lp = control.statistics['problem']['lp']
# rules
rOriginal = int(lp['rules'])
rules_normal = int(lp['rules_normal'])
rules_choice = int(lp['rules_choice'])
rules_minimize = int(lp['rules_minimize'])
rules_acyc = int(lp['rules_acyc'])
rules_heuristic = int(lp['rules_heuristic'])
rFinal = int(lp['rules_tr'])
rules_tr_normal = int(lp['rules_tr_normal'])
rules_tr_choice = int(lp['rules_tr_choice'])
rules_tr_minimize = int(lp['rules_tr_minimize'])
rules_tr_acyc = int(lp['rules_tr_acyc'])
rules_tr_heuristic = int(lp['rules_tr_heuristic'])
out += self.__print_key_value("Rules","{:<8}".format(rFinal))
if (rFinal != rOriginal):
out += " (Original: {})".format(rOriginal)
out += "\n"
for i in [#[" Normal", rules_normal, rules_tr_normal],
[" Choice", rules_choice, rules_tr_choice],
[" Minimize", rules_minimize, rules_tr_minimize],
[" Acyc", rules_acyc, rules_tr_acyc],
[" Heuristic", rules_heuristic, rules_tr_heuristic]]:
if i[2]:
out += self.__print_key_value(i[0],"{:<8}".format(i[2]))
if (i[2] != i[1]):
out += " (Original: {})".format(i[1])
out += "\n"
# atoms
atoms = int(lp['atoms'])
auxAtoms = int(lp['atoms_aux'])
out += self.__print_key_value("Atoms","{:<8}".format(atoms))
if (auxAtoms):
out += " (Original: {} Auxiliary: {})".format(atoms-auxAtoms,auxAtoms)
out += "\n"
# disjunctions
disjunctions = int(lp['disjunctions'])
disjunctions_tr = int(lp['disjunctions_non_hcf'])
if disjunctions:
out += self.__print_key_value("Disjunctions","{:<8}".format(disjunctions_tr))
out += " (Original: {})\n".format(disjunctions)
# bodies
bFinal = int(lp['bodies_tr'])
bOriginal = int(lp['bodies'])
count_bodies = int(lp['count_bodies'])
count_bodies_tr = int(lp['count_bodies_tr'])
sum_bodies = int(lp['sum_bodies'])
sum_bodies_tr = int(lp['sum_bodies_tr'])
out += self.__print_key_value("Bodies","{:<8}".format(bFinal))
if (bFinal != bOriginal):
out += " (Original: {})".format(bOriginal)
out += "\n"
for i in [[" Count", count_bodies, count_bodies_tr],
[" Sum", sum_bodies, sum_bodies_tr ]]:
if i[1]:
out += self.__print_key_value(i[0],"{:<8}".format(i[2]))
if (i[2] != i[1]):
out += " (Original: {})".format(i[1])
out += "\n"
# equivalences
eqs = int(lp['eqs'])
eqsAtom = int(lp['eqs_atom'])
eqsBody = int(lp['eqs_body'])
eqsOther = int(lp['eqs_other'])
if eqs > 0:
out += self.__print_key_value("Equivalences","{:<8}".format(eqs))
out += " (Atom=Atom: {} Body=Body: {} Other: {})\n".format(eqsAtom,eqsBody,eqsOther)
# sccs
sccs = int(lp['sccs'])
nonHcfs = int(lp['sccs_non_hcf'])
ufsNodes = int(lp['ufs_nodes'])
gammas = int(lp['gammas'])
out += self.__print_key("Tight")
if sccs==0: out += "Yes"
# for supported models: elif sccs == PrgNode:noScc
else: out += "{:<8} (SCCs: {} Non-Hcfs: {} Nodes: {} Gammas: {})".format("No",sccs,nonHcfs,ufsNodes,gammas)
out += "\n"
# problem
gen = control.statistics['problem']['generator']
vars = int(gen['vars'])
eliminated = int(gen['vars_eliminated'])
frozen = int(gen['vars_frozen'])
binary = int(gen['constraints_binary'])
ternary = int(gen['constraints_ternary'])
sum = int(gen['constraints']) + binary + ternary
acycEdges = int(gen['acyc_edges'])
out += self.__print_key_value("Variables","{:<8}".format(vars))
out += " (Eliminated: {:>4} Frozen: {:>4})\n".format(eliminated,frozen)
out += self.__print_key_value("Constraints","{:<8}".format(sum))
out += " (Binary: {:5.1f}% Ternary: {:5.1f}% Other: {:5.1f}%)\n".format(self.__percent(binary,sum),self.__percent(ternary,sum),self.__percent(sum-binary-ternary,sum))
return out
program = """
% pigeonhole problem
#const n=8.
pigeon(1..n+1). box(1..n).
1 { in(X,Y) : box(Y) } 1 :- pigeon(X).
:- 2 { in(X,Y) : pigeon(X) }, box(Y).
% heuristic
#heuristic in(X,Y) : pigeon(X), box(Y). [1,true]
% disjunction
a | b. a :- b. b :- a.
% SAT
box(n+1).
"""
satisfiable = False
def on_model(model):
global satisfiable
sys.stdout.write("Answer: 1\n{}\n".format(str(model)))
satisfiable = True
def run():
# set options
options = "-t4 --stats --heuristic=Domain"
#options = ""
# with Control()
control = clingo.Control(options.split())
control.add("a",[],program)
control.ground([("a",[])])
control.solve(on_model=on_model)
if satisfiable: sys.stdout.write("SATISFIABLE\n")
else: sys.stdout.write("UNSATISFIABLE\n")
sys.stdout.write(Stats().summary(control)+"\n")
sys.stdout.write(Stats().statistics(control)+"\n")
# with $clingo
file = "tmp.lp"
with open(file, "w") as text_file:
text_file.write(program)
os.system("clingo {} {}; rm {}".format(options,file,file))
if __name__ == "__main__":
run()
| mit | 4,832,443,471,218,957,000 | 40.180428 | 174 | 0.527551 | false |
nwjs/chromium.src | PRESUBMIT_test.py | 1 | 109037 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os.path
import subprocess
import unittest
import PRESUBMIT
from PRESUBMIT_test_mocks import MockFile, MockAffectedFile
from PRESUBMIT_test_mocks import MockInputApi, MockOutputApi
_TEST_DATA_DIR = 'base/test/data/presubmit'
class VersionControlConflictsTest(unittest.TestCase):
def testTypicalConflict(self):
lines = ['<<<<<<< HEAD',
' base::ScopedTempDir temp_dir_;',
'=======',
' ScopedTempDir temp_dir_;',
'>>>>>>> master']
errors = PRESUBMIT._CheckForVersionControlConflictsInFile(
MockInputApi(), MockFile('some/path/foo_platform.cc', lines))
self.assertEqual(3, len(errors))
self.assertTrue('1' in errors[0])
self.assertTrue('3' in errors[1])
self.assertTrue('5' in errors[2])
def testIgnoresReadmes(self):
lines = ['A First Level Header',
'====================',
'',
'A Second Level Header',
'---------------------']
errors = PRESUBMIT._CheckForVersionControlConflictsInFile(
MockInputApi(), MockFile('some/polymer/README.md', lines))
self.assertEqual(0, len(errors))
class UmaHistogramChangeMatchedOrNotTest(unittest.TestCase):
def testTypicalCorrectlyMatchedChange(self):
diff_cc = ['UMA_HISTOGRAM_BOOL("Bla.Foo.Dummy", true)']
diff_java = [
'RecordHistogram.recordBooleanHistogram("Bla.Foo.Dummy", true)']
diff_xml = ['<histogram name="Bla.Foo.Dummy"> </histogram>']
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('some/path/foo.cc', diff_cc),
MockFile('some/path/foo.java', diff_java),
MockFile('tools/metrics/histograms/histograms.xml', diff_xml),
]
warnings = PRESUBMIT._CheckUmaHistogramChanges(mock_input_api,
MockOutputApi())
self.assertEqual(0, len(warnings))
def testTypicalNotMatchedChange(self):
diff_cc = ['UMA_HISTOGRAM_BOOL("Bla.Foo.Dummy", true)']
diff_java = [
'RecordHistogram.recordBooleanHistogram("Bla.Foo.Dummy", true)']
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('some/path/foo.cc', diff_cc),
MockFile('some/path/foo.java', diff_java),
]
warnings = PRESUBMIT._CheckUmaHistogramChanges(mock_input_api,
MockOutputApi())
self.assertEqual(1, len(warnings))
self.assertEqual('warning', warnings[0].type)
self.assertTrue('foo.cc' in warnings[0].items[0])
self.assertTrue('foo.java' in warnings[0].items[1])
def testTypicalNotMatchedChangeViaSuffixes(self):
diff_cc = ['UMA_HISTOGRAM_BOOL("Bla.Foo.Dummy", true)']
diff_java = [
'RecordHistogram.recordBooleanHistogram("Bla.Foo.Dummy", true)']
diff_xml = ['<histogram_suffixes name="SuperHistogram">',
' <suffix name="Dummy"/>',
' <affected-histogram name="Snafu.Dummy"/>',
'</histogram>']
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('some/path/foo.cc', diff_cc),
MockFile('some/path/foo.java', diff_java),
MockFile('tools/metrics/histograms/histograms.xml', diff_xml),
]
warnings = PRESUBMIT._CheckUmaHistogramChanges(mock_input_api,
MockOutputApi())
self.assertEqual(1, len(warnings))
self.assertEqual('warning', warnings[0].type)
self.assertTrue('foo.cc' in warnings[0].items[0])
self.assertTrue('foo.java' in warnings[0].items[1])
def testTypicalCorrectlyMatchedChangeViaSuffixes(self):
diff_cc = ['UMA_HISTOGRAM_BOOL("Bla.Foo.Dummy", true)']
diff_java = [
'RecordHistogram.recordBooleanHistogram("Bla.Foo.Dummy", true)']
diff_xml = ['<histogram_suffixes name="SuperHistogram">',
' <suffix name="Dummy"/>',
' <affected-histogram name="Bla.Foo"/>',
'</histogram>']
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('some/path/foo.cc', diff_cc),
MockFile('some/path/foo.java', diff_java),
MockFile('tools/metrics/histograms/histograms.xml', diff_xml),
]
warnings = PRESUBMIT._CheckUmaHistogramChanges(mock_input_api,
MockOutputApi())
self.assertEqual(0, len(warnings))
def testTypicalCorrectlyMatchedChangeViaSuffixesWithSeparator(self):
diff_cc = ['UMA_HISTOGRAM_BOOL("Snafu_Dummy", true)']
diff_java = ['RecordHistogram.recordBooleanHistogram("Snafu_Dummy", true)']
diff_xml = ['<histogram_suffixes name="SuperHistogram" separator="_">',
' <suffix name="Dummy"/>',
' <affected-histogram name="Snafu"/>',
'</histogram>']
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('some/path/foo.cc', diff_cc),
MockFile('some/path/foo.java', diff_java),
MockFile('tools/metrics/histograms/histograms.xml', diff_xml),
]
warnings = PRESUBMIT._CheckUmaHistogramChanges(mock_input_api,
MockOutputApi())
self.assertEqual(0, len(warnings))
def testCorrectlyMatchedChangeViaSuffixesWithLineWrapping(self):
diff_cc = [
'UMA_HISTOGRAM_BOOL("LongHistogramNameNeedsLineWrapping.Dummy", true)']
diff_java = ['RecordHistogram.recordBooleanHistogram(' +
'"LongHistogramNameNeedsLineWrapping.Dummy", true)']
diff_xml = ['<histogram_suffixes',
' name="LongHistogramNameNeedsLineWrapping"',
' separator=".">',
' <suffix name="Dummy"/>',
' <affected-histogram',
' name="LongHistogramNameNeedsLineWrapping"/>',
'</histogram>']
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('some/path/foo.cc', diff_cc),
MockFile('some/path/foo.java', diff_java),
MockFile('tools/metrics/histograms/histograms.xml', diff_xml),
]
warnings = PRESUBMIT._CheckUmaHistogramChanges(mock_input_api,
MockOutputApi())
self.assertEqual(0, len(warnings))
def testNameMatch(self):
# Check that the detected histogram name is "Dummy" and not, e.g.,
# "Dummy\", true); // The \"correct"
diff_cc = ['UMA_HISTOGRAM_BOOL("Dummy", true); // The "correct" histogram']
diff_java = [
'RecordHistogram.recordBooleanHistogram("Dummy", true);' +
' // The "correct" histogram']
diff_xml = ['<histogram name="Dummy"> </histogram>']
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('some/path/foo.cc', diff_cc),
MockFile('some/path/foo.java', diff_java),
MockFile('tools/metrics/histograms/histograms.xml', diff_xml),
]
warnings = PRESUBMIT._CheckUmaHistogramChanges(mock_input_api,
MockOutputApi())
self.assertEqual(0, len(warnings))
def testSimilarMacroNames(self):
diff_cc = ['PUMA_HISTOGRAM_COOL("Mountain Lion", 42)']
diff_java = [
'FakeRecordHistogram.recordFakeHistogram("Mountain Lion", 42)']
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('some/path/foo.cc', diff_cc),
MockFile('some/path/foo.java', diff_java),
]
warnings = PRESUBMIT._CheckUmaHistogramChanges(mock_input_api,
MockOutputApi())
self.assertEqual(0, len(warnings))
def testMultiLine(self):
diff_cc = ['UMA_HISTOGRAM_BOOLEAN(', ' "Multi.Line", true)']
diff_cc2 = ['UMA_HISTOGRAM_BOOLEAN(', ' "Multi.Line"', ' , true)']
diff_java = [
'RecordHistogram.recordBooleanHistogram(',
' "Multi.Line", true);',
]
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('some/path/foo.cc', diff_cc),
MockFile('some/path/foo2.cc', diff_cc2),
MockFile('some/path/foo.java', diff_java),
]
warnings = PRESUBMIT._CheckUmaHistogramChanges(mock_input_api,
MockOutputApi())
self.assertEqual(1, len(warnings))
self.assertEqual('warning', warnings[0].type)
self.assertTrue('foo.cc' in warnings[0].items[0])
self.assertTrue('foo2.cc' in warnings[0].items[1])
class BadExtensionsTest(unittest.TestCase):
def testBadRejFile(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('some/path/foo.cc', ''),
MockFile('some/path/foo.cc.rej', ''),
MockFile('some/path2/bar.h.rej', ''),
]
results = PRESUBMIT._CheckPatchFiles(mock_input_api, MockOutputApi())
self.assertEqual(1, len(results))
self.assertEqual(2, len(results[0].items))
self.assertTrue('foo.cc.rej' in results[0].items[0])
self.assertTrue('bar.h.rej' in results[0].items[1])
def testBadOrigFile(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('other/path/qux.h.orig', ''),
MockFile('other/path/qux.h', ''),
MockFile('other/path/qux.cc', ''),
]
results = PRESUBMIT._CheckPatchFiles(mock_input_api, MockOutputApi())
self.assertEqual(1, len(results))
self.assertEqual(1, len(results[0].items))
self.assertTrue('qux.h.orig' in results[0].items[0])
def testGoodFiles(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('other/path/qux.h', ''),
MockFile('other/path/qux.cc', ''),
]
results = PRESUBMIT._CheckPatchFiles(mock_input_api, MockOutputApi())
self.assertEqual(0, len(results))
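# base::Singleton<T> may only be referenced from base/memory/singleton.h itself;
# comments and friend declarations elsewhere are tolerated, real uses are errors.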
class CheckSingletonInHeadersTest(unittest.TestCase):
def testSingletonInArbitraryHeader(self):
diff_singleton_h = ['base::subtle::AtomicWord '
'base::Singleton<Type, Traits, DifferentiatingType>::']
diff_foo_h = ['// base::Singleton<Foo> in comment.',
'friend class base::Singleton<Foo>']
diff_foo2_h = [' //Foo* bar = base::Singleton<Foo>::get();']
diff_bad_h = ['Foo* foo = base::Singleton<Foo>::get();']
mock_input_api = MockInputApi()
mock_input_api.files = [MockAffectedFile('base/memory/singleton.h',
diff_singleton_h),
MockAffectedFile('foo.h', diff_foo_h),
MockAffectedFile('foo2.h', diff_foo2_h),
MockAffectedFile('bad.h', diff_bad_h)]
warnings = PRESUBMIT._CheckSingletonInHeaders(mock_input_api,
MockOutputApi())
self.assertEqual(1, len(warnings))
self.assertEqual(1, len(warnings[0].items))
self.assertEqual('error', warnings[0].type)
self.assertTrue('Found base::Singleton<T>' in warnings[0].message)
def testSingletonInCC(self):
diff_cc = ['Foo* foo = base::Singleton<Foo>::get();']
mock_input_api = MockInputApi()
mock_input_api.files = [MockAffectedFile('some/path/foo.cc', diff_cc)]
warnings = PRESUBMIT._CheckSingletonInHeaders(mock_input_api,
MockOutputApi())
self.assertEqual(0, len(warnings))
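# Each misspelled OS_* macro (e.g. OS_WINDOWS) should produce an error, with a
# suggestion where a close match exists.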
class InvalidOSMacroNamesTest(unittest.TestCase):
def testInvalidOSMacroNames(self):
lines = ['#if defined(OS_WINDOWS)',
' #elif defined(OS_WINDOW)',
' # if defined(OS_MACOSX) || defined(OS_CHROME)',
'# else // defined(OS_MAC)',
'#endif // defined(OS_MACOS)']
errors = PRESUBMIT._CheckForInvalidOSMacrosInFile(
MockInputApi(), MockFile('some/path/foo_platform.cc', lines))
self.assertEqual(len(lines), len(errors))
self.assertTrue(':1 OS_WINDOWS' in errors[0])
self.assertTrue('(did you mean OS_WIN?)' in errors[0])
def testValidOSMacroNames(self):
lines = ['#if defined(%s)' % m for m in PRESUBMIT._VALID_OS_MACROS]
errors = PRESUBMIT._CheckForInvalidOSMacrosInFile(
MockInputApi(), MockFile('some/path/foo_platform.cc', lines))
self.assertEqual(0, len(errors))
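# TARGET_IPHONE_SIMULATOR must not appear in #if/#ifdef checks; every occurrence
# should be reported.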
class InvalidIfDefinedMacroNamesTest(unittest.TestCase):
def testInvalidIfDefinedMacroNames(self):
lines = ['#if defined(TARGET_IPHONE_SIMULATOR)',
'#if !defined(TARGET_IPHONE_SIMULATOR)',
'#elif defined(TARGET_IPHONE_SIMULATOR)',
'#ifdef TARGET_IPHONE_SIMULATOR',
' # ifdef TARGET_IPHONE_SIMULATOR',
'# if defined(VALID) || defined(TARGET_IPHONE_SIMULATOR)',
'# else // defined(TARGET_IPHONE_SIMULATOR)',
'#endif // defined(TARGET_IPHONE_SIMULATOR)']
errors = PRESUBMIT._CheckForInvalidIfDefinedMacrosInFile(
MockInputApi(), MockFile('some/path/source.mm', lines))
self.assertEqual(len(lines), len(errors))
def testValidIfDefinedMacroNames(self):
lines = ['#if defined(FOO)',
'#ifdef BAR']
errors = PRESUBMIT._CheckForInvalidIfDefinedMacrosInFile(
MockInputApi(), MockFile('some/path/source.cc', lines))
self.assertEqual(0, len(errors))
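# _CalculateAddedDeps should return the DEPS files for newly added include_rules
# (including specific_include_rules), ignoring removals, reorderings and grit/jni entries.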
class CheckAddedDepsHaveTestApprovalsTest(unittest.TestCase):
def calculate(self, old_include_rules, old_specific_include_rules,
new_include_rules, new_specific_include_rules):
return PRESUBMIT._CalculateAddedDeps(
os.path, 'include_rules = %r\nspecific_include_rules = %r' % (
old_include_rules, old_specific_include_rules),
'include_rules = %r\nspecific_include_rules = %r' % (
new_include_rules, new_specific_include_rules))
def testCalculateAddedDeps(self):
old_include_rules = [
'+base',
'-chrome',
'+content',
'-grit',
'-grit/",',
'+jni/fooblat.h',
'!sandbox',
]
old_specific_include_rules = {
'compositor\.*': {
'+cc',
},
}
new_include_rules = [
'-ash',
'+base',
'+chrome',
'+components',
'+content',
'+grit',
'+grit/generated_resources.h",',
'+grit/",',
'+jni/fooblat.h',
'+policy',
'+' + os.path.join('third_party', 'WebKit'),
]
new_specific_include_rules = {
'compositor\.*': {
'+cc',
},
'widget\.*': {
'+gpu',
},
}
expected = set([
os.path.join('chrome', 'DEPS'),
os.path.join('gpu', 'DEPS'),
os.path.join('components', 'DEPS'),
os.path.join('policy', 'DEPS'),
os.path.join('third_party', 'WebKit', 'DEPS'),
])
self.assertEqual(
expected,
self.calculate(old_include_rules, old_specific_include_rules,
new_include_rules, new_specific_include_rules))
def testCalculateAddedDepsIgnoresPermutations(self):
old_include_rules = [
'+base',
'+chrome',
]
new_include_rules = [
'+chrome',
'+base',
]
self.assertEqual(set(),
self.calculate(old_include_rules, {}, new_include_rules,
{}))
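# _GetJSONParseError returns None for valid (comment-stripped) JSON and the parse error otherwise.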
class JSONParsingTest(unittest.TestCase):
def testSuccess(self):
input_api = MockInputApi()
filename = 'valid_json.json'
contents = ['// This is a comment.',
'{',
' "key1": ["value1", "value2"],',
' "key2": 3 // This is an inline comment.',
'}'
]
input_api.files = [MockFile(filename, contents)]
self.assertEqual(None,
PRESUBMIT._GetJSONParseError(input_api, filename))
def testFailure(self):
input_api = MockInputApi()
test_data = [
('invalid_json_1.json',
['{ x }'],
'Expecting property name:'),
('invalid_json_2.json',
['// Hello world!',
'{ "hello": "world }'],
'Unterminated string starting at:'),
('invalid_json_3.json',
['{ "a": "b", "c": "d", }'],
'Expecting property name:'),
('invalid_json_4.json',
['{ "a": "b" "c": "d" }'],
'Expecting , delimiter:'),
]
input_api.files = [MockFile(filename, contents)
for (filename, contents, _) in test_data]
for (filename, _, expected_error) in test_data:
actual_error = PRESUBMIT._GetJSONParseError(input_api, filename)
self.assertTrue(expected_error in str(actual_error),
"'%s' not found in '%s'" % (expected_error, actual_error))
def testNoEatComments(self):
input_api = MockInputApi()
file_with_comments = 'file_with_comments.json'
contents_with_comments = ['// This is a comment.',
'{',
' "key1": ["value1", "value2"],',
' "key2": 3 // This is an inline comment.',
'}'
]
file_without_comments = 'file_without_comments.json'
contents_without_comments = ['{',
' "key1": ["value1", "value2"],',
' "key2": 3',
'}'
]
input_api.files = [MockFile(file_with_comments, contents_with_comments),
MockFile(file_without_comments,
contents_without_comments)]
self.assertEqual('No JSON object could be decoded',
str(PRESUBMIT._GetJSONParseError(input_api,
file_with_comments,
eat_comments=False)))
self.assertEqual(None,
PRESUBMIT._GetJSONParseError(input_api,
file_without_comments,
eat_comments=False))
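# _GetIDLParseError returns None for a well-formed extension IDL file and the parser error otherwise.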
class IDLParsingTest(unittest.TestCase):
def testSuccess(self):
input_api = MockInputApi()
filename = 'valid_idl_basics.idl'
contents = ['// Tests a valid IDL file.',
'namespace idl_basics {',
' enum EnumType {',
' name1,',
' name2',
' };',
'',
' dictionary MyType1 {',
' DOMString a;',
' };',
'',
' callback Callback1 = void();',
' callback Callback2 = void(long x);',
' callback Callback3 = void(MyType1 arg);',
' callback Callback4 = void(EnumType type);',
'',
' interface Functions {',
' static void function1();',
' static void function2(long x);',
' static void function3(MyType1 arg);',
' static void function4(Callback1 cb);',
' static void function5(Callback2 cb);',
' static void function6(Callback3 cb);',
' static void function7(Callback4 cb);',
' };',
'',
' interface Events {',
' static void onFoo1();',
' static void onFoo2(long x);',
' static void onFoo2(MyType1 arg);',
' static void onFoo3(EnumType type);',
' };',
'};'
]
input_api.files = [MockFile(filename, contents)]
self.assertEqual(None,
PRESUBMIT._GetIDLParseError(input_api, filename))
def testFailure(self):
input_api = MockInputApi()
test_data = [
('invalid_idl_1.idl',
['//',
'namespace test {',
' dictionary {',
' DOMString s;',
' };',
'};'],
'Unexpected "{" after keyword "dictionary".\n'),
# TODO(yoz): Disabled because it causes the IDL parser to hang.
# See crbug.com/363830.
# ('invalid_idl_2.idl',
# (['namespace test {',
# ' dictionary MissingSemicolon {',
# ' DOMString a',
# ' DOMString b;',
# ' };',
# '};'],
# 'Unexpected symbol DOMString after symbol a.'),
('invalid_idl_3.idl',
['//',
'namespace test {',
' enum MissingComma {',
' name1',
' name2',
' };',
'};'],
'Unexpected symbol name2 after symbol name1.'),
('invalid_idl_4.idl',
['//',
'namespace test {',
' enum TrailingComma {',
' name1,',
' name2,',
' };',
'};'],
'Trailing comma in block.'),
('invalid_idl_5.idl',
['//',
'namespace test {',
' callback Callback1 = void(;',
'};'],
'Unexpected ";" after "(".'),
('invalid_idl_6.idl',
['//',
'namespace test {',
' callback Callback1 = void(long );',
'};'],
'Unexpected ")" after symbol long.'),
('invalid_idl_7.idl',
['//',
'namespace test {',
' interace Events {',
' static void onFoo1();',
' };',
'};'],
'Unexpected symbol Events after symbol interace.'),
('invalid_idl_8.idl',
['//',
'namespace test {',
' interface NotEvent {',
' static void onFoo1();',
' };',
'};'],
'Did not process Interface Interface(NotEvent)'),
('invalid_idl_9.idl',
['//',
'namespace test {',
' interface {',
' static void function1();',
' };',
'};'],
'Interface missing name.'),
]
input_api.files = [MockFile(filename, contents)
for (filename, contents, _) in test_data]
for (filename, _, expected_error) in test_data:
actual_error = PRESUBMIT._GetIDLParseError(input_api, filename)
self.assertTrue(expected_error in str(actual_error),
"'%s' not found in '%s'" % (expected_error, actual_error))
class TryServerMasterTest(unittest.TestCase):
def testTryServerMasters(self):
bots = {
'master.tryserver.chromium.android': [
'android_archive_rel_ng',
'android_arm64_dbg_recipe',
'android_blink_rel',
'android_clang_dbg_recipe',
'android_compile_dbg',
'android_compile_x64_dbg',
'android_compile_x86_dbg',
'android_coverage',
            'android_cronet_tester',
            'android_swarming_rel',
'cast_shell_android',
'linux_android_dbg_ng',
'linux_android_rel_ng',
],
'master.tryserver.chromium.mac': [
'ios_dbg_simulator',
'ios_rel_device',
'ios_rel_device_ninja',
'mac_asan',
'mac_asan_64',
'mac_chromium_compile_dbg',
'mac_chromium_compile_rel',
'mac_chromium_dbg',
'mac_chromium_rel',
'mac_nacl_sdk',
'mac_nacl_sdk_build',
'mac_rel_naclmore',
'mac_x64_rel',
'mac_xcodebuild',
],
'master.tryserver.chromium.linux': [
'chromium_presubmit',
'linux_arm_cross_compile',
'linux_arm_tester',
'linux_chromeos_asan',
'linux_chromeos_browser_asan',
'linux_chromeos_valgrind',
'linux_chromium_chromeos_dbg',
'linux_chromium_chromeos_rel',
'linux_chromium_compile_dbg',
'linux_chromium_compile_rel',
'linux_chromium_dbg',
'linux_chromium_gn_dbg',
'linux_chromium_gn_rel',
'linux_chromium_rel',
'linux_chromium_trusty32_dbg',
'linux_chromium_trusty32_rel',
'linux_chromium_trusty_dbg',
'linux_chromium_trusty_rel',
'linux_clang_tsan',
'linux_ecs_ozone',
'linux_layout',
'linux_layout_asan',
'linux_layout_rel',
'linux_layout_rel_32',
'linux_nacl_sdk',
'linux_nacl_sdk_bionic',
'linux_nacl_sdk_bionic_build',
'linux_nacl_sdk_build',
'linux_redux',
'linux_rel_naclmore',
'linux_rel_precise32',
'linux_valgrind',
'tools_build_presubmit',
],
'master.tryserver.chromium.win': [
'win8_aura',
'win8_chromium_dbg',
'win8_chromium_rel',
'win_chromium_compile_dbg',
'win_chromium_compile_rel',
'win_chromium_dbg',
'win_chromium_rel',
'win_chromium_rel',
'win_chromium_x64_dbg',
'win_chromium_x64_rel',
'win_nacl_sdk',
'win_nacl_sdk_build',
'win_rel_naclmore',
],
}
for master, bots in bots.iteritems():
for bot in bots:
self.assertEqual(master, PRESUBMIT.GetTryServerMasterForBot(bot),
'bot=%s: expected %s, computed %s' % (
bot, master, PRESUBMIT.GetTryServerMasterForBot(bot)))
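# base::UserMetricsAction() names must already be listed in tools/metrics/actions/actions.xml.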
class UserMetricsActionTest(unittest.TestCase):
def testUserMetricsActionInActions(self):
input_api = MockInputApi()
file_with_user_action = 'file_with_user_action.cc'
contents_with_user_action = [
'base::UserMetricsAction("AboutChrome")'
]
input_api.files = [MockFile(file_with_user_action,
contents_with_user_action)]
self.assertEqual(
[], PRESUBMIT._CheckUserActionUpdate(input_api, MockOutputApi()))
def testUserMetricsActionNotAddedToActions(self):
input_api = MockInputApi()
file_with_user_action = 'file_with_user_action.cc'
contents_with_user_action = [
'base::UserMetricsAction("NotInActionsXml")'
]
input_api.files = [MockFile(file_with_user_action,
contents_with_user_action)]
output = PRESUBMIT._CheckUserActionUpdate(input_api, MockOutputApi())
self.assertEqual(
('File %s line %d: %s is missing in '
'tools/metrics/actions/actions.xml. Please run '
'tools/metrics/actions/extract_actions.py to update.'
% (file_with_user_action, 1, 'NotInActionsXml')),
output[0].message)
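# _CheckPydepsNeedsUpdating regenerates each .pydeps file with its recorded command and
# reports stale files, as well as .pydeps files added or removed without updating
# _ALL_PYDEPS_FILES.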
class PydepsNeedsUpdatingTest(unittest.TestCase):
class MockSubprocess(object):
CalledProcessError = subprocess.CalledProcessError
def setUp(self):
mock_all_pydeps = ['A.pydeps', 'B.pydeps']
self.old_ALL_PYDEPS_FILES = PRESUBMIT._ALL_PYDEPS_FILES
PRESUBMIT._ALL_PYDEPS_FILES = mock_all_pydeps
self.mock_input_api = MockInputApi()
self.mock_output_api = MockOutputApi()
self.mock_input_api.subprocess = PydepsNeedsUpdatingTest.MockSubprocess()
self.checker = PRESUBMIT.PydepsChecker(self.mock_input_api, mock_all_pydeps)
self.checker._file_cache = {
'A.pydeps': '# Generated by:\n# CMD A\nA.py\nC.py\n',
'B.pydeps': '# Generated by:\n# CMD B\nB.py\nC.py\n',
}
def tearDown(self):
PRESUBMIT._ALL_PYDEPS_FILES = self.old_ALL_PYDEPS_FILES
def _RunCheck(self):
return PRESUBMIT._CheckPydepsNeedsUpdating(self.mock_input_api,
self.mock_output_api,
checker_for_tests=self.checker)
def testAddedPydep(self):
# PRESUBMIT._CheckPydepsNeedsUpdating is only implemented for Android.
if self.mock_input_api.platform != 'linux2':
return []
self.mock_input_api.files = [
MockAffectedFile('new.pydeps', [], action='A'),
]
self.mock_input_api.CreateMockFileInPath(
[x.LocalPath() for x in self.mock_input_api.AffectedFiles(
include_deletes=True)])
results = self._RunCheck()
self.assertEqual(1, len(results))
self.assertTrue('PYDEPS_FILES' in str(results[0]))
def testPydepNotInSrc(self):
self.mock_input_api.files = [
MockAffectedFile('new.pydeps', [], action='A'),
]
self.mock_input_api.CreateMockFileInPath([])
results = self._RunCheck()
self.assertEqual(0, len(results))
def testRemovedPydep(self):
# PRESUBMIT._CheckPydepsNeedsUpdating is only implemented for Android.
if self.mock_input_api.platform != 'linux2':
return []
self.mock_input_api.files = [
MockAffectedFile(PRESUBMIT._ALL_PYDEPS_FILES[0], [], action='D'),
]
self.mock_input_api.CreateMockFileInPath(
[x.LocalPath() for x in self.mock_input_api.AffectedFiles(
include_deletes=True)])
results = self._RunCheck()
self.assertEqual(1, len(results))
self.assertTrue('PYDEPS_FILES' in str(results[0]))
def testRandomPyIgnored(self):
# PRESUBMIT._CheckPydepsNeedsUpdating is only implemented for Android.
if self.mock_input_api.platform != 'linux2':
return []
self.mock_input_api.files = [
MockAffectedFile('random.py', []),
]
results = self._RunCheck()
self.assertEqual(0, len(results), 'Unexpected results: %r' % results)
def testRelevantPyNoChange(self):
# PRESUBMIT._CheckPydepsNeedsUpdating is only implemented for Android.
if self.mock_input_api.platform != 'linux2':
return []
self.mock_input_api.files = [
MockAffectedFile('A.py', []),
]
def mock_check_output(cmd, shell=False, env=None):
self.assertEqual('CMD A --output ""', cmd)
return self.checker._file_cache['A.pydeps']
self.mock_input_api.subprocess.check_output = mock_check_output
results = self._RunCheck()
self.assertEqual(0, len(results), 'Unexpected results: %r' % results)
def testRelevantPyOneChange(self):
# PRESUBMIT._CheckPydepsNeedsUpdating is only implemented for Android.
if self.mock_input_api.platform != 'linux2':
return []
self.mock_input_api.files = [
MockAffectedFile('A.py', []),
]
def mock_check_output(cmd, shell=False, env=None):
self.assertEqual('CMD A --output ""', cmd)
return 'changed data'
self.mock_input_api.subprocess.check_output = mock_check_output
results = self._RunCheck()
self.assertEqual(1, len(results))
self.assertTrue('File is stale' in str(results[0]))
def testRelevantPyTwoChanges(self):
# PRESUBMIT._CheckPydepsNeedsUpdating is only implemented for Android.
if self.mock_input_api.platform != 'linux2':
return []
self.mock_input_api.files = [
MockAffectedFile('C.py', []),
]
def mock_check_output(cmd, shell=False, env=None):
return 'changed data'
self.mock_input_api.subprocess.check_output = mock_check_output
results = self._RunCheck()
self.assertEqual(2, len(results))
self.assertTrue('File is stale' in str(results[0]))
self.assertTrue('File is stale' in str(results[1]))
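# _CheckForIncludeGuards enforces Chromium-style include guards (PATH_TO_FILE_H_);
# misspelled, Blink-style and non-covering guards should be reported.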
class IncludeGuardTest(unittest.TestCase):
def testIncludeGuardChecks(self):
mock_input_api = MockInputApi()
mock_output_api = MockOutputApi()
mock_input_api.files = [
MockAffectedFile('content/browser/thing/foo.h', [
'// Comment',
'#ifndef CONTENT_BROWSER_THING_FOO_H_',
'#define CONTENT_BROWSER_THING_FOO_H_',
'struct McBoatFace;',
'#endif // CONTENT_BROWSER_THING_FOO_H_',
]),
MockAffectedFile('content/browser/thing/bar.h', [
'#ifndef CONTENT_BROWSER_THING_BAR_H_',
'#define CONTENT_BROWSER_THING_BAR_H_',
'namespace content {',
'#endif // CONTENT_BROWSER_THING_BAR_H_',
'} // namespace content',
]),
MockAffectedFile('content/browser/test1.h', [
'namespace content {',
'} // namespace content',
]),
MockAffectedFile('content\\browser\\win.h', [
'#ifndef CONTENT_BROWSER_WIN_H_',
'#define CONTENT_BROWSER_WIN_H_',
'struct McBoatFace;',
'#endif // CONTENT_BROWSER_WIN_H_',
]),
MockAffectedFile('content/browser/test2.h', [
'// Comment',
'#ifndef CONTENT_BROWSER_TEST2_H_',
'struct McBoatFace;',
'#endif // CONTENT_BROWSER_TEST2_H_',
]),
MockAffectedFile('content/browser/internal.h', [
'// Comment',
'#ifndef CONTENT_BROWSER_INTERNAL_H_',
'#define CONTENT_BROWSER_INTERNAL_H_',
'// Comment',
'#ifndef INTERNAL_CONTENT_BROWSER_INTERNAL_H_',
'#define INTERNAL_CONTENT_BROWSER_INTERNAL_H_',
'namespace internal {',
'} // namespace internal',
'#endif // INTERNAL_CONTENT_BROWSER_THING_BAR_H_',
'namespace content {',
'} // namespace content',
'#endif // CONTENT_BROWSER_THING_BAR_H_',
]),
MockAffectedFile('content/browser/thing/foo.cc', [
'// This is a non-header.',
]),
MockAffectedFile('content/browser/disabled.h', [
'// no-include-guard-because-multiply-included',
'struct McBoatFace;',
]),
# New files don't allow misspelled include guards.
MockAffectedFile('content/browser/spleling.h', [
'#ifndef CONTENT_BROWSER_SPLLEING_H_',
'#define CONTENT_BROWSER_SPLLEING_H_',
'struct McBoatFace;',
'#endif // CONTENT_BROWSER_SPLLEING_H_',
]),
# New files don't allow + in include guards.
MockAffectedFile('content/browser/foo+bar.h', [
'#ifndef CONTENT_BROWSER_FOO+BAR_H_',
'#define CONTENT_BROWSER_FOO+BAR_H_',
'struct McBoatFace;',
'#endif // CONTENT_BROWSER_FOO+BAR_H_',
]),
# Old files allow misspelled include guards (for now).
MockAffectedFile('chrome/old.h', [
'// New contents',
'#ifndef CHROME_ODL_H_',
'#define CHROME_ODL_H_',
'#endif // CHROME_ODL_H_',
], [
'// Old contents',
'#ifndef CHROME_ODL_H_',
'#define CHROME_ODL_H_',
'#endif // CHROME_ODL_H_',
]),
# Using a Blink style include guard outside Blink is wrong.
MockAffectedFile('content/NotInBlink.h', [
'#ifndef NotInBlink_h',
'#define NotInBlink_h',
'struct McBoatFace;',
'#endif // NotInBlink_h',
]),
# Using a Blink style include guard in Blink is no longer ok.
MockAffectedFile('third_party/blink/InBlink.h', [
'#ifndef InBlink_h',
'#define InBlink_h',
'struct McBoatFace;',
'#endif // InBlink_h',
]),
# Using a bad include guard in Blink is not ok.
MockAffectedFile('third_party/blink/AlsoInBlink.h', [
'#ifndef WrongInBlink_h',
'#define WrongInBlink_h',
'struct McBoatFace;',
'#endif // WrongInBlink_h',
]),
# Using a bad include guard in Blink is not accepted even if
# it's an old file.
MockAffectedFile('third_party/blink/StillInBlink.h', [
'// New contents',
'#ifndef AcceptedInBlink_h',
'#define AcceptedInBlink_h',
'struct McBoatFace;',
'#endif // AcceptedInBlink_h',
], [
'// Old contents',
'#ifndef AcceptedInBlink_h',
'#define AcceptedInBlink_h',
'struct McBoatFace;',
'#endif // AcceptedInBlink_h',
]),
# Using a non-Chromium include guard in third_party
# (outside blink) is accepted.
MockAffectedFile('third_party/foo/some_file.h', [
'#ifndef REQUIRED_RPCNDR_H_',
'#define REQUIRED_RPCNDR_H_',
'struct SomeFileFoo;',
'#endif // REQUIRED_RPCNDR_H_',
]),
# Not having proper include guard in *_message_generator.h
# for old IPC messages is allowed.
MockAffectedFile('content/common/content_message_generator.h', [
'#undef CONTENT_COMMON_FOO_MESSAGES_H_',
'#include "content/common/foo_messages.h"',
'#ifndef CONTENT_COMMON_FOO_MESSAGES_H_',
'#error "Failed to include content/common/foo_messages.h"',
'#endif',
]),
]
msgs = PRESUBMIT._CheckForIncludeGuards(
mock_input_api, mock_output_api)
expected_fail_count = 8
self.assertEqual(expected_fail_count, len(msgs),
'Expected %d items, found %d: %s'
% (expected_fail_count, len(msgs), msgs))
self.assertEqual(msgs[0].items, ['content/browser/thing/bar.h'])
self.assertEqual(msgs[0].message,
'Include guard CONTENT_BROWSER_THING_BAR_H_ '
'not covering the whole file')
self.assertEqual(msgs[1].items, ['content/browser/test1.h'])
self.assertEqual(msgs[1].message,
'Missing include guard CONTENT_BROWSER_TEST1_H_')
self.assertEqual(msgs[2].items, ['content/browser/test2.h:3'])
self.assertEqual(msgs[2].message,
'Missing "#define CONTENT_BROWSER_TEST2_H_" for '
'include guard')
self.assertEqual(msgs[3].items, ['content/browser/spleling.h:1'])
self.assertEqual(msgs[3].message,
'Header using the wrong include guard name '
'CONTENT_BROWSER_SPLLEING_H_')
self.assertEqual(msgs[4].items, ['content/browser/foo+bar.h'])
self.assertEqual(msgs[4].message,
'Missing include guard CONTENT_BROWSER_FOO_BAR_H_')
self.assertEqual(msgs[5].items, ['content/NotInBlink.h:1'])
self.assertEqual(msgs[5].message,
'Header using the wrong include guard name '
'NotInBlink_h')
self.assertEqual(msgs[6].items, ['third_party/blink/InBlink.h:1'])
self.assertEqual(msgs[6].message,
'Header using the wrong include guard name '
'InBlink_h')
self.assertEqual(msgs[7].items, ['third_party/blink/AlsoInBlink.h:1'])
self.assertEqual(msgs[7].message,
'Header using the wrong include guard name '
'WrongInBlink_h')
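# Deprecated android.test.suitebuilder annotations should be replaced by their
# android.support.test.filters equivalents.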
class AndroidDeprecatedTestAnnotationTest(unittest.TestCase):
def testCheckAndroidTestAnnotationUsage(self):
mock_input_api = MockInputApi()
mock_output_api = MockOutputApi()
mock_input_api.files = [
MockAffectedFile('LalaLand.java', [
'random stuff'
]),
MockAffectedFile('CorrectUsage.java', [
'import android.support.test.filters.LargeTest;',
'import android.support.test.filters.MediumTest;',
'import android.support.test.filters.SmallTest;',
]),
MockAffectedFile('UsedDeprecatedLargeTestAnnotation.java', [
'import android.test.suitebuilder.annotation.LargeTest;',
]),
MockAffectedFile('UsedDeprecatedMediumTestAnnotation.java', [
'import android.test.suitebuilder.annotation.MediumTest;',
]),
MockAffectedFile('UsedDeprecatedSmallTestAnnotation.java', [
'import android.test.suitebuilder.annotation.SmallTest;',
]),
MockAffectedFile('UsedDeprecatedSmokeAnnotation.java', [
'import android.test.suitebuilder.annotation.Smoke;',
])
]
msgs = PRESUBMIT._CheckAndroidTestAnnotationUsage(
mock_input_api, mock_output_api)
self.assertEqual(1, len(msgs),
'Expected %d items, found %d: %s'
% (1, len(msgs), msgs))
self.assertEqual(4, len(msgs[0].items),
'Expected %d items, found %d: %s'
% (4, len(msgs[0].items), msgs[0].items))
self.assertTrue('UsedDeprecatedLargeTestAnnotation.java:1' in msgs[0].items,
'UsedDeprecatedLargeTestAnnotation not found in errors')
self.assertTrue('UsedDeprecatedMediumTestAnnotation.java:1'
in msgs[0].items,
'UsedDeprecatedMediumTestAnnotation not found in errors')
self.assertTrue('UsedDeprecatedSmallTestAnnotation.java:1' in msgs[0].items,
'UsedDeprecatedSmallTestAnnotation not found in errors')
self.assertTrue('UsedDeprecatedSmokeAnnotation.java:1' in msgs[0].items,
'UsedDeprecatedSmokeAnnotation not found in errors')
class AndroidDeprecatedJUnitFrameworkTest(unittest.TestCase):
def testCheckAndroidTestJUnitFramework(self):
mock_input_api = MockInputApi()
mock_output_api = MockOutputApi()
mock_input_api.files = [
MockAffectedFile('LalaLand.java', [
'random stuff'
]),
MockAffectedFile('CorrectUsage.java', [
'import org.junit.ABC',
'import org.junit.XYZ;',
]),
MockAffectedFile('UsedDeprecatedJUnit.java', [
'import junit.framework.*;',
]),
MockAffectedFile('UsedDeprecatedJUnitAssert.java', [
'import junit.framework.Assert;',
]),
]
msgs = PRESUBMIT._CheckAndroidTestJUnitFrameworkImport(
mock_input_api, mock_output_api)
self.assertEqual(1, len(msgs),
'Expected %d items, found %d: %s'
% (1, len(msgs), msgs))
self.assertEqual(2, len(msgs[0].items),
'Expected %d items, found %d: %s'
% (2, len(msgs[0].items), msgs[0].items))
self.assertTrue('UsedDeprecatedJUnit.java:1' in msgs[0].items,
'UsedDeprecatedJUnit.java not found in errors')
self.assertTrue('UsedDeprecatedJUnitAssert.java:1'
in msgs[0].items,
'UsedDeprecatedJUnitAssert not found in errors')
class AndroidJUnitBaseClassTest(unittest.TestCase):
def testCheckAndroidTestJUnitBaseClass(self):
mock_input_api = MockInputApi()
mock_output_api = MockOutputApi()
mock_input_api.files = [
MockAffectedFile('LalaLand.java', [
'random stuff'
]),
MockAffectedFile('CorrectTest.java', [
'@RunWith(ABC.class);'
'public class CorrectTest {',
'}',
]),
MockAffectedFile('HistoricallyIncorrectTest.java', [
'public class Test extends BaseCaseA {',
'}',
], old_contents=[
'public class Test extends BaseCaseB {',
'}',
]),
MockAffectedFile('CorrectTestWithInterface.java', [
'@RunWith(ABC.class);'
'public class CorrectTest implement Interface {',
'}',
]),
MockAffectedFile('IncorrectTest.java', [
'public class IncorrectTest extends TestCase {',
'}',
]),
MockAffectedFile('IncorrectWithInterfaceTest.java', [
'public class Test implements X extends BaseClass {',
'}',
]),
MockAffectedFile('IncorrectMultiLineTest.java', [
'public class Test implements X, Y, Z',
' extends TestBase {',
'}',
]),
]
msgs = PRESUBMIT._CheckAndroidTestJUnitInheritance(
mock_input_api, mock_output_api)
self.assertEqual(1, len(msgs),
'Expected %d items, found %d: %s'
% (1, len(msgs), msgs))
self.assertEqual(3, len(msgs[0].items),
'Expected %d items, found %d: %s'
% (3, len(msgs[0].items), msgs[0].items))
self.assertTrue('IncorrectTest.java:1' in msgs[0].items,
'IncorrectTest not found in errors')
self.assertTrue('IncorrectWithInterfaceTest.java:1'
in msgs[0].items,
'IncorrectWithInterfaceTest not found in errors')
self.assertTrue('IncorrectMultiLineTest.java:2' in msgs[0].items,
'IncorrectMultiLineTest not found in errors')
class AndroidDebuggableBuildTest(unittest.TestCase):
def testCheckAndroidDebuggableBuild(self):
mock_input_api = MockInputApi()
mock_output_api = MockOutputApi()
mock_input_api.files = [
MockAffectedFile('RandomStuff.java', [
'random stuff'
]),
MockAffectedFile('CorrectUsage.java', [
'import org.chromium.base.BuildInfo;',
'some random stuff',
'boolean isOsDebuggable = BuildInfo.isDebugAndroid();',
]),
MockAffectedFile('JustCheckUserdebugBuild.java', [
'import android.os.Build;',
'some random stuff',
'boolean isOsDebuggable = Build.TYPE.equals("userdebug")',
]),
MockAffectedFile('JustCheckEngineeringBuild.java', [
'import android.os.Build;',
'some random stuff',
'boolean isOsDebuggable = "eng".equals(Build.TYPE)',
]),
MockAffectedFile('UsedBuildType.java', [
'import android.os.Build;',
'some random stuff',
'boolean isOsDebuggable = Build.TYPE.equals("userdebug")'
'|| "eng".equals(Build.TYPE)',
]),
MockAffectedFile('UsedExplicitBuildType.java', [
'some random stuff',
'boolean isOsDebuggable = android.os.Build.TYPE.equals("userdebug")'
'|| "eng".equals(android.os.Build.TYPE)',
]),
]
msgs = PRESUBMIT._CheckAndroidDebuggableBuild(
mock_input_api, mock_output_api)
self.assertEqual(1, len(msgs),
'Expected %d items, found %d: %s'
% (1, len(msgs), msgs))
self.assertEqual(4, len(msgs[0].items),
'Expected %d items, found %d: %s'
% (4, len(msgs[0].items), msgs[0].items))
self.assertTrue('JustCheckUserdebugBuild.java:3' in msgs[0].items)
self.assertTrue('JustCheckEngineeringBuild.java:3' in msgs[0].items)
self.assertTrue('UsedBuildType.java:3' in msgs[0].items)
self.assertTrue('UsedExplicitBuildType.java:2' in msgs[0].items)
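# _CheckAndroidCrLogUsage requires org.chromium.base.Log with a properly declared TAG
# constant (at most 20 characters, no dots) and flags direct android.util.Log usage.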
class LogUsageTest(unittest.TestCase):
def testCheckAndroidCrLogUsage(self):
mock_input_api = MockInputApi()
mock_output_api = MockOutputApi()
mock_input_api.files = [
MockAffectedFile('RandomStuff.java', [
'random stuff'
]),
MockAffectedFile('HasAndroidLog.java', [
'import android.util.Log;',
'some random stuff',
'Log.d("TAG", "foo");',
]),
MockAffectedFile('HasExplicitUtilLog.java', [
'some random stuff',
'android.util.Log.d("TAG", "foo");',
]),
MockAffectedFile('IsInBasePackage.java', [
'package org.chromium.base;',
'private static final String TAG = "cr_Foo";',
'Log.d(TAG, "foo");',
]),
MockAffectedFile('IsInBasePackageButImportsLog.java', [
'package org.chromium.base;',
'import android.util.Log;',
'private static final String TAG = "cr_Foo";',
'Log.d(TAG, "foo");',
]),
MockAffectedFile('HasBothLog.java', [
'import org.chromium.base.Log;',
'some random stuff',
'private static final String TAG = "cr_Foo";',
'Log.d(TAG, "foo");',
'android.util.Log.d("TAG", "foo");',
]),
MockAffectedFile('HasCorrectTag.java', [
'import org.chromium.base.Log;',
'some random stuff',
'private static final String TAG = "cr_Foo";',
'Log.d(TAG, "foo");',
]),
MockAffectedFile('HasOldTag.java', [
'import org.chromium.base.Log;',
'some random stuff',
'private static final String TAG = "cr.Foo";',
'Log.d(TAG, "foo");',
]),
MockAffectedFile('HasDottedTag.java', [
'import org.chromium.base.Log;',
'some random stuff',
'private static final String TAG = "cr_foo.bar";',
'Log.d(TAG, "foo");',
]),
MockAffectedFile('HasDottedTagPublic.java', [
'import org.chromium.base.Log;',
'some random stuff',
'public static final String TAG = "cr_foo.bar";',
'Log.d(TAG, "foo");',
]),
MockAffectedFile('HasNoTagDecl.java', [
'import org.chromium.base.Log;',
'some random stuff',
'Log.d(TAG, "foo");',
]),
MockAffectedFile('HasIncorrectTagDecl.java', [
'import org.chromium.base.Log;',
'private static final String TAHG = "cr_Foo";',
'some random stuff',
'Log.d(TAG, "foo");',
]),
MockAffectedFile('HasInlineTag.java', [
'import org.chromium.base.Log;',
'some random stuff',
'private static final String TAG = "cr_Foo";',
'Log.d("TAG", "foo");',
]),
MockAffectedFile('HasUnprefixedTag.java', [
'import org.chromium.base.Log;',
'some random stuff',
'private static final String TAG = "rubbish";',
'Log.d(TAG, "foo");',
]),
MockAffectedFile('HasTooLongTag.java', [
'import org.chromium.base.Log;',
'some random stuff',
'private static final String TAG = "21_charachers_long___";',
'Log.d(TAG, "foo");',
]),
]
msgs = PRESUBMIT._CheckAndroidCrLogUsage(
mock_input_api, mock_output_api)
self.assertEqual(5, len(msgs),
'Expected %d items, found %d: %s' % (5, len(msgs), msgs))
# Declaration format
nb = len(msgs[0].items)
self.assertEqual(2, nb,
'Expected %d items, found %d: %s' % (2, nb, msgs[0].items))
self.assertTrue('HasNoTagDecl.java' in msgs[0].items)
self.assertTrue('HasIncorrectTagDecl.java' in msgs[0].items)
# Tag length
nb = len(msgs[1].items)
self.assertEqual(1, nb,
'Expected %d items, found %d: %s' % (1, nb, msgs[1].items))
self.assertTrue('HasTooLongTag.java' in msgs[1].items)
# Tag must be a variable named TAG
nb = len(msgs[2].items)
self.assertEqual(1, nb,
'Expected %d items, found %d: %s' % (1, nb, msgs[2].items))
self.assertTrue('HasInlineTag.java:4' in msgs[2].items)
# Util Log usage
nb = len(msgs[3].items)
self.assertEqual(2, nb,
'Expected %d items, found %d: %s' % (2, nb, msgs[3].items))
self.assertTrue('HasAndroidLog.java:3' in msgs[3].items)
self.assertTrue('IsInBasePackageButImportsLog.java:4' in msgs[3].items)
    # Tag must not contain dots
    nb = len(msgs[4].items)
    self.assertEqual(3, nb,
                     'Expected %d items, found %d: %s' % (3, nb, msgs[4].items))
self.assertTrue('HasDottedTag.java' in msgs[4].items)
self.assertTrue('HasDottedTagPublic.java' in msgs[4].items)
self.assertTrue('HasOldTag.java' in msgs[4].items)
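# Hard-coded support.google.com answer URLs with numeric IDs should be flagged;
# the ?p= form is allowed.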
class GoogleAnswerUrlFormatTest(unittest.TestCase):
def testCatchAnswerUrlId(self):
input_api = MockInputApi()
input_api.files = [
MockFile('somewhere/file.cc',
['char* host = '
' "https://support.google.com/chrome/answer/123456";']),
MockFile('somewhere_else/file.cc',
['char* host = '
' "https://support.google.com/chrome/a/answer/123456";']),
]
warnings = PRESUBMIT._CheckGoogleSupportAnswerUrl(
input_api, MockOutputApi())
self.assertEqual(1, len(warnings))
self.assertEqual(2, len(warnings[0].items))
def testAllowAnswerUrlParam(self):
input_api = MockInputApi()
input_api.files = [
MockFile('somewhere/file.cc',
['char* host = '
' "https://support.google.com/chrome/?p=cpn_crash_reports";']),
]
warnings = PRESUBMIT._CheckGoogleSupportAnswerUrl(
input_api, MockOutputApi())
self.assertEqual(0, len(warnings))
class HardcodedGoogleHostsTest(unittest.TestCase):
def testWarnOnAssignedLiterals(self):
input_api = MockInputApi()
input_api.files = [
MockFile('content/file.cc',
['char* host = "https://www.google.com";']),
MockFile('content/file.cc',
['char* host = "https://www.googleapis.com";']),
MockFile('content/file.cc',
['char* host = "https://clients1.google.com";']),
]
warnings = PRESUBMIT._CheckHardcodedGoogleHostsInLowerLayers(
input_api, MockOutputApi())
self.assertEqual(1, len(warnings))
self.assertEqual(3, len(warnings[0].items))
def testAllowInComment(self):
input_api = MockInputApi()
input_api.files = [
MockFile('content/file.cc',
['char* host = "https://www.aol.com"; // google.com'])
]
warnings = PRESUBMIT._CheckHardcodedGoogleHostsInLowerLayers(
input_api, MockOutputApi())
self.assertEqual(0, len(warnings))
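# Registering SYNCABLE_PREF / SYNCABLE_PRIORITY_PREF in Chrome OS-only directories
# should warn; SYNCABLE_OS_PREF and cross-platform directories are fine.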
class ChromeOsSyncedPrefRegistrationTest(unittest.TestCase):
def testWarnsOnChromeOsDirectories(self):
input_api = MockInputApi()
input_api.files = [
MockFile('ash/file.cc',
['PrefRegistrySyncable::SYNCABLE_PREF']),
MockFile('chrome/browser/chromeos/file.cc',
['PrefRegistrySyncable::SYNCABLE_PREF']),
MockFile('chromeos/file.cc',
['PrefRegistrySyncable::SYNCABLE_PREF']),
MockFile('components/arc/file.cc',
['PrefRegistrySyncable::SYNCABLE_PREF']),
MockFile('components/exo/file.cc',
['PrefRegistrySyncable::SYNCABLE_PREF']),
]
warnings = PRESUBMIT._CheckChromeOsSyncedPrefRegistration(
input_api, MockOutputApi())
self.assertEqual(1, len(warnings))
def testDoesNotWarnOnSyncOsPref(self):
input_api = MockInputApi()
input_api.files = [
MockFile('chromeos/file.cc',
['PrefRegistrySyncable::SYNCABLE_OS_PREF']),
]
warnings = PRESUBMIT._CheckChromeOsSyncedPrefRegistration(
input_api, MockOutputApi())
self.assertEqual(0, len(warnings))
def testDoesNotWarnOnCrossPlatformDirectories(self):
input_api = MockInputApi()
input_api.files = [
MockFile('chrome/browser/ui/file.cc',
['PrefRegistrySyncable::SYNCABLE_PREF']),
MockFile('components/sync/file.cc',
['PrefRegistrySyncable::SYNCABLE_PREF']),
MockFile('content/browser/file.cc',
['PrefRegistrySyncable::SYNCABLE_PREF']),
]
warnings = PRESUBMIT._CheckChromeOsSyncedPrefRegistration(
input_api, MockOutputApi())
self.assertEqual(0, len(warnings))
def testSeparateWarningForPriorityPrefs(self):
input_api = MockInputApi()
input_api.files = [
MockFile('chromeos/file.cc',
['PrefRegistrySyncable::SYNCABLE_PREF',
'PrefRegistrySyncable::SYNCABLE_PRIORITY_PREF']),
]
warnings = PRESUBMIT._CheckChromeOsSyncedPrefRegistration(
input_api, MockOutputApi())
self.assertEqual(2, len(warnings))
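# _CheckUselessForwardDeclarations warns about forward declarations in headers
# (outside third_party) that are never otherwise referenced in the same file.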
class ForwardDeclarationTest(unittest.TestCase):
def testCheckHeadersOnlyOutsideThirdParty(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('somewhere/file.cc', [
'class DummyClass;'
]),
MockAffectedFile('third_party/header.h', [
'class DummyClass;'
])
]
warnings = PRESUBMIT._CheckUselessForwardDeclarations(mock_input_api,
MockOutputApi())
self.assertEqual(0, len(warnings))
def testNoNestedDeclaration(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('somewhere/header.h', [
'class SomeClass {',
' protected:',
' class NotAMatch;',
'};'
])
]
warnings = PRESUBMIT._CheckUselessForwardDeclarations(mock_input_api,
MockOutputApi())
self.assertEqual(0, len(warnings))
def testSubStrings(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('somewhere/header.h', [
'class NotUsefulClass;',
'struct SomeStruct;',
'UsefulClass *p1;',
'SomeStructPtr *p2;'
])
]
warnings = PRESUBMIT._CheckUselessForwardDeclarations(mock_input_api,
MockOutputApi())
self.assertEqual(2, len(warnings))
def testUselessForwardDeclaration(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('somewhere/header.h', [
'class DummyClass;',
'struct DummyStruct;',
'class UsefulClass;',
'std::unique_ptr<UsefulClass> p;'
])
]
warnings = PRESUBMIT._CheckUselessForwardDeclarations(mock_input_api,
MockOutputApi())
self.assertEqual(2, len(warnings))
def testBlinkHeaders(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('third_party/blink/header.h', [
'class DummyClass;',
'struct DummyStruct;',
]),
MockAffectedFile('third_party\\blink\\header.h', [
'class DummyClass;',
'struct DummyStruct;',
])
]
warnings = PRESUBMIT._CheckUselessForwardDeclarations(mock_input_api,
MockOutputApi())
self.assertEqual(4, len(warnings))
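# Relative "../" includes are disallowed in C++ files; third_party outside Blink is exempt.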
class RelativeIncludesTest(unittest.TestCase):
def testThirdPartyNotWebKitIgnored(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('third_party/test.cpp', '#include "../header.h"'),
MockAffectedFile('third_party/test/test.cpp', '#include "../header.h"'),
]
mock_output_api = MockOutputApi()
errors = PRESUBMIT._CheckForRelativeIncludes(
mock_input_api, mock_output_api)
self.assertEqual(0, len(errors))
def testNonCppFileIgnored(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('test.py', '#include "../header.h"'),
]
mock_output_api = MockOutputApi()
errors = PRESUBMIT._CheckForRelativeIncludes(
mock_input_api, mock_output_api)
self.assertEqual(0, len(errors))
def testInnocuousChangesAllowed(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('test.cpp', '#include "header.h"'),
MockAffectedFile('test2.cpp', '../'),
]
mock_output_api = MockOutputApi()
errors = PRESUBMIT._CheckForRelativeIncludes(
mock_input_api, mock_output_api)
self.assertEqual(0, len(errors))
def testRelativeIncludeNonWebKitProducesError(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('test.cpp', ['#include "../header.h"']),
]
mock_output_api = MockOutputApi()
errors = PRESUBMIT._CheckForRelativeIncludes(
mock_input_api, mock_output_api)
self.assertEqual(1, len(errors))
def testRelativeIncludeWebKitProducesError(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('third_party/blink/test.cpp',
['#include "../header.h']),
]
mock_output_api = MockOutputApi()
errors = PRESUBMIT._CheckForRelativeIncludes(
mock_input_api, mock_output_api)
self.assertEqual(1, len(errors))
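# Including .cc/.cpp files is disallowed; .inc files and non-C++ sources are ignored.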
class CCIncludeTest(unittest.TestCase):
def testThirdPartyNotBlinkIgnored(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('third_party/test.cpp', '#include "file.cc"'),
]
mock_output_api = MockOutputApi()
errors = PRESUBMIT._CheckForCcIncludes(
mock_input_api, mock_output_api)
self.assertEqual(0, len(errors))
def testPythonFileIgnored(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('test.py', '#include "file.cc"'),
]
mock_output_api = MockOutputApi()
errors = PRESUBMIT._CheckForCcIncludes(
mock_input_api, mock_output_api)
self.assertEqual(0, len(errors))
def testIncFilesAccepted(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('test.py', '#include "file.inc"'),
]
mock_output_api = MockOutputApi()
errors = PRESUBMIT._CheckForCcIncludes(
mock_input_api, mock_output_api)
self.assertEqual(0, len(errors))
def testInnocuousChangesAllowed(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('test.cpp', '#include "header.h"'),
MockAffectedFile('test2.cpp', 'Something "file.cc"'),
]
mock_output_api = MockOutputApi()
errors = PRESUBMIT._CheckForCcIncludes(
mock_input_api, mock_output_api)
self.assertEqual(0, len(errors))
def testCcIncludeNonBlinkProducesError(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('test.cpp', ['#include "file.cc"']),
]
mock_output_api = MockOutputApi()
errors = PRESUBMIT._CheckForCcIncludes(
mock_input_api, mock_output_api)
self.assertEqual(1, len(errors))
def testCppIncludeBlinkProducesError(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('third_party/blink/test.cpp',
['#include "foo/file.cpp"']),
]
mock_output_api = MockOutputApi()
errors = PRESUBMIT._CheckForCcIncludes(
mock_input_api, mock_output_api)
self.assertEqual(1, len(errors))
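# Newly added headers should be referenced by a GN build file (BUILD.gn or *.gni)
# in the same change.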
class NewHeaderWithoutGnChangeTest(unittest.TestCase):
def testAddHeaderWithoutGn(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('base/stuff.h', ''),
]
warnings = PRESUBMIT._CheckNewHeaderWithoutGnChange(
mock_input_api, MockOutputApi())
self.assertEqual(1, len(warnings))
self.assertTrue('base/stuff.h' in warnings[0].items)
def testModifyHeader(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('base/stuff.h', '', action='M'),
]
warnings = PRESUBMIT._CheckNewHeaderWithoutGnChange(
mock_input_api, MockOutputApi())
self.assertEqual(0, len(warnings))
def testDeleteHeader(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('base/stuff.h', '', action='D'),
]
warnings = PRESUBMIT._CheckNewHeaderWithoutGnChange(
mock_input_api, MockOutputApi())
self.assertEqual(0, len(warnings))
def testAddHeaderWithGn(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('base/stuff.h', ''),
MockAffectedFile('base/BUILD.gn', 'stuff.h'),
]
warnings = PRESUBMIT._CheckNewHeaderWithoutGnChange(
mock_input_api, MockOutputApi())
self.assertEqual(0, len(warnings))
def testAddHeaderWithGni(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('base/stuff.h', ''),
MockAffectedFile('base/files.gni', 'stuff.h'),
]
warnings = PRESUBMIT._CheckNewHeaderWithoutGnChange(
mock_input_api, MockOutputApi())
self.assertEqual(0, len(warnings))
def testAddHeaderWithOther(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('base/stuff.h', ''),
MockAffectedFile('base/stuff.cc', 'stuff.h'),
]
warnings = PRESUBMIT._CheckNewHeaderWithoutGnChange(
mock_input_api, MockOutputApi())
self.assertEqual(1, len(warnings))
def testAddHeaderWithWrongGn(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('base/stuff.h', ''),
MockAffectedFile('base/BUILD.gn', 'stuff_h'),
]
warnings = PRESUBMIT._CheckNewHeaderWithoutGnChange(
mock_input_api, MockOutputApi())
self.assertEqual(1, len(warnings))
def testAddHeadersWithGn(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('base/stuff.h', ''),
MockAffectedFile('base/another.h', ''),
MockAffectedFile('base/BUILD.gn', 'another.h\nstuff.h'),
]
warnings = PRESUBMIT._CheckNewHeaderWithoutGnChange(
mock_input_api, MockOutputApi())
self.assertEqual(0, len(warnings))
def testAddHeadersWithWrongGn(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('base/stuff.h', ''),
MockAffectedFile('base/another.h', ''),
MockAffectedFile('base/BUILD.gn', 'another_h\nstuff.h'),
]
warnings = PRESUBMIT._CheckNewHeaderWithoutGnChange(
mock_input_api, MockOutputApi())
self.assertEqual(1, len(warnings))
self.assertFalse('base/stuff.h' in warnings[0].items)
self.assertTrue('base/another.h' in warnings[0].items)
def testAddHeadersWithWrongGn2(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('base/stuff.h', ''),
MockAffectedFile('base/another.h', ''),
MockAffectedFile('base/BUILD.gn', 'another_h\nstuff_h'),
]
warnings = PRESUBMIT._CheckNewHeaderWithoutGnChange(
mock_input_api, MockOutputApi())
self.assertEqual(1, len(warnings))
self.assertTrue('base/stuff.h' in warnings[0].items)
self.assertTrue('base/another.h' in warnings[0].items)
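# Message bodies in chromium_strings.grd must not say "Chrome" and
# google_chrome_strings.grd must not say "Chromium"; the desc attribute is exempt.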
class CorrectProductNameInMessagesTest(unittest.TestCase):
def testProductNameInDesc(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('chrome/app/google_chrome_strings.grd', [
'<message name="Foo" desc="Welcome to Chrome">',
' Welcome to Chrome!',
'</message>',
]),
MockAffectedFile('chrome/app/chromium_strings.grd', [
'<message name="Bar" desc="Welcome to Chrome">',
' Welcome to Chromium!',
'</message>',
]),
]
warnings = PRESUBMIT._CheckCorrectProductNameInMessages(
mock_input_api, MockOutputApi())
self.assertEqual(0, len(warnings))
def testChromeInChromium(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('chrome/app/google_chrome_strings.grd', [
'<message name="Foo" desc="Welcome to Chrome">',
' Welcome to Chrome!',
'</message>',
]),
MockAffectedFile('chrome/app/chromium_strings.grd', [
'<message name="Bar" desc="Welcome to Chrome">',
' Welcome to Chrome!',
'</message>',
]),
]
warnings = PRESUBMIT._CheckCorrectProductNameInMessages(
mock_input_api, MockOutputApi())
self.assertEqual(1, len(warnings))
self.assertTrue('chrome/app/chromium_strings.grd' in warnings[0].items[0])
def testChromiumInChrome(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('chrome/app/google_chrome_strings.grd', [
'<message name="Foo" desc="Welcome to Chrome">',
' Welcome to Chromium!',
'</message>',
]),
MockAffectedFile('chrome/app/chromium_strings.grd', [
'<message name="Bar" desc="Welcome to Chrome">',
' Welcome to Chromium!',
'</message>',
]),
]
warnings = PRESUBMIT._CheckCorrectProductNameInMessages(
mock_input_api, MockOutputApi())
self.assertEqual(1, len(warnings))
self.assertTrue(
'chrome/app/google_chrome_strings.grd:2' in warnings[0].items[0])
def testMultipleInstances(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('chrome/app/chromium_strings.grd', [
'<message name="Bar" desc="Welcome to Chrome">',
' Welcome to Chrome!',
'</message>',
'<message name="Baz" desc="A correct message">',
' Chromium is the software you are using.',
'</message>',
'<message name="Bat" desc="An incorrect message">',
' Google Chrome is the software you are using.',
'</message>',
]),
]
warnings = PRESUBMIT._CheckCorrectProductNameInMessages(
mock_input_api, MockOutputApi())
self.assertEqual(1, len(warnings))
self.assertTrue(
'chrome/app/chromium_strings.grd:2' in warnings[0].items[0])
self.assertTrue(
'chrome/app/chromium_strings.grd:8' in warnings[0].items[1])
def testMultipleWarnings(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('chrome/app/chromium_strings.grd', [
'<message name="Bar" desc="Welcome to Chrome">',
' Welcome to Chrome!',
'</message>',
'<message name="Baz" desc="A correct message">',
' Chromium is the software you are using.',
'</message>',
'<message name="Bat" desc="An incorrect message">',
' Google Chrome is the software you are using.',
'</message>',
]),
MockAffectedFile('components/components_google_chrome_strings.grd', [
'<message name="Bar" desc="Welcome to Chrome">',
' Welcome to Chrome!',
'</message>',
'<message name="Baz" desc="A correct message">',
' Chromium is the software you are using.',
'</message>',
'<message name="Bat" desc="An incorrect message">',
' Google Chrome is the software you are using.',
'</message>',
]),
]
warnings = PRESUBMIT._CheckCorrectProductNameInMessages(
mock_input_api, MockOutputApi())
self.assertEqual(2, len(warnings))
self.assertTrue(
'components/components_google_chrome_strings.grd:5'
in warnings[0].items[0])
self.assertTrue(
'chrome/app/chromium_strings.grd:2' in warnings[1].items[0])
self.assertTrue(
'chrome/app/chromium_strings.grd:8' in warnings[1].items[1])
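# Service manifest changes require IPC security OWNERS coverage; unrelated JSON and
# C++ files do not.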
class ServiceManifestOwnerTest(unittest.TestCase):
def testServiceManifestJsonChangeNeedsSecurityOwner(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('services/goat/manifest.json',
[
'{',
' "name": "teleporter",',
' "display_name": "Goat Teleporter",'
' "interface_provider_specs": {',
' }',
'}',
])
]
mock_output_api = MockOutputApi()
errors = PRESUBMIT._CheckIpcOwners(
mock_input_api, mock_output_api)
self.assertEqual(1, len(errors))
self.assertEqual(
'Found OWNERS files that need to be updated for IPC security review ' +
'coverage.\nPlease update the OWNERS files below:', errors[0].message)
# No warning if already covered by an OWNERS rule.
def testNonManifestJsonChangesDoNotRequireSecurityOwner(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('services/goat/species.json',
[
'[',
' "anglo-nubian",',
' "angora"',
']',
])
]
mock_output_api = MockOutputApi()
errors = PRESUBMIT._CheckIpcOwners(
mock_input_api, mock_output_api)
self.assertEqual([], errors)
def testServiceManifestChangeNeedsSecurityOwner(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('services/goat/public/cpp/manifest.cc',
[
'#include "services/goat/public/cpp/manifest.h"',
'const service_manager::Manifest& GetManifest() {}',
])]
mock_output_api = MockOutputApi()
errors = PRESUBMIT._CheckIpcOwners(
mock_input_api, mock_output_api)
self.assertEqual(1, len(errors))
self.assertEqual(
'Found OWNERS files that need to be updated for IPC security review ' +
'coverage.\nPlease update the OWNERS files below:', errors[0].message)
def testNonServiceManifestSourceChangesDoNotRequireSecurityOwner(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('some/non/service/thing/foo_manifest.cc',
[
'const char kNoEnforcement[] = "not a manifest!";',
])]
mock_output_api = MockOutputApi()
errors = PRESUBMIT._CheckIpcOwners(
mock_input_api, mock_output_api)
self.assertEqual([], errors)
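# Banned functions and deprecated Mojo types are reported as warnings or errors
# depending on the directory they appear in.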
class BannedTypeCheckTest(unittest.TestCase):
def testBannedCppFunctions(self):
input_api = MockInputApi()
input_api.files = [
MockFile('some/cpp/problematic/file.cc',
['using namespace std;']),
MockFile('third_party/blink/problematic/file.cc',
['GetInterfaceProvider()']),
MockFile('some/cpp/ok/file.cc',
['using std::string;']),
]
results = PRESUBMIT._CheckNoBannedFunctions(input_api, MockOutputApi())
# warnings are results[0], errors are results[1]
self.assertEqual(2, len(results))
self.assertTrue('some/cpp/problematic/file.cc' in results[1].message)
self.assertTrue(
'third_party/blink/problematic/file.cc' in results[0].message)
self.assertTrue('some/cpp/ok/file.cc' not in results[1].message)
def testBannedBlinkDowncastHelpers(self):
input_api = MockInputApi()
input_api.files = [
MockFile('some/cpp/problematic/file1.cc',
['DEFINE_TYPE_CASTS(ToType, FromType, from_argument,'
'PointerPredicate(), ReferencePredicate());']),
MockFile('some/cpp/problematic/file2.cc',
['bool is_test_ele = IsHTMLTestElement(n);']),
MockFile('some/cpp/problematic/file3.cc',
['auto* html_test_ele = ToHTMLTestElement(n);']),
MockFile('some/cpp/problematic/file4.cc',
['auto* html_test_ele_or_null = ToHTMLTestElementOrNull(n);']),
MockFile('some/cpp/ok/file1.cc',
['bool is_test_ele = IsA<HTMLTestElement>(n);']),
MockFile('some/cpp/ok/file2.cc',
['auto* html_test_ele = To<HTMLTestElement>(n);']),
MockFile('some/cpp/ok/file3.cc',
['auto* html_test_ele_or_null = ',
'DynamicTo<HTMLTestElement>(n);']),
]
# warnings are errors[0], errors are errors[1]
errors = PRESUBMIT._CheckNoBannedFunctions(input_api, MockOutputApi())
self.assertEqual(2, len(errors))
self.assertTrue('some/cpp/problematic/file1.cc' in errors[1].message)
self.assertTrue('some/cpp/problematic/file2.cc' in errors[0].message)
self.assertTrue('some/cpp/problematic/file3.cc' in errors[0].message)
self.assertTrue('some/cpp/problematic/file4.cc' in errors[0].message)
self.assertTrue('some/cpp/ok/file1.cc' not in errors[0].message)
self.assertTrue('some/cpp/ok/file2.cc' not in errors[0].message)
self.assertTrue('some/cpp/ok/file3.cc' not in errors[0].message)
def testBannedIosObjcFunctions(self):
input_api = MockInputApi()
input_api.files = [
MockFile('some/ios/file.mm',
['TEST(SomeClassTest, SomeInteraction) {',
'}']),
MockFile('some/mac/file.mm',
['TEST(SomeClassTest, SomeInteraction) {',
'}']),
MockFile('another/ios_file.mm',
['class SomeTest : public testing::Test {};']),
MockFile('some/ios/file_egtest.mm',
['- (void)testSomething { EXPECT_OCMOCK_VERIFY(aMock); }']),
MockFile('some/ios/file_unittest.mm',
['TEST_F(SomeTest, TestThis) { EXPECT_OCMOCK_VERIFY(aMock); }']),
]
errors = PRESUBMIT._CheckNoBannedFunctions(input_api, MockOutputApi())
self.assertEqual(1, len(errors))
self.assertTrue('some/ios/file.mm' in errors[0].message)
self.assertTrue('another/ios_file.mm' in errors[0].message)
self.assertTrue('some/mac/file.mm' not in errors[0].message)
self.assertTrue('some/ios/file_egtest.mm' in errors[0].message)
self.assertTrue('some/ios/file_unittest.mm' not in errors[0].message)
def testBannedMojoFunctions(self):
input_api = MockInputApi()
input_api.files = [
MockFile('some/cpp/problematic/file.cc',
['mojo::DataPipe();']),
MockFile('some/cpp/problematic/file2.cc',
['mojo::ConvertTo<>']),
MockFile('some/cpp/ok/file.cc',
['CreateDataPipe();']),
MockFile('some/cpp/ok/file2.cc',
['mojo::DataPipeDrainer();']),
MockFile('third_party/blink/ok/file3.cc',
['mojo::ConvertTo<>']),
MockFile('content/renderer/ok/file3.cc',
['mojo::ConvertTo<>']),
]
results = PRESUBMIT._CheckNoBannedFunctions(input_api, MockOutputApi())
# warnings are results[0], errors are results[1]
self.assertEqual(2, len(results))
self.assertTrue('some/cpp/problematic/file.cc' in results[1].message)
self.assertTrue('some/cpp/problematic/file2.cc' in results[0].message)
self.assertTrue('some/cpp/ok/file.cc' not in results[1].message)
self.assertTrue('some/cpp/ok/file2.cc' not in results[1].message)
self.assertTrue('third_party/blink/ok/file3.cc' not in results[0].message)
self.assertTrue('content/renderer/ok/file3.cc' not in results[0].message)
def testDeprecatedMojoTypes(self):
ok_paths = ['components/arc']
warning_paths = ['some/cpp']
error_paths = ['third_party/blink', 'content']
test_cases = [
{
'type': 'mojo::AssociatedBinding<>;',
'file': 'file1.c'
},
{
'type': 'mojo::AssociatedBindingSet<>;',
'file': 'file2.c'
},
{
'type': 'mojo::AssociatedInterfacePtr<>',
'file': 'file3.cc'
},
{
'type': 'mojo::AssociatedInterfacePtrInfo<>',
'file': 'file4.cc'
},
{
'type': 'mojo::AssociatedInterfaceRequest<>',
'file': 'file5.cc'
},
{
'type': 'mojo::Binding<>',
'file': 'file6.cc'
},
{
'type': 'mojo::BindingSet<>',
'file': 'file7.cc'
},
{
'type': 'mojo::InterfacePtr<>',
'file': 'file8.cc'
},
{
'type': 'mojo::InterfacePtrInfo<>',
'file': 'file9.cc'
},
{
'type': 'mojo::InterfaceRequest<>',
'file': 'file10.cc'
},
{
'type': 'mojo::MakeRequest()',
'file': 'file11.cc'
},
{
'type': 'mojo::MakeRequestAssociatedWithDedicatedPipe()',
'file': 'file12.cc'
},
{
'type': 'mojo::MakeStrongBinding()<>',
'file': 'file13.cc'
},
{
'type': 'mojo::MakeStrongAssociatedBinding()<>',
'file': 'file14.cc'
},
{
'type': 'mojo::StrongAssociatedBindingSet<>',
'file': 'file15.cc'
},
{
'type': 'mojo::StrongBindingSet<>',
'file': 'file16.cc'
},
]
# Build the list of MockFiles considering paths that should trigger warnings
# as well as paths that should trigger errors.
input_api = MockInputApi()
input_api.files = []
for test_case in test_cases:
for path in ok_paths:
input_api.files.append(MockFile(os.path.join(path, test_case['file']),
[test_case['type']]))
for path in warning_paths:
input_api.files.append(MockFile(os.path.join(path, test_case['file']),
[test_case['type']]))
for path in error_paths:
input_api.files.append(MockFile(os.path.join(path, test_case['file']),
[test_case['type']]))
results = PRESUBMIT._CheckNoDeprecatedMojoTypes(input_api, MockOutputApi())
# warnings are results[0], errors are results[1]
self.assertEqual(2, len(results))
for test_case in test_cases:
# Check that no warnings nor errors have been triggered for these paths.
for path in ok_paths:
self.assertFalse(path in results[0].message)
self.assertFalse(path in results[1].message)
# Check warnings have been triggered for these paths.
for path in warning_paths:
self.assertTrue(path in results[0].message)
self.assertFalse(path in results[1].message)
# Check errors have been triggered for these paths.
for path in error_paths:
self.assertFalse(path in results[0].message)
self.assertTrue(path in results[1].message)
class NoProductionCodeUsingTestOnlyFunctionsTest(unittest.TestCase):
def testTruePositives(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('some/path/foo.cc', ['foo_for_testing();']),
MockFile('some/path/foo.mm', ['FooForTesting();']),
MockFile('some/path/foo.cxx', ['FooForTests();']),
MockFile('some/path/foo.cpp', ['foo_for_test();']),
]
results = PRESUBMIT._CheckNoProductionCodeUsingTestOnlyFunctions(
mock_input_api, MockOutputApi())
self.assertEqual(1, len(results))
self.assertEqual(4, len(results[0].items))
self.assertTrue('foo.cc' in results[0].items[0])
self.assertTrue('foo.mm' in results[0].items[1])
self.assertTrue('foo.cxx' in results[0].items[2])
self.assertTrue('foo.cpp' in results[0].items[3])
def testFalsePositives(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('some/path/foo.h', ['foo_for_testing();']),
MockFile('some/path/foo.mm', ['FooForTesting() {']),
MockFile('some/path/foo.cc', ['::FooForTests();']),
MockFile('some/path/foo.cpp', ['// foo_for_test();']),
]
results = PRESUBMIT._CheckNoProductionCodeUsingTestOnlyFunctions(
mock_input_api, MockOutputApi())
self.assertEqual(0, len(results))
class NoProductionJavaCodeUsingTestOnlyFunctionsTest(unittest.TestCase):
def testTruePositives(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('dir/java/src/foo.java', ['FooForTesting();']),
MockFile('dir/java/src/bar.java', ['FooForTests(x);']),
MockFile('dir/java/src/baz.java', ['FooForTest(', 'y', ');']),
MockFile('dir/java/src/mult.java', [
'int x = SomethingLongHere()',
' * SomethingLongHereForTesting();'
])
]
results = PRESUBMIT._CheckNoProductionCodeUsingTestOnlyFunctionsJava(
mock_input_api, MockOutputApi())
self.assertEqual(1, len(results))
self.assertEqual(4, len(results[0].items))
self.assertTrue('foo.java' in results[0].items[0])
self.assertTrue('bar.java' in results[0].items[1])
self.assertTrue('baz.java' in results[0].items[2])
self.assertTrue('mult.java' in results[0].items[3])
def testFalsePositives(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('dir/java/src/foo.xml', ['FooForTesting();']),
MockFile('dir/java/src/foo.java', ['FooForTests() {']),
MockFile('dir/java/src/bar.java', ['// FooForTest();']),
MockFile('dir/java/src/bar2.java', ['x = 1; // FooForTest();']),
MockFile('dir/javatests/src/baz.java', ['FooForTest(', 'y', ');']),
MockFile('dir/junit/src/baz.java', ['FooForTest(', 'y', ');']),
MockFile('dir/junit/src/javadoc.java', [
'/** Use FooForTest(); to obtain foo in tests.'
' */'
]),
MockFile('dir/junit/src/javadoc2.java', [
'/** ',
' * Use FooForTest(); to obtain foo in tests.'
' */'
]),
]
results = PRESUBMIT._CheckNoProductionCodeUsingTestOnlyFunctionsJava(
mock_input_api, MockOutputApi())
self.assertEqual(0, len(results))
class NewImagesWarningTest(unittest.TestCase):
def testTruePositives(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('dir/android/res/drawable/foo.png', []),
MockFile('dir/android/res/drawable-v21/bar.svg', []),
MockFile('dir/android/res/mipmap-v21-en/baz.webp', []),
MockFile('dir/android/res_gshoe/drawable-mdpi/foobar.png', []),
]
results = PRESUBMIT._CheckNewImagesWarning(mock_input_api, MockOutputApi())
self.assertEqual(1, len(results))
self.assertEqual(4, len(results[0].items))
self.assertTrue('foo.png' in results[0].items[0].LocalPath())
self.assertTrue('bar.svg' in results[0].items[1].LocalPath())
self.assertTrue('baz.webp' in results[0].items[2].LocalPath())
self.assertTrue('foobar.png' in results[0].items[3].LocalPath())
def testFalsePositives(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('dir/pngs/README.md', []),
MockFile('java/test/res/drawable/foo.png', []),
MockFile('third_party/blink/foo.png', []),
MockFile('dir/third_party/libpng/src/foo.cc', ['foobar']),
MockFile('dir/resources.webp/.gitignore', ['foo.png']),
]
results = PRESUBMIT._CheckNewImagesWarning(mock_input_api, MockOutputApi())
self.assertEqual(0, len(results))
class CheckUniquePtrTest(unittest.TestCase):
def testTruePositivesNullptr(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('dir/baz.cc', ['std::unique_ptr<T>()']),
MockFile('dir/baz-p.cc', ['std::unique_ptr<T<P>>()']),
]
results = PRESUBMIT._CheckUniquePtr(mock_input_api, MockOutputApi())
self.assertEqual(1, len(results))
self.assertTrue('nullptr' in results[0].message)
self.assertEqual(2, len(results[0].items))
self.assertTrue('baz.cc' in results[0].items[0])
self.assertTrue('baz-p.cc' in results[0].items[1])
def testTruePositivesConstructor(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('dir/foo.cc', ['return std::unique_ptr<T>(foo);']),
MockFile('dir/bar.mm', ['bar = std::unique_ptr<T>(foo)']),
MockFile('dir/mult.cc', [
'return',
' std::unique_ptr<T>(barVeryVeryLongFooSoThatItWouldNotFitAbove);'
]),
MockFile('dir/mult2.cc', [
'barVeryVeryLongLongBaaaaaarSoThatTheLineLimitIsAlmostReached =',
' std::unique_ptr<T>(foo);'
]),
MockFile('dir/mult3.cc', [
'bar = std::unique_ptr<T>(',
' fooVeryVeryVeryLongStillGoingWellThisWillTakeAWhileFinallyThere);'
]),
MockFile('dir/multi_arg.cc', [
'auto p = std::unique_ptr<std::pair<T, D>>(new std::pair(T, D));']),
]
results = PRESUBMIT._CheckUniquePtr(mock_input_api, MockOutputApi())
self.assertEqual(1, len(results))
self.assertTrue('std::make_unique' in results[0].message)
self.assertEqual(6, len(results[0].items))
self.assertTrue('foo.cc' in results[0].items[0])
self.assertTrue('bar.mm' in results[0].items[1])
self.assertTrue('mult.cc' in results[0].items[2])
self.assertTrue('mult2.cc' in results[0].items[3])
self.assertTrue('mult3.cc' in results[0].items[4])
self.assertTrue('multi_arg.cc' in results[0].items[5])
def testFalsePositives(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('dir/foo.cc', ['return std::unique_ptr<T[]>(foo);']),
MockFile('dir/bar.mm', ['bar = std::unique_ptr<T[]>(foo)']),
MockFile('dir/file.cc', ['std::unique_ptr<T> p = Foo();']),
MockFile('dir/baz.cc', [
'std::unique_ptr<T> result = std::make_unique<T>();'
]),
MockFile('dir/baz2.cc', [
'std::unique_ptr<T> result = std::make_unique<T>('
]),
MockFile('dir/nested.cc', ['set<std::unique_ptr<T>>();']),
MockFile('dir/nested2.cc', ['map<U, std::unique_ptr<T>>();']),
# Two-argument invocation of std::unique_ptr is exempt because there is
# no equivalent using std::make_unique.
MockFile('dir/multi_arg.cc', [
'auto p = std::unique_ptr<T, D>(new T(), D());']),
]
results = PRESUBMIT._CheckUniquePtr(mock_input_api, MockOutputApi())
self.assertEqual(0, len(results))
class CheckNoDirectIncludesHeadersWhichRedefineStrCat(unittest.TestCase):
def testBlocksDirectIncludes(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('dir/foo_win.cc', ['#include "shlwapi.h"']),
MockFile('dir/bar.h', ['#include <propvarutil.h>']),
MockFile('dir/baz.h', ['#include <atlbase.h>']),
MockFile('dir/jumbo.h', ['#include "sphelper.h"']),
]
results = PRESUBMIT._CheckNoStrCatRedefines(mock_input_api, MockOutputApi())
    self.assertEqual(1, len(results))
    self.assertEqual(4, len(results[0].items))
self.assertTrue('StrCat' in results[0].message)
self.assertTrue('foo_win.cc' in results[0].items[0])
self.assertTrue('bar.h' in results[0].items[1])
self.assertTrue('baz.h' in results[0].items[2])
self.assertTrue('jumbo.h' in results[0].items[3])
def testAllowsToIncludeWrapper(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('dir/baz_win.cc', ['#include "base/win/shlwapi.h"']),
MockFile('dir/baz-win.h', ['#include "base/win/atl.h"']),
]
results = PRESUBMIT._CheckNoStrCatRedefines(mock_input_api, MockOutputApi())
    self.assertEqual(0, len(results))
def testAllowsToCreateWrapper(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('base/win/shlwapi.h', [
'#include <shlwapi.h>',
'#include "base/win/windows_defines.inc"']),
]
results = PRESUBMIT._CheckNoStrCatRedefines(mock_input_api, MockOutputApi())
    self.assertEqual(0, len(results))
class TranslationScreenshotsTest(unittest.TestCase):
# An empty grd file.
OLD_GRD_CONTENTS = """<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="1" current_release="1">
<release seq="1">
<messages></messages>
</release>
</grit>
""".splitlines()
# A grd file with a single message.
NEW_GRD_CONTENTS1 = """<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="1" current_release="1">
<release seq="1">
<messages>
<message name="IDS_TEST1">
Test string 1
</message>
</messages>
</release>
</grit>
""".splitlines()
# A grd file with two messages.
NEW_GRD_CONTENTS2 = """<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="1" current_release="1">
<release seq="1">
<messages>
<message name="IDS_TEST1">
Test string 1
</message>
<message name="IDS_TEST2">
Test string 2
</message>
</messages>
</release>
</grit>
""".splitlines()
OLD_GRDP_CONTENTS = (
'<?xml version="1.0" encoding="utf-8"?>',
'<grit-part>',
'</grit-part>'
)
NEW_GRDP_CONTENTS1 = (
'<?xml version="1.0" encoding="utf-8"?>',
'<grit-part>',
'<message name="IDS_PART_TEST1">',
'Part string 1',
'</message>',
'</grit-part>')
NEW_GRDP_CONTENTS2 = (
'<?xml version="1.0" encoding="utf-8"?>',
'<grit-part>',
'<message name="IDS_PART_TEST1">',
'Part string 1',
'</message>',
'<message name="IDS_PART_TEST2">',
'Part string 2',
'</message>',
'</grit-part>')
DO_NOT_UPLOAD_PNG_MESSAGE = ('Do not include actual screenshots in the '
'changelist. Run '
'tools/translate/upload_screenshots.py to '
'upload them instead:')
GENERATE_SIGNATURES_MESSAGE = ('You are adding or modifying UI strings.\n'
'To ensure the best translations, take '
'screenshots of the relevant UI '
'(https://g.co/chrome/translation) and add '
'these files to your changelist:')
REMOVE_SIGNATURES_MESSAGE = ('You removed strings associated with these '
'files. Remove:')
def makeInputApi(self, files):
input_api = MockInputApi()
input_api.files = files
# Override os_path.exists because the presubmit uses the actual
# os.path.exists.
input_api.CreateMockFileInPath(
[x.LocalPath() for x in input_api.AffectedFiles(include_deletes=True)])
return input_api
""" CL modified and added messages, but didn't add any screenshots."""
def testNoScreenshots(self):
# No new strings (file contents same). Should not warn.
input_api = self.makeInputApi([
MockAffectedFile('test.grd', self.NEW_GRD_CONTENTS1,
self.NEW_GRD_CONTENTS1, action='M'),
MockAffectedFile('part.grdp', self.NEW_GRDP_CONTENTS1,
self.NEW_GRDP_CONTENTS1, action='M')])
warnings = PRESUBMIT._CheckTranslationScreenshots(input_api,
MockOutputApi())
self.assertEqual(0, len(warnings))
# Add two new strings. Should have two warnings.
input_api = self.makeInputApi([
MockAffectedFile('test.grd', self.NEW_GRD_CONTENTS2,
self.NEW_GRD_CONTENTS1, action='M'),
MockAffectedFile('part.grdp', self.NEW_GRDP_CONTENTS2,
self.NEW_GRDP_CONTENTS1, action='M')])
warnings = PRESUBMIT._CheckTranslationScreenshots(input_api,
MockOutputApi())
self.assertEqual(1, len(warnings))
self.assertEqual(self.GENERATE_SIGNATURES_MESSAGE, warnings[0].message)
self.assertEqual([
os.path.join('part_grdp', 'IDS_PART_TEST2.png.sha1'),
os.path.join('test_grd', 'IDS_TEST2.png.sha1')],
warnings[0].items)
# Add four new strings. Should have four warnings.
input_api = self.makeInputApi([
MockAffectedFile('test.grd', self.NEW_GRD_CONTENTS2,
self.OLD_GRD_CONTENTS, action='M'),
MockAffectedFile('part.grdp', self.NEW_GRDP_CONTENTS2,
self.OLD_GRDP_CONTENTS, action='M')])
warnings = PRESUBMIT._CheckTranslationScreenshots(input_api,
MockOutputApi())
self.assertEqual(1, len(warnings))
self.assertEqual(self.GENERATE_SIGNATURES_MESSAGE, warnings[0].message)
self.assertEqual([
os.path.join('part_grdp', 'IDS_PART_TEST1.png.sha1'),
os.path.join('part_grdp', 'IDS_PART_TEST2.png.sha1'),
os.path.join('test_grd', 'IDS_TEST1.png.sha1'),
os.path.join('test_grd', 'IDS_TEST2.png.sha1'),
], warnings[0].items)
def testPngAddedSha1NotAdded(self):
# CL added one new message in a grd file and added the png file associated
# with it, but did not add the corresponding sha1 file. This should warn
# twice:
# - Once for the added png file (because we don't want developers to upload
# actual images)
# - Once for the missing .sha1 file
input_api = self.makeInputApi([
MockAffectedFile(
'test.grd',
self.NEW_GRD_CONTENTS1,
self.OLD_GRD_CONTENTS,
action='M'),
MockAffectedFile(
os.path.join('test_grd', 'IDS_TEST1.png'), 'binary', action='A')
])
warnings = PRESUBMIT._CheckTranslationScreenshots(input_api,
MockOutputApi())
self.assertEqual(2, len(warnings))
self.assertEqual(self.DO_NOT_UPLOAD_PNG_MESSAGE, warnings[0].message)
self.assertEqual([os.path.join('test_grd', 'IDS_TEST1.png')],
warnings[0].items)
self.assertEqual(self.GENERATE_SIGNATURES_MESSAGE, warnings[1].message)
self.assertEqual([os.path.join('test_grd', 'IDS_TEST1.png.sha1')],
warnings[1].items)
# CL added two messages (one in grd, one in grdp) and added the png files
# associated with the messages, but did not add the corresponding sha1
# files. This should warn twice:
# - Once for the added png files (because we don't want developers to upload
# actual images)
# - Once for the missing .sha1 files
input_api = self.makeInputApi([
# Modified files:
MockAffectedFile(
'test.grd',
self.NEW_GRD_CONTENTS1,
self.OLD_GRD_CONTENTS,
action='M'),
MockAffectedFile(
'part.grdp',
self.NEW_GRDP_CONTENTS1,
self.OLD_GRDP_CONTENTS,
action='M'),
# Added files:
MockAffectedFile(
os.path.join('test_grd', 'IDS_TEST1.png'), 'binary', action='A'),
MockAffectedFile(
os.path.join('part_grdp', 'IDS_PART_TEST1.png'), 'binary',
action='A')
])
warnings = PRESUBMIT._CheckTranslationScreenshots(input_api,
MockOutputApi())
self.assertEqual(2, len(warnings))
self.assertEqual(self.DO_NOT_UPLOAD_PNG_MESSAGE, warnings[0].message)
self.assertEqual([os.path.join('part_grdp', 'IDS_PART_TEST1.png'),
os.path.join('test_grd', 'IDS_TEST1.png')],
warnings[0].items)
self.assertEqual(self.GENERATE_SIGNATURES_MESSAGE, warnings[1].message)
self.assertEqual([os.path.join('part_grdp', 'IDS_PART_TEST1.png.sha1'),
os.path.join('test_grd', 'IDS_TEST1.png.sha1')],
warnings[1].items)
def testScreenshotsWithSha1(self):
# CL added four messages (two each in a grd and grdp) and their
# corresponding .sha1 files. No warnings.
input_api = self.makeInputApi([
# Modified files:
MockAffectedFile(
'test.grd',
self.NEW_GRD_CONTENTS2,
self.OLD_GRD_CONTENTS,
action='M'),
MockAffectedFile(
'part.grdp',
self.NEW_GRDP_CONTENTS2,
self.OLD_GRDP_CONTENTS,
action='M'),
# Added files:
MockFile(
os.path.join('test_grd', 'IDS_TEST1.png.sha1'),
'binary',
action='A'),
MockFile(
os.path.join('test_grd', 'IDS_TEST2.png.sha1'),
'binary',
action='A'),
MockFile(
os.path.join('part_grdp', 'IDS_PART_TEST1.png.sha1'),
'binary',
action='A'),
MockFile(
os.path.join('part_grdp', 'IDS_PART_TEST2.png.sha1'),
'binary',
action='A'),
])
warnings = PRESUBMIT._CheckTranslationScreenshots(input_api,
MockOutputApi())
self.assertEqual([], warnings)
def testScreenshotsRemovedWithSha1(self):
# Replace new contents with old contents in grd and grp files, removing
# IDS_TEST1, IDS_TEST2, IDS_PART_TEST1 and IDS_PART_TEST2.
# Should warn to remove the sha1 files associated with these strings.
input_api = self.makeInputApi([
# Modified files:
MockAffectedFile(
'test.grd',
self.OLD_GRD_CONTENTS, # new_contents
self.NEW_GRD_CONTENTS2, # old_contents
action='M'),
MockAffectedFile(
'part.grdp',
self.OLD_GRDP_CONTENTS, # new_contents
self.NEW_GRDP_CONTENTS2, # old_contents
action='M'),
# Unmodified files:
MockFile(os.path.join('test_grd', 'IDS_TEST1.png.sha1'), 'binary', ''),
MockFile(os.path.join('test_grd', 'IDS_TEST2.png.sha1'), 'binary', ''),
MockFile(os.path.join('part_grdp', 'IDS_PART_TEST1.png.sha1'),
'binary', ''),
MockFile(os.path.join('part_grdp', 'IDS_PART_TEST2.png.sha1'),
'binary', '')
])
warnings = PRESUBMIT._CheckTranslationScreenshots(input_api,
MockOutputApi())
self.assertEqual(1, len(warnings))
self.assertEqual(self.REMOVE_SIGNATURES_MESSAGE, warnings[0].message)
self.assertEqual([
os.path.join('part_grdp', 'IDS_PART_TEST1.png.sha1'),
os.path.join('part_grdp', 'IDS_PART_TEST2.png.sha1'),
os.path.join('test_grd', 'IDS_TEST1.png.sha1'),
os.path.join('test_grd', 'IDS_TEST2.png.sha1')
], warnings[0].items)
# Same as above, but this time one of the .sha1 files is also removed.
input_api = self.makeInputApi([
# Modified files:
MockAffectedFile(
'test.grd',
self.OLD_GRD_CONTENTS, # new_contents
self.NEW_GRD_CONTENTS2, # old_contents
action='M'),
MockAffectedFile(
'part.grdp',
self.OLD_GRDP_CONTENTS, # new_contents
self.NEW_GRDP_CONTENTS2, # old_contents
action='M'),
# Unmodified files:
MockFile(os.path.join('test_grd', 'IDS_TEST1.png.sha1'), 'binary', ''),
MockFile(os.path.join('part_grdp', 'IDS_PART_TEST1.png.sha1'),
'binary', ''),
# Deleted files:
MockAffectedFile(
os.path.join('test_grd', 'IDS_TEST2.png.sha1'),
'',
'old_contents',
action='D'),
MockAffectedFile(
os.path.join('part_grdp', 'IDS_PART_TEST2.png.sha1'),
'',
'old_contents',
action='D')
])
warnings = PRESUBMIT._CheckTranslationScreenshots(input_api,
MockOutputApi())
self.assertEqual(1, len(warnings))
self.assertEqual(self.REMOVE_SIGNATURES_MESSAGE, warnings[0].message)
self.assertEqual([os.path.join('part_grdp', 'IDS_PART_TEST1.png.sha1'),
os.path.join('test_grd', 'IDS_TEST1.png.sha1')
], warnings[0].items)
# Remove all sha1 files. There should be no warnings.
input_api = self.makeInputApi([
# Modified files:
MockAffectedFile(
'test.grd',
self.OLD_GRD_CONTENTS,
self.NEW_GRD_CONTENTS2,
action='M'),
MockAffectedFile(
'part.grdp',
self.OLD_GRDP_CONTENTS,
self.NEW_GRDP_CONTENTS2,
action='M'),
# Deleted files:
MockFile(
os.path.join('test_grd', 'IDS_TEST1.png.sha1'),
'binary',
action='D'),
MockFile(
os.path.join('test_grd', 'IDS_TEST2.png.sha1'),
'binary',
action='D'),
MockFile(
os.path.join('part_grdp', 'IDS_PART_TEST1.png.sha1'),
'binary',
action='D'),
MockFile(
os.path.join('part_grdp', 'IDS_PART_TEST2.png.sha1'),
'binary',
action='D')
])
warnings = PRESUBMIT._CheckTranslationScreenshots(input_api,
MockOutputApi())
self.assertEqual([], warnings)
class DISABLETypoInTest(unittest.TestCase):
def testPositive(self):
# Verify the typo "DISABLE_" instead of "DISABLED_" in various contexts
# where the desire is to disable a test.
tests = [
# Disabled on one platform:
'#if defined(OS_WIN)\n'
'#define MAYBE_FoobarTest DISABLE_FoobarTest\n'
'#else\n'
'#define MAYBE_FoobarTest FoobarTest\n'
'#endif\n',
# Disabled on one platform spread cross lines:
'#if defined(OS_WIN)\n'
'#define MAYBE_FoobarTest \\\n'
' DISABLE_FoobarTest\n'
'#else\n'
'#define MAYBE_FoobarTest FoobarTest\n'
'#endif\n',
# Disabled on all platforms:
' TEST_F(FoobarTest, DISABLE_Foo)\n{\n}',
# Disabled on all platforms but multiple lines
' TEST_F(FoobarTest,\n DISABLE_foo){\n}\n',
]
for test in tests:
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('some/path/foo_unittest.cc', test.splitlines()),
]
results = PRESUBMIT._CheckNoDISABLETypoInTests(mock_input_api,
MockOutputApi())
self.assertEqual(
1,
len(results),
msg=('expected len(results) == 1 but got %d in test: %s' %
(len(results), test)))
self.assertTrue(
'foo_unittest.cc' in results[0].message,
msg=('expected foo_unittest.cc in message but got %s in test %s' %
(results[0].message, test)))
  def testIgnoreNotTestFiles(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('some/path/foo.cc', 'TEST_F(FoobarTest, DISABLE_Foo)'),
]
results = PRESUBMIT._CheckNoDISABLETypoInTests(mock_input_api,
MockOutputApi())
self.assertEqual(0, len(results))
  def testIgnoreDeletedFiles(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockFile('some/path/foo.cc', 'TEST_F(FoobarTest, Foo)', action='D'),
]
results = PRESUBMIT._CheckNoDISABLETypoInTests(mock_input_api,
MockOutputApi())
self.assertEqual(0, len(results))
class BuildtoolsRevisionsAreInSyncTest(unittest.TestCase):
# TODO(crbug.com/941824): We need to make sure the entries in
# //buildtools/DEPS are kept in sync with the entries in //DEPS
# so that users of //buildtools in other projects get the same tooling
# Chromium gets. If we ever fix the referenced bug and add 'includedeps'
# support to gclient, we can eliminate the duplication and delete
# these tests for the corresponding presubmit check.
def _check(self, files):
mock_input_api = MockInputApi()
mock_input_api.files = []
for fname, contents in files.items():
mock_input_api.files.append(MockFile(fname, contents.splitlines()))
return PRESUBMIT._CheckBuildtoolsRevisionsAreInSync(mock_input_api,
MockOutputApi())
def testOneFileChangedButNotTheOther(self):
results = self._check({
"DEPS": "'libunwind_revision': 'onerev'",
})
self.assertNotEqual(results, [])
def testNeitherFileChanged(self):
results = self._check({
"OWNERS": "[email protected]",
})
self.assertEqual(results, [])
def testBothFilesChangedAndMatch(self):
results = self._check({
"DEPS": "'libunwind_revision': 'onerev'",
"buildtools/DEPS": "'libunwind_revision': 'onerev'",
})
self.assertEqual(results, [])
def testBothFilesWereChangedAndDontMatch(self):
results = self._check({
"DEPS": "'libunwind_revision': 'onerev'",
"buildtools/DEPS": "'libunwind_revision': 'anotherrev'",
})
self.assertNotEqual(results, [])
class CheckFuzzTargetsTest(unittest.TestCase):
def _check(self, files):
mock_input_api = MockInputApi()
mock_input_api.files = []
for fname, contents in files.items():
mock_input_api.files.append(MockFile(fname, contents.splitlines()))
return PRESUBMIT._CheckFuzzTargets(mock_input_api, MockOutputApi())
def testLibFuzzerSourcesIgnored(self):
results = self._check({
"third_party/lib/Fuzzer/FuzzerDriver.cpp": "LLVMFuzzerInitialize",
})
self.assertEqual(results, [])
def testNonCodeFilesIgnored(self):
results = self._check({
"README.md": "LLVMFuzzerInitialize",
})
self.assertEqual(results, [])
def testNoErrorHeaderPresent(self):
results = self._check({
"fuzzer.cc": (
"#include \"testing/libfuzzer/libfuzzer_exports.h\"\n" +
"LLVMFuzzerInitialize"
)
})
self.assertEqual(results, [])
def testErrorMissingHeader(self):
results = self._check({
"fuzzer.cc": "LLVMFuzzerInitialize"
})
self.assertEqual(len(results), 1)
self.assertEqual(results[0].items, ['fuzzer.cc'])
class SetNoParentTest(unittest.TestCase):
def testSetNoParentMissing(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('goat/OWNERS',
[
'set noparent',
'[email protected]',
'per-file *.json=set noparent',
'per-file *[email protected]',
])
]
mock_output_api = MockOutputApi()
errors = PRESUBMIT._CheckSetNoParent(mock_input_api, mock_output_api)
self.assertEqual(1, len(errors))
self.assertTrue('goat/OWNERS:1' in errors[0].long_text)
self.assertTrue('goat/OWNERS:3' in errors[0].long_text)
def testSetNoParentWithCorrectRule(self):
mock_input_api = MockInputApi()
mock_input_api.files = [
MockAffectedFile('goat/OWNERS',
[
'set noparent',
'file://ipc/SECURITY_OWNERS',
'per-file *.json=set noparent',
'per-file *.json=file://ipc/SECURITY_OWNERS',
])
]
mock_output_api = MockOutputApi()
errors = PRESUBMIT._CheckSetNoParent(mock_input_api, mock_output_api)
self.assertEqual([], errors)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 2,874,766,208,941,343,000 | 36.264867 | 80 | 0.585563 | false |
FluidityProject/fluidity | tests/mms_tracer_P1dg_cdg_diff_steady_3d_cjc/cdg3d.py | 1 | 1175 | import os
from fluidity_tools import stat_parser
from sympy import *
from numpy import array,max,abs
meshtemplate='''
Point(1) = {0.0,0.0,0,0.1};
Extrude {1,0,0} {
Point{1}; Layers{<layers>};
}
Extrude {0,1,0} {
Line{1}; Layers{<layers>};
}
Extrude {0,0,1} {
Surface{5}; Layers{<layers>};
}
Physical Surface(28) = {5,14,26,22,27,18};
Physical Volume(29) = {1};
'''
def generate_meshfile(name,layers):
geo = meshtemplate.replace('<layers>',str(layers))
open(name+".geo",'w').write(geo)
os.system("gmsh -3 "+name+".geo")
def run_test(layers, binary):
'''run_test(layers, binary)
Run a single test of the channel problem. Layers is the number of mesh
points in the cross-channel direction. The mesh is unstructured and
isotropic. binary is a string containing the fluidity command to run.
The return value is the error in u and p at the end of the simulation.'''
generate_meshfile("channel",layers)
os.system(binary+" channel_viscous.flml")
s=stat_parser("channel-flow-dg.stat")
return (s["Water"]['AnalyticUVelocitySolutionError']['l2norm'][-1],
s["Water"]['AnalyticPressureSolutionError']['l2norm'][-1])
| lgpl-2.1 | 7,518,357,913,638,427,000 | 26.325581 | 77 | 0.669787 | false |
tamland/trakt-sync | xbmc_library.py | 1 | 4438 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Thomas Amland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import logging
import json
import pykka
from models import Movie, Episode
logger = logging.getLogger(__name__)
class XBMCLibrary(pykka.ThreadingActor):
_movie_properties = ['title', 'year', 'imdbnumber', 'playcount']
def __init__(self):
pykka.ThreadingActor.__init__(self)
def movie(self, movieid):
params = {
'movieid': movieid,
'properties': self._movie_properties
}
response = jsonrpc('VideoLibrary.GetMovieDetails', params)
movie = response['result']['moviedetails']
return _load_movie(movie)
def episode(self, episodeid):
params = {
'episodeid': episodeid,
'properties': ['season', 'episode', 'playcount', 'tvshowid'],
}
episode = jsonrpc('VideoLibrary.GetEpisodeDetails', params)['result']['episodedetails']
params = {'tvshowid': episode['tvshowid'], 'properties': ['imdbnumber']}
tvshow = jsonrpc('VideoLibrary.GetTVShowDetails', params)['result']['tvshowdetails']
return _load_episode(episode, tvshow['imdbnumber'])
def movies(self):
params = {'properties': self._movie_properties}
response = jsonrpc('VideoLibrary.GetMovies', params)
movies = response['result'].get('movies', [])
movies = map(_load_movie, movies)
return [m for m in movies if m is not None]
def episodes(self):
params = {'properties': ['imdbnumber']}
tvshows = jsonrpc('VideoLibrary.GetTVShows', params)['result']\
.get('tvshows', [])
ret = []
for tvshow in tvshows:
params = {
'tvshowid': tvshow['tvshowid'],
'properties': ['season', 'episode', 'playcount', 'lastplayed']
}
episodes = jsonrpc('VideoLibrary.GetEpisodes', params)['result']\
.get('episodes', [])
episodes = [_load_episode(ep, tvshow['imdbnumber']) for ep in episodes]
ret.extend(episodes)
return ret
def update_movie_details(self, movie):
if not movie.xbmcid or movie.playcount <= 0:
return False
params = {'movieid': movie.xbmcid, 'playcount': movie.playcount}
r = jsonrpc('VideoLibrary.SetMovieDetails', params)
return r.get('result') == 'OK'
def update_episode_details(self, item):
if not item.xbmcid or item.playcount <= 0:
return False
params = {'episodeid': item.xbmcid, 'playcount': item.playcount}
r = jsonrpc('VideoLibrary.SetEpisodeDetails', params)
return r.get('result') == 'OK'
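# Illustrative usage (a sketch, not part of the original module): the actor is
# normally started through pykka and queried via its proxy.
#
#     library = XBMCLibrary.start().proxy()
#     movies = library.movies().get()   # resolves the pykka future to a list of Movie objects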
def _load_movie(r):
return Movie(
title=r['title'],
year=r['year'],
imdbid=r['imdbnumber'],
xbmcid=r['movieid'],
playcount=r['playcount'],
)
def _load_episode(r, tvshowid):
return Episode(
tvdbid=tvshowid,
season=r['season'],
episode=r['episode'],
xbmcid=r['episodeid'],
playcount=r['playcount'],
)
def jsonrpc(method, params=None):
if params is None:
params = {}
payload = {
'jsonrpc': '2.0',
'id': 1,
'method': method,
'params': params,
}
payload = json.dumps(payload, encoding='utf-8')
try:
import xbmc
    except ImportError:
import requests
response = requests.post(
"http://localhost:8081/jsonrpc",
data=payload,
headers={'content-type': 'application/json'}).json()
else:
response = json.loads(xbmc.executeJSONRPC(payload), encoding='utf-8')
if 'error' in response:
logger.error("jsonrpc error: %r" % response)
return None
return response
| gpl-3.0 | 6,390,157,565,330,041,000 | 31.874074 | 95 | 0.605228 | false |
dgouldin/invisible-ink | invisible_ink.py | 1 | 2998 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import uuid
_INVISIBLE_CHARS = (
'\u200b',
'\u200c',
'\u200d',
'\ufeff',
)
_INVISIBLE_MAP = dict(zip(
'0123456789abcdef',
(''.join((i, j)) for i in _INVISIBLE_CHARS for j in _INVISIBLE_CHARS),
))
_INVISIBLE_REVERSE_MAP = {v: k for k, v in _INVISIBLE_MAP.iteritems()}
def uuid_to_watermark(watermark_uuid):
"Returns the watermark unicode string for a given uuid"
return ''.join(_INVISIBLE_MAP[c] for c in watermark_uuid.get_hex())
_WATERMARK_LENGTH = len(uuid_to_watermark(uuid.uuid4()))
_WATERMARK_RE = re.compile(r'[{}]{{{}}}'.format(
''.join(_INVISIBLE_CHARS),
_WATERMARK_LENGTH,
))
def watermark_to_uuid(watermark):
"Returns the uuid for a given watermark string"
if len(watermark) != _WATERMARK_LENGTH:
raise ValueError('Watermark must be {} characters'.format(
_WATERMARK_LENGTH))
try:
watermark_hex = ''.join(
_INVISIBLE_REVERSE_MAP[k]
for k in map(''.join, zip(*[iter(watermark)] * 2))
)
except KeyError:
raise ValueError('Watermark contains invalid characters')
return uuid.UUID(hex=watermark_hex)
def find_all_watermark_uuids(encoded_text):
return map(watermark_to_uuid, _WATERMARK_RE.findall(encoded_text))
def encode_watermark(text, watermark_uuid=None, prepend=False):
"""Encodes the given text with a watermark string generated from the given
uuid. Optionally appends or prepends the watermark string.
Returns a 2-tuple (encoded_text, watermark_uuid)
"""
if not isinstance(text, unicode):
raise ValueError('text must be a unicode string')
watermark_uuid = watermark_uuid or uuid.uuid4()
watermark = uuid_to_watermark(watermark_uuid)
if prepend:
encoded_text = ''.join((watermark, text))
else:
encoded_text = ''.join((text, watermark))
return encoded_text, watermark_uuid
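# Illustrative round trip (a sketch, not part of the original module):
#
#     marked, wid = encode_watermark(u'hello world')
#     decode_watermark(marked)          # -> (u'hello world', wid)
#     find_all_watermark_uuids(marked)  # -> [wid]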
def decode_watermark(encoded_text):
"""Decodes the given text, separating out the original text and the
watermark uuid.
Returns a 2-tuple (text, watermark_uuid). If no watermark is detected, text
is the original text and watermark_uuid is None.
"""
if not isinstance(encoded_text, unicode):
raise ValueError('encoded_text must be a unicode string')
if len(encoded_text) < _WATERMARK_LENGTH:
return encoded_text, None
# appended watermark
watermark = encoded_text[-_WATERMARK_LENGTH:]
text = encoded_text[:-_WATERMARK_LENGTH]
try:
watermark_uuid = watermark_to_uuid(watermark)
except ValueError:
pass
else:
return text, watermark_uuid
# prepended watermark
watermark = encoded_text[:_WATERMARK_LENGTH]
text = encoded_text[_WATERMARK_LENGTH:]
try:
watermark_uuid = watermark_to_uuid(watermark)
except ValueError:
pass
else:
return text, watermark_uuid
return encoded_text, None
| mit | 6,584,132,110,583,274,000 | 27.552381 | 79 | 0.657772 | false |
samdowd/drumm-farm | drumm_env/lib/python2.7/site-packages/storages/backends/s3boto.py | 1 | 20374 | import os
import posixpath
import mimetypes
from datetime import datetime
from gzip import GzipFile
from tempfile import SpooledTemporaryFile
from django.core.files.base import File
from django.core.files.storage import Storage
from django.core.exceptions import ImproperlyConfigured, SuspiciousOperation
from django.utils.deconstruct import deconstructible
from django.utils.encoding import force_text, smart_str, filepath_to_uri, force_bytes
from django.utils.six import BytesIO
from django.utils.six.moves.urllib import parse as urlparse
try:
from boto import __version__ as boto_version
from boto.s3.connection import S3Connection, SubdomainCallingFormat
from boto.exception import S3ResponseError
from boto.s3.key import Key as S3Key
from boto.utils import parse_ts, ISO8601
except ImportError:
raise ImproperlyConfigured("Could not load Boto's S3 bindings.\n"
"See https://github.com/boto/boto")
from storages.utils import setting
boto_version_info = tuple([int(i) for i in boto_version.split('-')[0].split('.')])
if boto_version_info[:2] < (2, 32):
raise ImproperlyConfigured("The installed Boto library must be 2.32 or "
"higher.\nSee https://github.com/boto/boto")
def safe_join(base, *paths):
"""
A version of django.utils._os.safe_join for S3 paths.
Joins one or more path components to the base path component
intelligently. Returns a normalized version of the final path.
The final path must be located inside of the base path component
(otherwise a ValueError is raised).
Paths outside the base path indicate a possible security
sensitive operation.
"""
base_path = force_text(base)
base_path = base_path.rstrip('/')
paths = [force_text(p) for p in paths]
final_path = base_path
for path in paths:
final_path = urlparse.urljoin(final_path.rstrip('/') + "/", path)
# Ensure final_path starts with base_path and that the next character after
# the final path is '/' (or nothing, in which case final_path must be
# equal to base_path).
base_path_len = len(base_path)
if (not final_path.startswith(base_path) or
final_path[base_path_len:base_path_len + 1] not in ('', '/')):
raise ValueError('the joined path is located outside of the base path'
' component')
return final_path.lstrip('/')
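# Illustrative behaviour of safe_join (a sketch, not part of the original
# module); the paths are placeholders:
#
#     safe_join("media", "2017/cat.jpg")   # -> u'media/2017/cat.jpg'
#     safe_join("media", "../etc/passwd")  # raises ValueError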
@deconstructible
class S3BotoStorageFile(File):
"""
The default file object used by the S3BotoStorage backend.
This file implements file streaming using boto's multipart
uploading functionality. The file can be opened in read or
write mode.
This class extends Django's File class. However, the contained
data is only the data contained in the current buffer. So you
should not access the contained file object directly. You should
access the data via this class.
Warning: This file *must* be closed using the close() method in
order to properly write the file to S3. Be sure to close the file
in your application.
"""
# TODO: Read/Write (rw) mode may be a bit undefined at the moment. Needs testing.
# TODO: When Django drops support for Python 2.5, rewrite to use the
# BufferedIO streams in the Python 2.6 io module.
buffer_size = setting('AWS_S3_FILE_BUFFER_SIZE', 5242880)
def __init__(self, name, mode, storage, buffer_size=None):
self._storage = storage
self.name = name[len(self._storage.location):].lstrip('/')
self._mode = mode
self.key = storage.bucket.get_key(self._storage._encode_name(name))
if not self.key and 'w' in mode:
self.key = storage.bucket.new_key(storage._encode_name(name))
self._is_dirty = False
self._file = None
self._multipart = None
# 5 MB is the minimum part size (if there is more than one part).
# Amazon allows up to 10,000 parts. The default supports uploads
# up to roughly 50 GB. Increase the part size to accommodate
# for files larger than this.
if buffer_size is not None:
self.buffer_size = buffer_size
self._write_counter = 0
@property
def size(self):
return self.key.size
def _get_file(self):
if self._file is None:
self._file = SpooledTemporaryFile(
max_size=self._storage.max_memory_size,
suffix=".S3BotoStorageFile",
dir=setting("FILE_UPLOAD_TEMP_DIR", None)
)
if 'r' in self._mode:
self._is_dirty = False
self.key.get_contents_to_file(self._file)
self._file.seek(0)
if self._storage.gzip and self.key.content_encoding == 'gzip':
self._file = GzipFile(mode=self._mode, fileobj=self._file)
return self._file
def _set_file(self, value):
self._file = value
file = property(_get_file, _set_file)
def read(self, *args, **kwargs):
if 'r' not in self._mode:
raise AttributeError("File was not opened in read mode.")
return super(S3BotoStorageFile, self).read(*args, **kwargs)
def write(self, content, *args, **kwargs):
if 'w' not in self._mode:
raise AttributeError("File was not opened in write mode.")
self._is_dirty = True
if self._multipart is None:
provider = self.key.bucket.connection.provider
upload_headers = {
provider.acl_header: self._storage.default_acl
}
upload_headers.update({'Content-Type': mimetypes.guess_type(self.key.name)[0] or self._storage.key_class.DefaultContentType})
upload_headers.update(self._storage.headers)
self._multipart = self._storage.bucket.initiate_multipart_upload(
self.key.name,
headers=upload_headers,
reduced_redundancy=self._storage.reduced_redundancy,
encrypt_key=self._storage.encryption,
)
if self.buffer_size <= self._buffer_file_size:
self._flush_write_buffer()
return super(S3BotoStorageFile, self).write(force_bytes(content), *args, **kwargs)
@property
def _buffer_file_size(self):
pos = self.file.tell()
self.file.seek(0, os.SEEK_END)
length = self.file.tell()
self.file.seek(pos)
return length
def _flush_write_buffer(self):
"""
Flushes the write buffer.
"""
if self._buffer_file_size:
self._write_counter += 1
self.file.seek(0)
headers = self._storage.headers.copy()
self._multipart.upload_part_from_file(
self.file, self._write_counter, headers=headers)
def close(self):
if self._is_dirty:
self._flush_write_buffer()
self._multipart.complete_upload()
else:
            if self._multipart is not None:
self._multipart.cancel_upload()
self.key.close()
if self._file is not None:
self._file.close()
self._file = None
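# Illustrative usage (a sketch, not part of the original module): file objects
# are normally obtained through the storage backend rather than constructed
# directly; the file name is a placeholder.
#
#     storage = S3BotoStorage()
#     fd = storage.open('reports/2017.csv', 'w')
#     fd.write(b'col1,col2\n')
#     fd.close()  # closing completes the multipart upload to S3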
@deconstructible
class S3BotoStorage(Storage):
"""
Amazon Simple Storage Service using Boto
This storage backend supports opening files in read or write
mode and supports streaming(buffering) data in chunks to S3
when writing.
"""
connection_class = S3Connection
connection_response_error = S3ResponseError
file_class = S3BotoStorageFile
key_class = S3Key
# used for looking up the access and secret key from env vars
access_key_names = ['AWS_S3_ACCESS_KEY_ID', 'AWS_ACCESS_KEY_ID']
secret_key_names = ['AWS_S3_SECRET_ACCESS_KEY', 'AWS_SECRET_ACCESS_KEY']
access_key = setting('AWS_S3_ACCESS_KEY_ID', setting('AWS_ACCESS_KEY_ID'))
secret_key = setting('AWS_S3_SECRET_ACCESS_KEY', setting('AWS_SECRET_ACCESS_KEY'))
file_overwrite = setting('AWS_S3_FILE_OVERWRITE', True)
headers = setting('AWS_HEADERS', {})
bucket_name = setting('AWS_STORAGE_BUCKET_NAME')
auto_create_bucket = setting('AWS_AUTO_CREATE_BUCKET', False)
default_acl = setting('AWS_DEFAULT_ACL', 'public-read')
bucket_acl = setting('AWS_BUCKET_ACL', default_acl)
querystring_auth = setting('AWS_QUERYSTRING_AUTH', True)
querystring_expire = setting('AWS_QUERYSTRING_EXPIRE', 3600)
reduced_redundancy = setting('AWS_REDUCED_REDUNDANCY', False)
location = setting('AWS_LOCATION', '')
encryption = setting('AWS_S3_ENCRYPTION', False)
custom_domain = setting('AWS_S3_CUSTOM_DOMAIN')
calling_format = setting('AWS_S3_CALLING_FORMAT', SubdomainCallingFormat())
secure_urls = setting('AWS_S3_SECURE_URLS', True)
file_name_charset = setting('AWS_S3_FILE_NAME_CHARSET', 'utf-8')
gzip = setting('AWS_IS_GZIPPED', False)
preload_metadata = setting('AWS_PRELOAD_METADATA', False)
gzip_content_types = setting('GZIP_CONTENT_TYPES', (
'text/css',
'text/javascript',
'application/javascript',
'application/x-javascript',
'image/svg+xml',
))
url_protocol = setting('AWS_S3_URL_PROTOCOL', 'http:')
host = setting('AWS_S3_HOST', S3Connection.DefaultHost)
use_ssl = setting('AWS_S3_USE_SSL', True)
port = setting('AWS_S3_PORT', None)
proxy = setting('AWS_S3_PROXY_HOST', None)
proxy_port = setting('AWS_S3_PROXY_PORT', None)
# The max amount of memory a returned file can take up before being
# rolled over into a temporary file on disk. Default is 0: Do not roll over.
max_memory_size = setting('AWS_S3_MAX_MEMORY_SIZE', 0)
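    # Illustrative Django settings for this backend (a sketch, not part of the
    # original module); the bucket name and keys are placeholders:
    #
    #     DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
    #     AWS_STORAGE_BUCKET_NAME = 'my-bucket'
    #     AWS_ACCESS_KEY_ID = '...'
    #     AWS_SECRET_ACCESS_KEY = '...'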
def __init__(self, acl=None, bucket=None, **settings):
# check if some of the settings we've provided as class attributes
# need to be overwritten with values passed in here
for name, value in settings.items():
if hasattr(self, name):
setattr(self, name, value)
# For backward-compatibility of old differing parameter names
if acl is not None:
self.default_acl = acl
if bucket is not None:
self.bucket_name = bucket
self.location = (self.location or '').lstrip('/')
# Backward-compatibility: given the anteriority of the SECURE_URL setting
# we fall back to https if specified in order to avoid the construction
# of unsecure urls.
if self.secure_urls:
self.url_protocol = 'https:'
self._entries = {}
self._bucket = None
self._connection = None
if not self.access_key and not self.secret_key:
self.access_key, self.secret_key = self._get_access_keys()
@property
def connection(self):
if self._connection is None:
self._connection = self.connection_class(
self.access_key,
self.secret_key,
is_secure=self.use_ssl,
calling_format=self.calling_format,
host=self.host,
port=self.port,
proxy=self.proxy,
proxy_port=self.proxy_port
)
return self._connection
@property
def bucket(self):
"""
Get the current bucket. If there is no current bucket object
create it.
"""
if self._bucket is None:
self._bucket = self._get_or_create_bucket(self.bucket_name)
return self._bucket
@property
def entries(self):
"""
Get the locally cached files for the bucket.
"""
if self.preload_metadata and not self._entries:
self._entries = dict((self._decode_name(entry.key), entry)
for entry in self.bucket.list(prefix=self.location))
return self._entries
def _get_access_keys(self):
"""
Gets the access keys to use when accessing S3. If none
are provided to the class in the constructor or in the
settings then get them from the environment variables.
"""
def lookup_env(names):
for name in names:
value = os.environ.get(name)
if value:
return value
access_key = self.access_key or lookup_env(self.access_key_names)
secret_key = self.secret_key or lookup_env(self.secret_key_names)
return access_key, secret_key
def _get_or_create_bucket(self, name):
"""
Retrieves a bucket if it exists, otherwise creates it.
"""
try:
return self.connection.get_bucket(name, validate=self.auto_create_bucket)
except self.connection_response_error:
if self.auto_create_bucket:
bucket = self.connection.create_bucket(name)
bucket.set_acl(self.bucket_acl)
return bucket
raise ImproperlyConfigured("Bucket %s does not exist. Buckets "
"can be automatically created by "
"setting AWS_AUTO_CREATE_BUCKET to "
"``True``." % name)
def _clean_name(self, name):
"""
Cleans the name so that Windows style paths work
"""
# Normalize Windows style paths
clean_name = posixpath.normpath(name).replace('\\', '/')
# os.path.normpath() can strip trailing slashes so we implement
# a workaround here.
if name.endswith('/') and not clean_name.endswith('/'):
# Add a trailing slash as it was stripped.
return clean_name + '/'
else:
return clean_name
def _normalize_name(self, name):
"""
Normalizes the name so that paths like /path/to/ignored/../something.txt
work. We check to make sure that the path pointed to is not outside
the directory specified by the LOCATION setting.
"""
try:
return safe_join(self.location, name)
except ValueError:
raise SuspiciousOperation("Attempted access to '%s' denied." %
name)
def _encode_name(self, name):
return smart_str(name, encoding=self.file_name_charset)
def _decode_name(self, name):
return force_text(name, encoding=self.file_name_charset)
def _compress_content(self, content):
"""Gzip a given string content."""
zbuf = BytesIO()
# The GZIP header has a modification time attribute (see http://www.zlib.org/rfc-gzip.html)
# This means each time a file is compressed it changes even if the other contents don't change
# For S3 this defeats detection of changes using MD5 sums on gzipped files
# Fixing the mtime at 0.0 at compression time avoids this problem
zfile = GzipFile(mode='wb', compresslevel=6, fileobj=zbuf, mtime=0.0)
try:
zfile.write(force_bytes(content.read()))
finally:
zfile.close()
zbuf.seek(0)
content.file = zbuf
content.seek(0)
return content
def _open(self, name, mode='rb'):
name = self._normalize_name(self._clean_name(name))
f = self.file_class(name, mode, self)
if not f.key:
raise IOError('File does not exist: %s' % name)
return f
def _save(self, name, content):
cleaned_name = self._clean_name(name)
name = self._normalize_name(cleaned_name)
headers = self.headers.copy()
_type, encoding = mimetypes.guess_type(name)
content_type = getattr(content, 'content_type',
_type or self.key_class.DefaultContentType)
# setting the content_type in the key object is not enough.
headers.update({'Content-Type': content_type})
if self.gzip and content_type in self.gzip_content_types:
content = self._compress_content(content)
headers.update({'Content-Encoding': 'gzip'})
elif encoding:
# If the content already has a particular encoding, set it
headers.update({'Content-Encoding': encoding})
content.name = cleaned_name
encoded_name = self._encode_name(name)
key = self.bucket.get_key(encoded_name)
if not key:
key = self.bucket.new_key(encoded_name)
if self.preload_metadata:
self._entries[encoded_name] = key
key.last_modified = datetime.utcnow().strftime(ISO8601)
key.set_metadata('Content-Type', content_type)
self._save_content(key, content, headers=headers)
return cleaned_name
def _save_content(self, key, content, headers):
# only pass backwards incompatible arguments if they vary from the default
kwargs = {}
if self.encryption:
kwargs['encrypt_key'] = self.encryption
key.set_contents_from_file(content, headers=headers,
policy=self.default_acl,
reduced_redundancy=self.reduced_redundancy,
rewind=True, **kwargs)
def delete(self, name):
name = self._normalize_name(self._clean_name(name))
self.bucket.delete_key(self._encode_name(name))
def exists(self, name):
if not name: # root element aka the bucket
try:
self.bucket
return True
except ImproperlyConfigured:
return False
name = self._normalize_name(self._clean_name(name))
if self.entries:
return name in self.entries
k = self.bucket.new_key(self._encode_name(name))
return k.exists()
def listdir(self, name):
name = self._normalize_name(self._clean_name(name))
# for the bucket.list and logic below name needs to end in /
# But for the root path "" we leave it as an empty string
if name and not name.endswith('/'):
name += '/'
dirlist = self.bucket.list(self._encode_name(name))
files = []
dirs = set()
base_parts = name.split("/")[:-1]
for item in dirlist:
parts = item.name.split("/")
parts = parts[len(base_parts):]
if len(parts) == 1:
# File
files.append(parts[0])
elif len(parts) > 1:
# Directory
dirs.add(parts[0])
return list(dirs), files
def size(self, name):
name = self._normalize_name(self._clean_name(name))
if self.entries:
entry = self.entries.get(name)
if entry:
return entry.size
return 0
return self.bucket.get_key(self._encode_name(name)).size
def modified_time(self, name):
name = self._normalize_name(self._clean_name(name))
entry = self.entries.get(name)
# only call self.bucket.get_key() if the key is not found
# in the preloaded metadata.
if entry is None:
entry = self.bucket.get_key(self._encode_name(name))
# Parse the last_modified string to a local datetime object.
return parse_ts(entry.last_modified)
def url(self, name, headers=None, response_headers=None, expire=None):
# Preserve the trailing slash after normalizing the path.
name = self._normalize_name(self._clean_name(name))
if self.custom_domain:
return "%s//%s/%s" % (self.url_protocol,
self.custom_domain, filepath_to_uri(name))
if expire is None:
expire = self.querystring_expire
return self.connection.generate_url(
expire,
method='GET',
bucket=self.bucket.name,
key=self._encode_name(name),
headers=headers,
query_auth=self.querystring_auth,
force_http=not self.secure_urls,
response_headers=response_headers,
)
def get_available_name(self, name, max_length=None):
""" Overwrite existing file with the same name. """
if self.file_overwrite:
name = self._clean_name(name)
return name
return super(S3BotoStorage, self).get_available_name(name, max_length)
| mit | 4,176,623,037,583,207,400 | 38.030651 | 137 | 0.605232 | false |
Smetterleen/Neopets-Python-API | neopapi/explore/world/island/TrainingSchool.py | 1 | 6309 | from neopapi.explore.world.island.Exceptions import UnknownStatException,\
PetNotFoundException, PetNotOnCourseException, PetAlreadyOnCourseException,\
StatTooHighException
from neopapi.core.browse import register_page
from neopapi.core.browse.Browser import BROWSER
import re
from datetime import timedelta
"""
This module provides the API for the Mystery Island Training school
"""
register_page('island/training.phtml',
['island/training.phtml?type=status', 'island/training.phtml?type=courses'])
register_page('island/training.phtml?type=status',
['island/training.phtml?type=status', 'island/training.phtml?type=courses'])
register_page('island/training.phtml?type=courses',
['island/training.phtml?type=status', 'island/training.phtml?type=courses'])
register_page('island/process_training.phtml')
# Stats to train
STATS = ['Level', 'Endurance', 'Strength', 'Defence', 'Agility']
LEVEL, HP, STRENGTH, DEFENCE, MOVEMENT = STATS
# Training statusses
IDLE, AWAITING_PAYMENT, TRAINING, FINISHED = 1, 2, 3, 4
def get_status(pet_name):
'''
Get the current status of the given pet in the island training school in the
form of a dictionary
'''
page = BROWSER.goto('island/training.phtml?type=status', force_refresh=True)
pet_td = page.find('td', text=re.compile(pet_name + '.*'))
if pet_td is None:
raise PetNotFoundException(pet_name)
infos = pet_td.find_parent('tr').find_next('tr').find_all('b')
info = {}
info['level'] = int(infos[0].text)
info['strength'] = int(infos[1].text)
info['defence'] = int(infos[2].text)
info['movement'] = int(infos[3].text)
info['current_hp'] = int(infos[4].text.split(' / ')[0])
info['hp'] = int(infos[4].text.split(' / ')[1])
return info
def get_course_status(pet_name):
page = BROWSER.goto('island/training.phtml?type=status', force_refresh=True)
pet_td = page.find('td', text=re.compile(pet_name + '.*'))
if pet_td is None:
raise PetNotFoundException(pet_name)
status_td = pet_td.find_parent('tr').find_next_sibling('tr').find_all('td')[1]
if status_td.text == 'Course Finished!':
return FINISHED
elif 'This course has not been paid for yet' in status_td.text:
return AWAITING_PAYMENT
elif 'Time till course finishes' in status_td.text:
return TRAINING
return IDLE
def get_course_time_remaining(pet_name):
page = BROWSER.goto('island/training.phtml?type=status', force_refresh=True)
status_td = page.find('td', text=re.compile(pet_name + '.*')).find_parent('tr').find_next_sibling('tr').find_all('td')[1]
if 'Time till course finishes' not in status_td.text:
raise PetNotOnCourseException(pet_name)
time_parts = status_td.find('b').text.split(',')
hours = int(time_parts[0].replace('hrs', '').strip())
minutes = int(time_parts[1].replace('minutes', '').strip())
seconds = int(time_parts[2].replace('seconds', '').strip())
return timedelta(hours=hours, minutes=minutes, seconds=seconds)
def start_course(pet_name, stat):
'''
This method starts a course for the given pet in the given stat
'''
if not stat in STATS:
raise UnknownStatException(stat)
page = BROWSER.goto('island/training.phtml?type=courses')
if page.find('select', {'name': 'pet_name'}).find('option', value=pet_name) is None:
raise PetNotFoundException(pet_name)
post_dict = {'course_type' : stat,
'pet_name' : pet_name,
'type' : 'start'}
result_page = BROWSER.post('island/process_training.phtml', post_dict)
if 'That pet is already doing a course' in result_page.text:
BROWSER.back()
raise PetAlreadyOnCourseException(pet_name)
if 'No statistic can go above twice your pet' in result_page.text or 'Endurance can not go above three times your pet\'s level' in result_page.text:
BROWSER.back()
raise StatTooHighException(pet_name)
# TODO: check if everything went all right
return result_page
def get_course_cost(pet_name):
'''
This method checks if the given pet is currently enrolled in a course that
still needs to be payed at the given school. If this is the case, it will
return an array of item names that are needed to pay for the course.
Otherwise it returns None.
'''
page = BROWSER.goto('island/training.phtml?type=status')
pet_td = page.find('td', text=re.compile(pet_name + '.*'))
if pet_td is None:
raise PetNotFoundException(pet_name)
status_td = pet_td.find_parent('tr').find_next_sibling('tr').find_all('td')[1]
    if 'This course has not been paid for yet' not in status_td.text:
raise PetNotOnCourseException(pet_name)
return [tag.text for tag in status_td.find('p').find_all('b')]
def pay_course(pet_name):
'''
This method tries to pay the current course of the given pet.
'''
page = BROWSER.goto('island/training.phtml?type=status')
pet_td = page.find('td', text=re.compile(pet_name + '.*'))
if pet_td is None:
raise PetNotFoundException(pet_name)
status_td = pet_td.find_parent('tr').find_next_sibling('tr').find_all('td')[1]
    if 'This course has not been paid for yet' not in status_td.text:
raise PetNotOnCourseException(pet_name)
BROWSER._get('island/process_training.phtml?type=pay&pet_name=' + pet_name)
return get_course_status(pet_name)
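# Illustrative workflow (a sketch, not part of the original module; the pet
# name is a placeholder):
#
#     start_course('my_pet', STRENGTH)
#     items_needed = get_course_cost('my_pet')  # names of the items required as payment
#     pay_course('my_pet')
#     # ...once get_course_status('my_pet') == FINISHED:
#     finish_course('my_pet')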
def finish_course(pet_name):
'''
This method finishes the current course of the given pet if it is finished
'''
page = BROWSER.goto('island/training.phtml?type=status', force_refresh=True)
pet_td = page.find('td', text=re.compile(pet_name + '.*'))
if pet_td is None:
raise PetNotFoundException(pet_name)
status_td = pet_td.find_parent('tr').find_next_sibling('tr').find_all('td')[1]
    if 'Course Finished!' not in status_td.text:
raise PetNotOnCourseException(pet_name)
post_dict = {'pet_name': pet_name,
'type': 'complete'}
result_page = BROWSER.post('island/process_training.phtml', post_dict)
# TODO: check if everything went all right
return result_page | gpl-3.0 | -1,034,167,616,519,363,800 | 37.242424 | 152 | 0.656839 | false |
vigilo/vigiconf | src/vigilo/vigiconf/lib/server/base.py | 1 | 11398 | # -*- coding: utf-8 -*-
# Copyright (C) 2007-2020 CS GROUP - France
# License: GNU GPL v2 <http://www.gnu.org/licenses/gpl-2.0.html>
"""
This module contains the base class for a Vigilo server: L{Server}.
"""
from __future__ import absolute_import
import os
import shutil
import glob
import re
from vigilo.common.conf import settings
from vigilo.models import tables
from vigilo.models.session import DBSession
from vigilo.common.logging import get_logger
LOGGER = get_logger(__name__)
from vigilo.common.gettext import translate
_ = translate(__name__)
from vigilo.vigiconf import conf
from vigilo.vigiconf.lib import VigiConfError
from vigilo.vigiconf.lib.systemcommand import SystemCommand, SystemCommandError
class ServerError(VigiConfError):
"""Exception concernant un objet L{Server}"""
def __init__(self, value, iServerName = ''):
super(ServerError, self).__init__(value)
self.value = value
self.mServer = iServerName
def __str__(self):
_srvStr = ""
if( len(self.mServer)>0):
_srvStr = " on server %s" % (self.mServer)
return repr("ServerError : %s%s" % (self.value, _srvStr))
class Server(object):
"""
    A Vigilo server.
    @ivar name: server name (DNS)
    @type name: C{str}
    @ivar revisions: revisions of the configurations deployed on this server.
    @type revisions: C{dict}
"""
def __init__(self, name):
self.name = name
self._rev_filename = os.path.join(
settings["vigiconf"].get("libdir"),
"revisions" , "%s.revisions" % name)
self.revisions = {"conf": None,
"deployed": None,
"installed": None,
"previous": None,
}
def getName(self):
"""@return: L{name}"""
return self.name
def needsDeployment(self):
"""
        Tests whether the server needs a deployment.
@rtype: C{bool}
"""
return self.revisions["conf"] != self.revisions["deployed"]
def needsRestart(self):
"""
        Tests whether the server's applications need to be restarted.
@rtype: C{bool}
"""
return self.revisions["deployed"] != self.revisions["installed"]
# external references
def getBaseDir(self): # pylint: disable-msg=R0201
"""
        @return: Base directory for deployments.
@rtype: C{str}
"""
return os.path.join(settings["vigiconf"].get("libdir"), "deploy")
def createCommand(self, iCommand):
"""
        @note: To be reimplemented in subclasses.
        @param iCommand: command to execute.
        @type iCommand: C{str}
        @return: The command instance
@rtype: L{SystemCommand<lib.systemcommand.SystemCommand>}
"""
c = SystemCommand(iCommand)
c.simulate = self.is_simulation()
return c
def is_simulation(self):
"""
        @return: State of the simulation mode
@rtype: C{bool}
"""
simulate = False
try:
simulate = settings["vigiconf"].as_bool("simulate")
except KeyError:
pass
return simulate
# methods
def switch_directories(self):
"""
        Archives the directory containing the old configurations, and
        activates the new ones, using C{vigiconf-local}.
"""
cmd = ["vigiconf-local", "activate-conf"]
_command = self.createCommand(cmd)
try:
_command.execute()
except SystemCommandError as e:
raise ServerError(_("Can't activate the configuration on "
"%(server)s. COMMAND \"%(cmd)s\" FAILED. "
"REASON: %(reason)s") % {
'server': self.getName(),
'cmd': " ".join(cmd),
'reason': e.value,
}, self.getName())
LOGGER.debug("Switched directories on %s", self.name)
def tarConf(self):
"""
        I{Tars} the configuration files before deployment.
"""
cmd = ["tar", "-C",
os.path.join(self.getBaseDir(), self.getName()), "-cvf",
os.path.join(self.getBaseDir(), "%s.tar" % self.getName()), "."]
cmd = SystemCommand(cmd)
try:
cmd.execute()
except SystemCommandError as e:
raise ServerError(_("Can't tar config for server "
"%(server)s: %(error)s") % {
'server': self.getName(),
'error': e.value,
})
def deployTar(self):
raise NotImplementedError
def deployFiles(self):
"""
        Copies all the configuration files.
"""
self.tarConf()
self.deployTar()
LOGGER.info(_("%s : deployment successful."), self.getName())
def _copy(self, source, destination):
"""
        A simple wrapper around shutil.copyfile.
@param source: source
@type source: C{str}
@param destination: destination
@type destination: C{str}
"""
try:
os.makedirs(os.path.dirname(destination))
except OSError:
pass
try:
shutil.copyfile(source, destination)
except Exception as e:
raise ServerError(_("Cannot copy files (%(from)s to %(to)s): "
"%(error)s.") % {
'from': source,
'to': destination,
'error': e,
}, self.getName())
def getValidationDir(self):
return os.path.join(self.getBaseDir(), self.getName(), "validation")
def insertValidationDir(self):
"""
        Prepares the directory holding the validation scripts.
"""
validation_dir = self.getValidationDir()
if not os.path.exists(validation_dir):
os.makedirs(validation_dir)
validation_scripts = os.path.join(conf.CODEDIR, "validation", "*.sh")
for validation_script in glob.glob(validation_scripts):
shutil.copy(validation_script, validation_dir)
def deploy(self):
# insert the "validation" directory in the deployment directory
self.insertValidationDir()
# now, the deployment directory is complete.
self.deployFiles()
def set_revision(self, rev):
# update local revision files
self.revisions["conf"] = rev
self.revisions["deployed"] = rev
self.write_revisions()
cmd = self.createCommand(["vigiconf-local", "set-revision", str(rev)])
cmd.execute()
def update_revisions(self):
cmd = self.createCommand(["vigiconf-local", "get-revisions"])
cmd.execute()
        rev_re = re.compile(r"^\s*(\w+)\s+(\d+)\s*$")
revisions = {"new": 0, "prod": 0, "old": 0}
for line in cmd.getResult().split("\n"):
rev_match = rev_re.match(line)
if not rev_match:
continue
directory = rev_match.group(1)
revision = rev_match.group(2)
revisions[directory] = int(revision)
self.revisions["deployed"] = revisions["new"]
self.revisions["installed"] = revisions["prod"]
self.revisions["previous"] = revisions["old"]
def write_revisions(self):
"""
        Writes the SVN revision to the state file.
"""
directory = os.path.dirname(self._rev_filename)
if not os.path.exists(directory):
os.makedirs(directory)
try:
_file = open(self._rev_filename, 'wb')
_file.write("Revision: %d\n" % self.revisions["conf"])
_file.close()
except Exception as e: # pylint: disable-msg=W0703
LOGGER.exception(_("Cannot write the revision file: %s"), e)
def get_state_text(self, last_revision):
self.update_revisions()
self.revisions["conf"] = last_revision
state = ( _("Server %(server)s:\n"
" deployed: %(deployed)d\n"
" installed: %(installed)d\n"
" previous: %(previous)d"
)
% {"server": self.name,
"deployed": self.revisions["deployed"],
"installed": self.revisions["installed"],
"previous": self.revisions["previous"],
} )
if self.needsDeployment() or self.needsRestart():
todo = []
if self.needsDeployment():
todo.append(_("should be deployed"))
if self.needsRestart():
todo.append(_("should restart"))
state += "\n -> %s" % ", ".join(todo)
if not self.is_enabled():
state += "\n " + _("disabled").upper()
return state
def is_enabled(self):
"""
        @return: The activation state of the server (C{True} for enabled,
            C{False} for disabled)
        """
        server_db = tables.VigiloServer.by_vigiloserver_name(
            unicode(self.name))
        if server_db is None:
            # not in the database, so not disabled (it may just
            # have been added)
return True
if server_db.disabled:
return False
else:
return True
def disable(self):
"""
        Disables this Vigilo server
"""
vserver = tables.VigiloServer.by_vigiloserver_name(unicode(self.name))
if vserver is None:
raise VigiConfError(_("The Vigilo server %s does not exist")
% self.name)
if vserver.disabled:
raise VigiConfError(_("The Vigilo server %s is already disabled")
% self.name)
vserver.disabled = True
DBSession.flush()
def enable(self):
"""
        Enables this Vigilo server
"""
vserver = tables.VigiloServer.by_vigiloserver_name(unicode(self.name))
if vserver is None:
raise VigiConfError(_("The Vigilo server %s does not exist")
% self.name)
if not vserver.disabled:
raise VigiConfError(_("The Vigilo server %s is already enabled")
% self.name)
        # Remove the previous associations
prev_ventil = DBSession.query(
tables.Ventilation.idapp, tables.Ventilation.idhost
).filter(
tables.Ventilation.idvigiloserver == vserver.idvigiloserver
).all()
for idapp, idhost in prev_ventil:
temp_ventils = DBSession.query(tables.Ventilation
).filter(
tables.Ventilation.idapp == idapp
).filter(
tables.Ventilation.idhost == idhost
).filter(
tables.Ventilation.idvigiloserver != vserver.idvigiloserver
).all()
for temp_ventil in temp_ventils:
DBSession.delete(temp_ventil)
vserver.disabled = False
DBSession.flush()
# vim:set expandtab tabstop=4 shiftwidth=4:
| gpl-2.0 | 4,204,519,866,043,522,000 | 33.550152 | 79 | 0.539456 | false |
h4ck3rm1k3/hacker-public-radio-publisher | setup.py | 1 | 1139 | """
setup
"""
from setuptools import setup, find_packages
setup(
name = "HackerPublicRadioPublisher",
version = "0.1",
description = "Python Uploader for Hacker Public Radio",
long_description=u'''
A set of scripts to manage the creation and uploading of shows into HPR
''',
platforms = "Debian GNU/Linux",
author = "James Michael DuPont",
author_email = "[email protected]",
license = "GNU GPLv3",
url = "github.com/h4ck3rm1k3/hacker-public-radio-publisher",
packages = find_packages(),
package_data = {
'': ['*.txt', '*.flac', '*.html'],
},
install_requires =
[
'nose',
'ftputil>=2.8',
'internetarchive>=0.4.4',
'Jinja>=1.2',
'PyYAML>=3.10',
'docopt>=0.6.1',
'pytest>=2.3.4',
'jsonpatch>=1.1',
'requests>=2.0.0',
# 'requests>=1.2.0',
'py>=1.4.14',
'jsonpointer>=1.1',
#'audiotools',
#not working with pip,
# get code from : https://github.com/tuffy/python-audio-tools.git
],
test_suite = 'nose.collector'
)
| gpl-3.0 | 755,017,810,238,400,400 | 24.311111 | 75 | 0.545215 | false |
sumsted/candybar | tests/test_candyBar39.py | 1 | 1178 | import hashlib
from unittest import TestCase
from candybar.CandyBarCode39 import CandyBar39
class TestCandyBar39(TestCase):
@staticmethod
def sha2_check(bs):
m = hashlib.sha224()
for b in bs:
m.update(str(b).encode())
print(m.hexdigest())
return m.hexdigest()
@staticmethod
def write_file(file_name, bs):
of = open(file_name, 'wb')
of.write(bs)
of.close()
def test_generate_barcode_with_contents(self):
cb39 = CandyBar39('', 400, 60)
bs = cb39.generate_barcode_with_contents('BARS ARE FUN')
self.write_file('./test3.png', bs)
bs = cb39.generate_barcode_with_contents('ABCDEFG')
self.write_file('./test_code_39_1.png', bs)
bs = cb39.generate_barcode_with_contents('1234567890')
self.write_file('./test_code_39_2.png', bs)
bs = cb39.generate_barcode_with_contents('HIJKLMNOPQRSTU')
self.write_file('./test_code_39_3.png', bs)
bs = cb39.generate_barcode_with_contents('UVWXYZ-. $/+%')
self.write_file('./test_code_39_4.png', bs)
| apache-2.0 | -2,149,034,555,611,100,700 | 32.657143 | 66 | 0.614601 | false |
hoaaoh/Audio2Vec | src/trans_dir_to_file.py | 1 | 2100 | #!/usr/bin/env python3
import os
import glob
import argparse
import random
FLAG = None
def parse_single_file(fn):
feat_list = []
with open(FLAG.dir_name + '/' + fn, 'r') as f:
for line in f:
line_sp = line.rstrip().split(',')
line_sp.append(fn)
feat_list.append(line_sp)
return feat_list
def get_all_files():
return_list = []
for i in os.listdir(FLAG.dir_name):
if i == '0':
continue
return_list.extend(parse_single_file(i))
return return_list
def split_train_test(feat_list):
shuf = list(range(len(feat_list)))
random.shuffle(shuf)
test_list = []
train_list= []
for i in range(len(feat_list)):
if i < FLAG.test_num:
test_list.append(feat_list[shuf[i]])
else:
train_list.append(feat_list[shuf[i]])
return train_list, test_list
def write_file(fn, feat_list):
with open(fn,'w') as f:
for i, feat_lab in enumerate(feat_list):
for j, feat in enumerate(feat_lab):
if j != len(feat_lab)-1:
f.write(feat+' ')
else:
f.write(feat+'\n')
return
def main():
all_feat_list = get_all_files()
# train, test = split_train_test(all_feat_list)
write_file(FLAG.out_name, all_feat_list)
# write_file(FLAG.out_name+'_test', test)
return
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='a python script for dividing whole data into'
' training and testing set')
parser.add_argument('dir_name',
metavar='<directory name>',
help='the directory that contains a lot of features in label file')
parser.add_argument('out_name',
metavar='<output name>',
help='the output will be extensioned with train and test\n'
'eg: tmp_train, tmp_test')
parser.add_argument('--test_num',type=int,
default=1000,
metavar='<num of testing>',
        help='the number of testing samples')
FLAG = parser.parse_args()
main()
| apache-2.0 | -621,771,863,118,600,700 | 25.923077 | 75 | 0.57 | false |
Azure/azure-sdk-for-python | sdk/core/azure-core/azure/core/_match_conditions.py | 1 | 1506 | # --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
from enum import Enum
class MatchConditions(Enum):
"""An enum to describe match conditions. """
Unconditionally = 1
IfNotModified = 2
IfModified = 3
IfPresent = 4
IfMissing = 5
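# A minimal usage sketch (illustrative only; the header mapping below is an
# assumption, not something defined in this module): client methods that accept
# a ``match_condition`` keyword typically translate these values into HTTP
# precondition headers, roughly as follows:
#
#     MatchConditions.IfNotModified  ->  If-Match: <etag>
#     MatchConditions.IfModified     ->  If-None-Match: <etag>
#     MatchConditions.IfPresent      ->  If-Match: *
#     MatchConditions.IfMissing      ->  If-None-Match: *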
| mit | 8,286,881,614,508,012,000 | 42.028571 | 78 | 0.679283 | false |
andrewalexander/cloud-custodian | c7n/commands.py | 1 | 9335 | # Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from datetime import timedelta, datetime
from functools import wraps
import inspect
import json
import logging
import os
import pprint
import sys
import time
import yaml
from c7n.credentials import SessionFactory
from c7n.policy import Policy, load as policy_load
from c7n.reports import report as do_report
from c7n.utils import Bag, dumps
from c7n.manager import resources
from c7n.resources import load_resources
from c7n import mu, schema
log = logging.getLogger('custodian.commands')
def policy_command(f):
@wraps(f)
def _load_policies(options):
load_resources()
collection = policy_load(options, options.config)
policies = collection.filter(options.policy_filter)
return f(options, policies)
return _load_policies
def validate(options):
load_resources()
if options.config is not None:
# support the old -c option
options.configs.append(options.config)
if len(options.configs) < 1:
# no configs to test
# We don't have the parser object, so fake ArgumentParser.error
print('custodian validate: error: no config files specified',
file=sys.stderr)
sys.exit(2)
used_policy_names = set()
schm = schema.generate()
errors = []
for config_file in options.configs:
config_file = os.path.expanduser(config_file)
if not os.path.exists(config_file):
raise ValueError("Invalid path for config %r" % config_file)
options.dryrun = True
format = config_file.rsplit('.', 1)[-1]
with open(config_file) as fh:
if format in ('yml', 'yaml'):
data = yaml.safe_load(fh.read())
if format in ('json',):
data = json.load(fh)
errors = schema.validate(data, schm)
conf_policy_names = {p['name'] for p in data.get('policies', ())}
dupes = conf_policy_names.intersection(used_policy_names)
if len(dupes) >= 1:
errors.append(ValueError(
"Only one policy with a given name allowed, duplicates: %s" % (
", ".join(dupes)
)
))
used_policy_names = used_policy_names.union(conf_policy_names)
if not errors:
null_config = Bag(dryrun=True, log_group=None, cache=None, assume_role="na")
for p in data.get('policies', ()):
try:
Policy(p, null_config, Bag())
except Exception as e:
msg = "Policy: %s is invalid: %s" % (
p.get('name', 'unknown'), e)
errors.append(msg)
if not errors:
log.info("Configuration valid: {}".format(config_file))
continue
log.error("Configuration invalid: {}".format(config_file))
for e in errors:
log.error(" %s" % e)
if errors:
sys.exit(1)
@policy_command
def run(options, policies):
exit_code = 0
for policy in policies:
try:
policy()
except Exception:
exit_code = 1
if options.debug:
raise
log.exception(
"Error while executing policy %s, continuing" % (
policy.name))
sys.exit(exit_code)
@policy_command
def report(options, policies):
assert len(policies) == 1, "Only one policy report at a time"
policy = policies.pop()
d = datetime.now()
delta = timedelta(days=options.days)
begin_date = d - delta
do_report(
policy, begin_date, options, sys.stdout,
raw_output_fh=options.raw)
@policy_command
def logs(options, policies):
assert len(policies) == 1, "Only one policy log at a time"
policy = policies.pop()
if not policy.is_lambda:
log.debug('lambda only atm')
return
session_factory = SessionFactory(
options.region, options.profile, options.assume_role)
manager = mu.LambdaManager(session_factory)
for e in manager.logs(mu.PolicyLambda(policy)):
print("%s: %s" % (
time.strftime(
"%Y-%m-%d %H:%M:%S", time.localtime(e['timestamp'] / 1000)),
e['message']))
def _schema_get_docstring(starting_class):
""" Given a class, return its docstring.
If no docstring is present for the class, search base classes in MRO for a
docstring.
"""
for cls in inspect.getmro(starting_class):
if inspect.getdoc(cls):
return inspect.getdoc(cls)
def schema_cmd(options):
""" Print info about the resources, actions and filters available. """
if options.json:
schema.json_dump(options.resource)
return
load_resources()
resource_mapping = schema.resource_vocabulary()
if options.summary:
schema.summary(resource_mapping)
return
# Here are the formats for what we accept:
# - No argument
# - List all available RESOURCES
# - RESOURCE
# - List all available actions and filters for supplied RESOURCE
# - RESOURCE.actions
# - List all available actions for supplied RESOURCE
# - RESOURCE.actions.ACTION
# - Show class doc string and schema for supplied action
# - RESOURCE.filters
# - List all available filters for supplied RESOURCE
# - RESOURCE.filters.FILTER
# - Show class doc string and schema for supplied filter
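    # For example (hypothetical resource/action/filter names, shown only to
    # illustrate the selector formats accepted above):
    #   custodian schema
    #   custodian schema ec2
    #   custodian schema ec2.actions
    #   custodian schema ec2.actions.stop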
if not options.resource:
resource_list = {'resources': sorted(resources.keys()) }
print(yaml.safe_dump(resource_list, default_flow_style=False))
return
# Format is RESOURCE.CATEGORY.ITEM
components = options.resource.split('.')
#
# Handle resource
#
resource = components[0].lower()
if resource not in resource_mapping:
print('{} is not a valid resource'.format(resource), file=sys.stderr)
sys.exit(2)
if len(components) == 1:
del(resource_mapping[resource]['classes'])
output = {resource: resource_mapping[resource]}
print(yaml.safe_dump(output))
return
#
# Handle category
#
category = components[1].lower()
if category not in ('actions', 'filters'):
print(("Valid choices are 'actions' and 'filters'."
" You supplied '{}'").format(category), file=sys.stderr)
sys.exit(2)
if len(components) == 2:
output = "No {} available for resource {}.".format(category, resource)
if category in resource_mapping[resource]:
output = {resource: {
category: resource_mapping[resource][category]}}
print(yaml.safe_dump(output))
return
#
# Handle item
#
item = components[2].lower()
if item not in resource_mapping[resource][category]:
print('{} is not in the {} list for resource {}'.format(
item, category, resource), file=sys.stderr)
sys.exit(2)
if len(components) == 3:
cls = resource_mapping[resource]['classes'][category][item]
# Print docstring
docstring = _schema_get_docstring(cls)
print("\nHelp\n----\n")
if docstring:
print(docstring)
else:
# Shouldn't ever hit this, so exclude from cover
print("No help is available for this item.") # pragma: no cover
# Print schema
print("\nSchema\n------\n")
pp = pprint.PrettyPrinter(indent=4)
if hasattr(cls, 'schema'):
pp.pprint(cls.schema)
else:
            # Shouldn't ever hit this, so exclude from cover
            print("No schema is available for this item.", file=sys.stderr) # pragma: no cover
print('')
return
# We received too much (e.g. s3.actions.foo.bar)
print("Invalid selector '{}'. Max of 3 components in the "
"format RESOURCE.CATEGORY.ITEM".format(options.resource),
file=sys.stderr)
sys.exit(2)
def _metrics_get_endpoints(options):
""" Determine the start and end dates based on user-supplied options. """
if bool(options.start) ^ bool(options.end):
print('Error: --start and --end must be specified together',
file=sys.stderr)
sys.exit(2)
if options.start and options.end:
start = options.start
end = options.end
else:
end = datetime.utcnow()
start = end - timedelta(options.days)
return start, end
@policy_command
def metrics_cmd(options, policies):
start, end = _metrics_get_endpoints(options)
data = {}
for p in policies:
log.info('Getting %s metrics', p)
data[p.name] = p.get_metrics(start, end, options.period)
print(dumps(data, indent=2))
| apache-2.0 | 4,219,695,080,922,849,300 | 30.537162 | 94 | 0.611034 | false |
punitvanjani/test1 | api/interface.py | 1 | 7869 | from errors import invalid_operation, no_records, no_communication
from models import Properties
from debugger import debug_msg
import serial
table = (
0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241,
0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440,
0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40,
0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841,
0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40,
0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41,
0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641,
0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040,
0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240,
0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441,
0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41,
0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840,
0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41,
0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40,
0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640,
0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041,
0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240,
0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441,
0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41,
0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840,
0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41,
0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40,
0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640,
0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041,
0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241,
0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440,
0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40,
0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841,
0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40,
0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41,
0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641,
0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040 )
def calcString(st, crc=0xFFFF):
    """Given a byte string and a starting CRC, calculate the final CRC-16
    (Modbus); the result has its bytes swapped so it can be appended to the
    frame low byte first."""
for ch in st:
crc = table[(crc ^ ord(ch)) & 0xFF] ^ (crc >> 8)
# after calculation, interchange LSB and MSB
crc1 = crc & 0xFF00
crc1 = crc1 >> 8
crc2 = crc & 0x00FF
crc2 = crc2 << 8
crc = crc2 ^ crc1
return crc
def convert(int_value):
encoded = format(int_value, 'x')
length = len(encoded)
encoded = encoded.zfill(length+length%2)
return encoded.decode('hex')
def bit_from_string(string, index):
i, j = divmod(index, 8)
if ord(string[i]) & (1 << j):
return 1
else:
return 0
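# A small illustration (hypothetical values, never executed here): the helpers
# above are combined the same way webswitch() below builds its Modbus "write
# single coil" frame; the CRC from calcString() is already byte-swapped, so
# convert() appends it to the frame low byte first.
#
#     frame = convert(9) + convert(5) + convert(0) + convert(4) + convert(255) + convert(0)
#     frame += convert(calcString(frame))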
def webswitch(endpoint,expected_status):
status = -1
errors = ""
# Fetch the mode of communication and Baud Rate for Webswitch
wbs_comm = Properties.query.filter_by(key='WbSwtComm').first()
if wbs_comm == None:
errors = no_records('interface.webswitch.properties','WbSwtComm')
status = -1
return (status,errors)
wbb_comm = Properties.query.filter_by(key='WbSwtBaud').first()
if wbb_comm == None:
errors = no_records('interface.webswitch.properties','WbSwtBaud')
status = -1
return (status,errors)
# Establish communication
try:
serusb = serial.Serial(wbs_comm.value, int(wbb_comm.value))
except:
errors = no_communication()
debug_msg("No communication", wbs_comm.value, int(wbb_comm.value))
status = -1
return (status,errors)
    # Check the endpoint type: if it is a switch-type endpoint, proceed
    if endpoint.endpoint_type in (1000, 1002, 1004, 1005, 1006, 1007, 1008,
                                  1009, 1010, 1011, 1012, 1013, 1014, 1015,
                                  1016, 1017, 1018, 1019):
if (expected_status == 1):
action_id = 255
elif (expected_status == 0):
action_id = 0
# Form the outbound communication string to write coil : st0 = Slave ID, st1 = Function code for modbus write coil, st3 = device id/endpoint id, st4 = expected Status (converted), st6 = crc code this is 16 bit code
st0 = convert(endpoint.internal_nod_id)
st1 = convert(5)
st2 = convert(0)
st3 = convert(endpoint.internal_end_id)
st4 = convert(action_id)
st5 = convert(0)
st6 = convert(calcString(st0+st1+st2+st3+st4+st5))
serusb.close()
serusb.open()
debug_msg("WS outbound", int(st0.encode('hex'), 16),int(st1.encode('hex'), 16),int(st2.encode('hex'), 16),int(st3.encode('hex'), 16),int(st4.encode('hex'), 16),int(st5.encode('hex'), 16),int(st0.encode('hex'), 16),int(st6.encode('hex'), 16))
print (st0,st1,st2,st3,st4,st5,st6)
serusb.flushInput()
serusb.write(st0+st1+st2+st3+st4+st5+st6)
serusb.timeout=2
try:
read_val = serusb.read() # Wait forever for anything
for i in range(1,100000): # This dummy loop is added so that we can complete serial buffer
pass
data_left = serusb.inWaiting() # Get the number of characters ready to be read
read_val += serusb.read(size=data_left) # Do the read and combine it with the first character
serusb.close()
except:
data_left = 0
read_val = ""
serusb.close()
try:
debug_msg( "WS inbound:", int(read_val[0].encode('hex'), 16), int(read_val[1].encode('hex'), 16), int(read_val[2].encode('hex'), 16), int(read_val[3].encode('hex'), 16), int(read_val[4].encode('hex'), 16), int(read_val[5].encode('hex'), 16), int(read_val[6].encode('hex'), 16), int(read_val[7].encode('hex'), 16))#, int(read_val[8].encode('hex'), 16), int(read_val[9].encode('hex'), 16), int(read_val[10].encode('hex'), 16), int(read_val[11].encode('hex'), 16), int(read_val[12].encode('hex'), 16), int(read_val[13].encode('hex'), 16))
except:
pass
if(data_left != 0 and data_left >= 4):
ws_bit = int(read_val[4].encode('hex'), 16)
print "ws_bit", ws_bit
if ws_bit == 255:
status = 1
elif ws_bit == 0:
status = 0
else:
status = -1
else:
status = -1
errors = ""
# Check the type of endpointtype, if it is dimmer proceed
elif endpoint.endpoint_type == 1001 or endpoint.endpoint_type == 1020:
status = expected_status
return str(status),errors
def touchswitch(endpoint,expected_status):
debug_msg("test")
errors = ""
print "touchswitch called2"
status = expected_status
return str(status),errors
def acremote(endpoint,expected_status):
errors = ""
status = expected_status
return str(status),errors
def tvremote(endpoint,expected_status):
errors = ""
status = expected_status
return str(status),errors
def settopbox(endpoint,expected_status):
errors = ""
status = expected_status
return str(status),errors
| mit | -4,152,577,362,719,511,000 | 44.294118 | 580 | 0.634007 | false |
ksmaheshkumar/awslogs | tests.py | 1 | 22674 | import unittest
from datetime import datetime
from StringIO import StringIO
import boto
import gevent
from gevent.pool import Pool
from termcolor import colored
from mock import Mock, patch, call
from awslogs import AWSLogs
from awslogs.exceptions import UnknownDateError, ConnectionError
from awslogs.core import NO_MORE_EVENTS
from awslogs.bin import main
class TestAWSLogs(unittest.TestCase):
def setUp(self):
super(TestAWSLogs, self).setUp()
self.aws = AWSLogs(connection_cls=Mock)
@patch('awslogs.core.datetime')
def test_parse_datetime(self, datetime_mock):
datetime_mock.now.return_value = datetime(2015, 1, 1, 3, 0, 0, 0)
def epoch(dt):
return int(dt.strftime("%s")) * 1000
self.assertEqual(self.aws.parse_datetime(''), None)
self.assertEqual(self.aws.parse_datetime(None), None)
self.assertEqual(self.aws.parse_datetime('1m'),
epoch(datetime(2015, 1, 1, 2, 59, 0, 0)))
self.assertEqual(self.aws.parse_datetime('1m ago'),
epoch(datetime(2015, 1, 1, 2, 59, 0, 0)))
self.assertEqual(self.aws.parse_datetime('1minute'),
epoch(datetime(2015, 1, 1, 2, 59, 0, 0)))
self.assertEqual(self.aws.parse_datetime('1minute ago'),
epoch(datetime(2015, 1, 1, 2, 59, 0, 0)))
self.assertEqual(self.aws.parse_datetime('1minutes'),
epoch(datetime(2015, 1, 1, 2, 59, 0, 0)))
self.assertEqual(self.aws.parse_datetime('1minutes ago'),
epoch(datetime(2015, 1, 1, 2, 59, 0, 0)))
self.assertEqual(self.aws.parse_datetime('1h'),
epoch(datetime(2015, 1, 1, 2, 0, 0, 0)))
self.assertEqual(self.aws.parse_datetime('1h ago'),
epoch(datetime(2015, 1, 1, 2, 0, 0, 0)))
self.assertEqual(self.aws.parse_datetime('1hour'),
epoch(datetime(2015, 1, 1, 2, 0, 0, 0)))
self.assertEqual(self.aws.parse_datetime('1hour ago'),
epoch(datetime(2015, 1, 1, 2, 0, 0, 0)))
self.assertEqual(self.aws.parse_datetime('1hours'),
epoch(datetime(2015, 1, 1, 2, 0, 0, 0)))
self.assertEqual(self.aws.parse_datetime('1hours ago'),
epoch(datetime(2015, 1, 1, 2, 0, 0, 0)))
self.assertEqual(self.aws.parse_datetime('1d'),
epoch(datetime(2014, 12, 31, 3, 0, 0, 0)))
self.assertEqual(self.aws.parse_datetime('1d ago'),
epoch(datetime(2014, 12, 31, 3, 0, 0, 0)))
self.assertEqual(self.aws.parse_datetime('1day'),
epoch(datetime(2014, 12, 31, 3, 0, 0, 0)))
self.assertEqual(self.aws.parse_datetime('1day ago'),
epoch(datetime(2014, 12, 31, 3, 0, 0, 0)))
self.assertEqual(self.aws.parse_datetime('1days'),
epoch(datetime(2014, 12, 31, 3, 0, 0, 0)))
self.assertEqual(self.aws.parse_datetime('1days ago'),
epoch(datetime(2014, 12, 31, 3, 0, 0, 0)))
self.assertEqual(self.aws.parse_datetime('1w'),
epoch(datetime(2014, 12, 25, 3, 0, 0, 0)))
self.assertEqual(self.aws.parse_datetime('1w ago'),
epoch(datetime(2014, 12, 25, 3, 0, 0, 0)))
self.assertEqual(self.aws.parse_datetime('1week'),
epoch(datetime(2014, 12, 25, 3, 0, 0, 0)))
self.assertEqual(self.aws.parse_datetime('1week ago'),
epoch(datetime(2014, 12, 25, 3, 0, 0, 0)))
self.assertEqual(self.aws.parse_datetime('1weeks'),
epoch(datetime(2014, 12, 25, 3, 0, 0, 0)))
self.assertEqual(self.aws.parse_datetime('1weeks ago'),
epoch(datetime(2014, 12, 25, 3, 0, 0, 0)))
self.assertEqual(self.aws.parse_datetime('1/1/2013'),
epoch(datetime(2013, 1, 1, 0, 0, 0, 0)))
self.assertEqual(self.aws.parse_datetime('1/1/2012 12:34'),
epoch(datetime(2012, 1, 1, 12, 34, 0, 0)))
self.assertEqual(self.aws.parse_datetime('1/1/2011 12:34:56'),
epoch(datetime(2011, 1, 1, 12, 34, 56, 0)))
self.assertRaises(UnknownDateError, self.aws.parse_datetime, '???')
def test_get_groups(self):
self.aws.connection.describe_log_groups.side_effect = [
{'logGroups': [{'logGroupName': 'A'},
{'logGroupName': 'B'},
{'logGroupName': 'C'}],
'nextToken': 1},
{'logGroups': [{'logGroupName': 'D'},
{'logGroupName': 'E'},
{'logGroupName': 'F'}],
'nextToken': 2},
{'logGroups': [{'logGroupName': 'G'}]},
]
expected = ['A', 'B', 'C', 'D', 'E', 'F', 'G']
self.assertEqual([g for g in self.aws.get_groups()], expected)
expected = [call(next_token=None),
call(next_token=1),
call(next_token=2)]
self.assertEqual(self.aws.connection.describe_log_groups.call_args_list,
expected)
def test_get_streams(self):
self.aws.connection.describe_log_streams.side_effect = [
{'logStreams': [{'logStreamName': 'A'},
{'logStreamName': 'B'},
{'logStreamName': 'C'}],
'nextToken': 1},
{'logStreams': [{'logStreamName': 'D'},
{'logStreamName': 'E'},
{'logStreamName': 'F'}],
'nextToken': 2},
{'logStreams': [{'logStreamName': 'G'}]},
]
expected = ['A', 'B', 'C', 'D', 'E', 'F', 'G']
self.assertEqual([g for g in self.aws.get_streams('group')], expected)
expected = [call(log_group_name="group", next_token=None),
call(log_group_name="group", next_token=1),
call(log_group_name="group", next_token=2)]
self.assertEqual(self.aws.connection.describe_log_streams.call_args_list,
expected)
def test_get_streams_from_pattern(self):
side_effect = [
{'logStreams': [{'logStreamName': 'AAA'},
{'logStreamName': 'ABA'},
{'logStreamName': 'ACA'}],
'nextToken': 1},
{'logStreams': [{'logStreamName': 'BAA'},
{'logStreamName': 'BBA'},
{'logStreamName': 'BBB'}],
'nextToken': 2},
{'logStreams': [{'logStreamName': 'CAC'}]},
]
self.aws.connection.describe_log_streams.side_effect = side_effect
expected = ['AAA', 'ABA', 'ACA', 'BAA', 'BBA', 'BBB', 'CAC']
actual = [s for s in self.aws._get_streams_from_pattern('X', 'ALL')]
self.assertEqual(actual, expected)
self.aws.connection.describe_log_streams.side_effect = side_effect
expected = ['AAA', 'ABA', 'ACA']
actual = [s for s in self.aws._get_streams_from_pattern('X', 'A')]
self.assertEqual(actual, expected)
self.aws.connection.describe_log_streams.side_effect = side_effect
expected = ['AAA', 'ACA']
actual = [s for s in self.aws._get_streams_from_pattern('X', 'A[AC]A')]
self.assertEqual(actual, expected)
def test_get_groups_from_pattern(self):
side_effect = [
{'logGroups': [{'logGroupName': 'AAA'},
{'logGroupName': 'ABA'},
{'logGroupName': 'ACA'}],
'nextToken': 1},
{'logGroups': [{'logGroupName': 'BAA'},
{'logGroupName': 'BBA'},
{'logGroupName': 'BBB'}],
'nextToken': 2},
{'logGroups': [{'logGroupName': 'CAC'}]},
]
self.aws.connection.describe_log_groups.side_effect = side_effect
expected = ['AAA', 'ABA', 'ACA', 'BAA', 'BBA', 'BBB', 'CAC']
actual = [s for s in self.aws._get_groups_from_pattern('ALL')]
self.assertEqual(actual, expected)
self.aws.connection.describe_log_groups.side_effect = side_effect
expected = ['AAA', 'ABA', 'ACA']
actual = [s for s in self.aws._get_groups_from_pattern('A')]
self.assertEqual(actual, expected)
self.aws.connection.describe_log_groups.side_effect = side_effect
expected = ['AAA', 'ACA']
actual = [s for s in self.aws._get_groups_from_pattern('A[AC]A')]
self.assertEqual(actual, expected)
def test_get_streams_from_patterns(self):
groups = [
{'logGroups': [{'logGroupName': 'AAA'},
{'logGroupName': 'BAB'},
{'logGroupName': 'CCC'}]},
]
streams = [
{'logStreams': [{'logStreamName': 'ABB'},
{'logStreamName': 'ABC'},
{'logStreamName': 'ACD'}]},
{'logStreams': [{'logStreamName': 'BBB'},
{'logStreamName': 'BBD'},
{'logStreamName': 'BBE'}]},
{'logStreams': [{'logStreamName': 'CCC'}]},
]
self.aws.connection.describe_log_groups.side_effect = groups
self.aws.connection.describe_log_streams.side_effect = streams
expected = [('AAA', 'ABB'), ('AAA', 'ABC')]
actual = [s for s in self.aws._get_streams_from_patterns('A', 'AB')]
self.assertEqual(actual, expected)
self.aws.connection.describe_log_groups.side_effect = groups
self.aws.connection.describe_log_streams.side_effect = streams
expected = [('AAA', 'ABB'), ('AAA', 'ABC'), ('BAB', 'BBB'),
('BAB', 'BBD'), ('BAB', 'BBE')]
actual = [s for s in self.aws._get_streams_from_patterns('[AB]A.*', '.*B.*')]
self.assertEqual(actual, expected)
def test_raw_events_queue_consumer_exit_if_exhausted(self):
self.aws.stream_status = {('A', 'B'): self.aws.EXHAUSTED}
pool = Pool(size=1)
pool.spawn(self.aws._raw_events_queue_consumer)
pool.join()
self.assertEqual(self.aws.events_queue.get(), NO_MORE_EVENTS)
self.assertTrue(self.aws.events_queue.empty())
def test_raw_events_queue_consumer_exit_when_exhausted(self):
self.aws.stream_status = {('A', 'B'): self.aws.EXHAUSTED}
self.aws.raw_events_queue.put((0, {'message': 'Hello'}))
pool = Pool(size=1)
pool.spawn(self.aws._raw_events_queue_consumer)
pool.join()
self.assertEqual(self.aws.events_queue.get(), 'Hello\n')
self.assertEqual(self.aws.events_queue.get(), NO_MORE_EVENTS)
self.assertTrue(self.aws.events_queue.empty())
@patch('awslogs.core.gevent.sleep')
@patch('awslogs.core.AWSLogs._get_min_timestamp')
@patch('awslogs.core.AWSLogs._get_all_streams_exhausted')
def test_raw_events_queue_consumer_waits_streams(self, _get_all_streams_exhausted, _get_min_timestamp, sleep):
_get_min_timestamp.side_effect = [5, 5, 6, 7, 8, 9, 10]
_get_all_streams_exhausted.side_effect = [
False,
False,
False,
False,
False,
True,
True
]
self.aws.stream_status = {('A', 'B'): self.aws.ACTIVE,
('A', 'C'): self.aws.EXHAUSTED}
self.aws.raw_events_queue.put((8, {'message': 'Hello 8'}))
self.aws.raw_events_queue.put((7, {'message': 'Hello 7'}))
self.aws.raw_events_queue.put((9, {'message': 'Hello 9'}))
self.aws.raw_events_queue.put((6, {'message': 'Hello 6'}))
pool = Pool(size=1)
pool.spawn(self.aws._raw_events_queue_consumer)
pool.join()
self.assertEqual(self.aws.events_queue.get(), 'Hello 6\n')
self.assertEqual(self.aws.events_queue.get(), 'Hello 7\n')
self.assertEqual(self.aws.events_queue.get(), 'Hello 8\n')
self.assertEqual(self.aws.events_queue.get(), 'Hello 9\n')
self.assertEqual(self.aws.events_queue.get(), NO_MORE_EVENTS)
self.assertTrue(self.aws.events_queue.empty())
self.assertEqual(sleep.call_args_list, [call(0.3), call(0.3)])
def test_publisher_queue_consumer_with_empty_queue(self):
self.aws.connection = Mock()
pool = Pool(size=1)
pool.spawn(self.aws._publisher_queue_consumer)
pool.join()
self.assertEqual(self.aws.connection.call_count, 0)
def test_publisher_queue_consumer(self):
self.aws.publishers_queue.put((0, ('group', 'stream', None)))
self.aws.connection = Mock()
self.aws.connection.get_log_events.side_effect = [
{'events': [{'timestamp': 1, 'message': 'Hello 1'},
{'timestamp': 2, 'message': 'Hello 2'},
{'timestamp': 3, 'message': 'Hello 3'}]}
]
pool = Pool(size=1)
pool.spawn(self.aws._publisher_queue_consumer)
pool.join()
self.assertEqual(
self.aws.raw_events_queue.get(),
(1, {'timestamp': 1,
'message': 'Hello 1',
'stream': 'stream',
'group': 'group'})
)
self.assertEqual(
self.aws.raw_events_queue.get(),
(2, {'timestamp': 2,
'message': 'Hello 2',
'stream': 'stream',
'group': 'group'})
)
self.assertEqual(
self.aws.raw_events_queue.get(),
(3, {'timestamp': 3,
'message': 'Hello 3',
'stream': 'stream',
'group': 'group'})
)
self.assertTrue(self.aws.raw_events_queue.empty())
self.assertTrue(self.aws.publishers_queue.empty())
def test_publisher_queue_consumer_paginated(self):
self.aws.publishers_queue.put((0, ('group', 'stream', None)))
self.aws.connection = Mock()
self.aws.connection.get_log_events.side_effect = [
{'events': [{'timestamp': 1, 'message': 'Hello 1'},
{'timestamp': 2, 'message': 'Hello 2'},
{'timestamp': 3, 'message': 'Hello 3'}],
'nextForwardToken': 'token'},
{'events': [{'timestamp': 4, 'message': 'Hello 4'},
{'timestamp': 5, 'message': 'Hello 5'},
{'timestamp': 6, 'message': 'Hello 6'}]}
]
pool = Pool(size=1)
pool.spawn(self.aws._publisher_queue_consumer)
pool.join()
self.assertEqual(
self.aws.raw_events_queue.get(),
(1, {'timestamp': 1,
'message': 'Hello 1',
'stream': 'stream',
'group': 'group'})
)
self.assertEqual(
self.aws.raw_events_queue.get(),
(2, {'timestamp': 2,
'message': 'Hello 2',
'stream': 'stream',
'group': 'group'})
)
self.assertEqual(
self.aws.raw_events_queue.get(),
(3, {'timestamp': 3,
'message': 'Hello 3',
'stream': 'stream',
'group': 'group'})
)
self.assertEqual(
self.aws.raw_events_queue.get(),
(4, {'timestamp': 4,
'message': 'Hello 4',
'stream': 'stream',
'group': 'group'})
)
self.assertEqual(
self.aws.raw_events_queue.get(),
(5, {'timestamp': 5,
'message': 'Hello 5',
'stream': 'stream',
'group': 'group'})
)
self.assertEqual(
self.aws.raw_events_queue.get(),
(6, {'timestamp': 6,
'message': 'Hello 6',
'stream': 'stream',
'group': 'group'})
)
self.assertTrue(self.aws.raw_events_queue.empty())
self.assertTrue(self.aws.publishers_queue.empty())
def test_get_min_timestamp(self):
self.assertEqual(self.aws._get_min_timestamp(), None)
self.aws.stream_status = {('A', 'A'): AWSLogs.ACTIVE,
('B', 'B'): AWSLogs.ACTIVE,
('C', 'C'): AWSLogs.EXHAUSTED}
self.aws.stream_max_timestamp = {
('A', 'A'): datetime(2015, 1, 1, 13, 30),
('B', 'B'): datetime(2015, 1, 1, 14, 30),
('C', 'C'): datetime(2015, 1, 1, 15, 30)
}
self.assertEqual(self.aws._get_min_timestamp(),
datetime(2015, 1, 1, 13, 30))
self.aws.stream_status[('A', 'A')] = AWSLogs.EXHAUSTED
self.assertEqual(self.aws._get_min_timestamp(),
datetime(2015, 1, 1, 14, 30))
self.aws.stream_status[('B', 'B')] = AWSLogs.EXHAUSTED
self.assertEqual(self.aws._get_min_timestamp(), None)
def test_get_all_streams_exhausted(self):
self.aws.stream_status = {}
self.assertTrue(self.aws._get_all_streams_exhausted())
self.aws.stream_status = {('A', 'A'): AWSLogs.ACTIVE,
('B', 'B'): AWSLogs.EXHAUSTED}
self.assertFalse(self.aws._get_all_streams_exhausted())
self.aws.stream_status = {('A', 'A'): AWSLogs.EXHAUSTED,
('B', 'B'): AWSLogs.EXHAUSTED}
self.assertTrue(self.aws._get_all_streams_exhausted())
@patch('awslogs.core.AWSConnection')
@patch('sys.stdout', new_callable=StringIO)
def test_main_get(self, mock_stdout, AWSConnection):
instance = Mock()
AWSConnection.return_value = instance
logs = [
{'events': [{'timestamp': 1, 'message': 'Hello 1'},
{'timestamp': 2, 'message': 'Hello 2'},
{'timestamp': 3, 'message': 'Hello 3'}],
'nextForwardToken': 'token'},
{'events': [{'timestamp': 4, 'message': 'Hello 4'},
{'timestamp': 5, 'message': 'Hello 5'},
{'timestamp': 6, 'message': 'Hello 6'}],
'nextForwardToken': 'token'},
{'events': []}
]
groups = [
{'logGroups': [{'logGroupName': 'AAA'},
{'logGroupName': 'BBB'},
{'logGroupName': 'CCC'}]},
]
streams = [
{'logStreams': [{'logStreamName': 'DDD'},
{'logStreamName': 'EEE'}]}
]
instance.get_log_events.side_effect = logs
instance.describe_log_groups.side_effect = groups
instance.describe_log_streams.side_effect = streams
main("awslogs get AAA DDD --no-color".split())
self.assertEqual(
mock_stdout.getvalue(),
("AAA DDD Hello 1\n"
"AAA DDD Hello 2\n"
"AAA DDD Hello 3\n"
"AAA DDD Hello 4\n"
"AAA DDD Hello 5\n"
"AAA DDD Hello 6\n")
)
@patch('awslogs.core.AWSConnection')
@patch('sys.stdout', new_callable=StringIO)
def test_main_groups(self, mock_stdout, AWSConnection):
instance = Mock()
AWSConnection.return_value = instance
groups = [
{'logGroups': [{'logGroupName': 'AAA'},
{'logGroupName': 'BBB'},
{'logGroupName': 'CCC'}]},
]
instance.describe_log_groups.side_effect = groups
main("awslogs groups".split())
self.assertEqual(
mock_stdout.getvalue(),
("AAA\n"
"BBB\n"
"CCC\n")
)
@patch('awslogs.core.AWSConnection')
@patch('sys.stdout', new_callable=StringIO)
def test_main_streams(self, mock_stdout, AWSConnection):
instance = Mock()
AWSConnection.return_value = instance
groups = [
{'logGroups': [{'logGroupName': 'AAA'},
{'logGroupName': 'BBB'},
{'logGroupName': 'CCC'}]},
]
streams = [
{'logStreams': [{'logStreamName': 'DDD'},
{'logStreamName': 'EEE'}]}
]
instance.describe_log_groups.side_effect = groups
instance.describe_log_streams.side_effect = streams
main("awslogs streams AAA".split())
self.assertEqual(
mock_stdout.getvalue(),
("DDD\n"
"EEE\n")
)
@patch('sys.stderr', new_callable=StringIO)
def test_unknown_date_error(self, mock_stderr):
code = main("awslogs get AAA BBB -sX".split())
self.assertEqual(code, 3)
self.assertEqual(mock_stderr.getvalue(),
colored("awslogs doesn't understand 'X' as a date.\n", "red"))
@patch('awslogs.bin.AWSLogs')
@patch('sys.stderr', new_callable=StringIO)
def test_unknown_error(self, mock_stderr, mock_awslogs):
mock_awslogs.side_effect = Exception("Error!")
code = main("awslogs get AAA BBB".split())
output = mock_stderr.getvalue()
self.assertEqual(code, 1)
self.assertTrue("You've found a bug!" in output)
self.assertTrue("Exception: Error!" in output)
@patch('awslogs.bin.AWSLogs')
@patch('sys.stderr', new_callable=StringIO)
def test_connection_error(self, mock_stderr, mock_awslogs):
mock_awslogs.side_effect = ConnectionError("Error!")
code = main("awslogs get AAA BBB".split())
self.assertEqual(code, 2)
output = mock_stderr.getvalue()
self.assertEqual(mock_stderr.getvalue(),
colored("awslogs can't connecto to AWS.\n", "red"))
@patch('awslogs.core.botologs.connect_to_region')
@patch('sys.stderr', new_callable=StringIO)
def test_access_denied_error(self, mock_stderr, connect_to_region):
instance = Mock()
connect_to_region.return_value = instance
exc = boto.exception.JSONResponseError(
status=400,
reason='Bad Request',
body={u'Message': u'User XXX...', '__type': 'AccessDeniedException'}
)
instance.describe_log_groups.side_effect = exc
code = main("awslogs groups".split())
self.assertEqual(code, 4)
self.assertEqual(mock_stderr.getvalue(), colored("User XXX...\n", "red"))
| bsd-3-clause | 6,869,315,900,527,850,000 | 39.634409 | 114 | 0.516274 | false |
qwergram/data-structures | src/dll.py | 1 | 3676 | # -*- coding: utf-8 -*-
"""Doubly linked list implementation."""
class Node(object):
"""Creates a node object."""
    def __init__(self, value, next, prev):
        """Initialize a Node object."""
self.value = value
self.next = next
self.prev = prev
class DoublyLinkedList(object):
"""Define a double pointered list."""
# It was quite difficult trying to solve this problem, so I got some help
# with my logic from the following site:
# http://ls.pwd.io/2014/08/singly-and-doubly-linked-lists-in-python/
head = None
tail = None
def __init__(self, values):
"""Accept a list of values and generate a chain of Nodes using those values."""
if isinstance(values, list):
for value in values:
self.append(value)
else:
raise TypeError("Please package your item into a list!")
def append(self, value):
"""Append a value to the tail of the linked list."""
new_node = Node(value, None, None)
if self.head is None:
self.head = self.tail = new_node
else:
new_node.prev = self.tail
new_node.next = None
self.tail.next = new_node
self.tail = new_node
def insert(self, value):
"""Insert a value to the head of the linked list."""
new_node = Node(value, None, None)
if self.head is None:
self.head = self.tail = new_node
else:
new_node.next = self.head
new_node.prev = None
self.head.prev = new_node
self.head = new_node
def pop(self):
"""Remove the head of the chain and return the Node."""
if self.head is None:
raise IndexError("Cannot execute on an empty list!")
elif self.head.next is None:
old_head = self.head
self.head = self.tail = None
return old_head
else:
old_head = self.head
new_head = self.head.next
new_head.prev = None
self.head = new_head
old_head.next = None
old_head.prev = None
return old_head
def shift(self):
"""Remove the tail of the chain and return the Node."""
if self.head is None:
            raise IndexError("Cannot execute on an empty list!")
elif self.head.next is None:
old_head = self.head
self.head = self.tail = None
return old_head
else:
old_tail = self.tail
new_tail = self.tail.prev
new_tail.next = None
self.tail = new_tail
old_tail.next = None
old_tail.prev = None
return old_tail
def remove(self, value):
"""Remove the specified item from the node chain and rebind the Nodes again."""
if self.tail is not None and self.tail.value == value:
self.shift()
elif self.head is not None and self.head.value == value:
self.pop()
else:
current_node = self.head
previous_node = None
while current_node is not None:
if current_node.value == value:
if previous_node is not None:
previous_node.next = current_node.next
previous_node.next.prev = previous_node
else:
self.head = current_node.next
break
previous_node = current_node
current_node = current_node.next
else:
raise ValueError("Item was not found in list!")
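# Minimal usage sketch (illustrative):
if __name__ == '__main__':
    chain = DoublyLinkedList([1, 2, 3])
    chain.append(4)                    # 1 <-> 2 <-> 3 <-> 4
    chain.insert(0)                    # 0 <-> 1 <-> 2 <-> 3 <-> 4
    assert chain.pop().value == 0      # pop() removes and returns the head
    assert chain.shift().value == 4    # shift() removes and returns the tail
    chain.remove(2)                    # unlinks the node holding 2
    assert chain.head.value == 1 and chain.tail.value == 3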
| mit | 8,665,013,767,160,872,000 | 32.724771 | 87 | 0.533188 | false |
sam-m888/gprime | gprime/plugins/rel/rel_pl.py | 1 | 32078 | # -*- coding: utf-8 -*-
#
# gPrime - A web-based genealogy program
#
# Copyright (C) 2003-2005 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Rewritten in 2008 for 3.x version by Łukasz Rymarczyk
# Written in 2007 by Piotr Czubaszek, largely based on rel_de.py by Alex Roitman.
# PL: Po objaśnienia oznaczania relacji zobacz relationship.py
# EN: For more information see relationship.py
#
"""
Polish-specific definitions of relationships.
"""
#-------------------------------------------------------------------------
#
# Gprime modules
#
#-------------------------------------------------------------------------
from gprime.lib import Person
import gprime.relationship
#-------------------------------------------------------------------------
# ordinal numerals used to name the generation ('pierwszego' = 'of the first', etc.)
_level_name = [ "pierwszego", "drugiego", "trzeciego", "czwartego", "piątego",
"szóstego", "siódmego", "ósmego", "dziewiątego", "dziesiątego",
"jedenastego", "dwunastego","trzynastego", "czternastego", "piętnastego",
"szesnastego", "siedemnastego", "osiemnastego","dziewiętnastego", "dwudziestego", ]
_father_level = [ "",
"ojciec",
"dziadek",
"pradziadek",
"prapradziadek",
"praprapradziadek",
"prapraprapradziadek",
"praprapraprapradziadek",
"prapraprapraprapradziadek",
"praprapraprapraprapradziadek",
"prapraprapraprapraprapradziadek",
]
_mother_level = [ "",
"matka",
"babcia",
"prababcia",
"praprababcia",
"prapraprababcia",
"praprapraprababcia",
"prapraprapraprababcia",
"praprapraprapraprababcia",
"prapraprapraprapraprababcia",
"praprapraprapraprapraprababcia",
]
_son_level = [ "",
"syn",
"wnuk",
"prawnuk",
"praprawnuk",
"prapraprauwnuk",
"praprapraprauwnuk",
"prapraprapraprawnuk",
"praprapraprapraprawnuk",
"prapraprapraprapraprawnuk",
"praprapraprapraprapraprawnuk",
]
_daughter_level = [ "",
"córka",
"wnuczka",
"prawnuczka",
"praprawnuczka",
"prapraprauwnuczka",
"praprapraprauwnuczka",
"prapraprapraprawnuczka",
"praprapraprapraprawnuczka",
"prapraprapraprapraprawnuczka",
"praprapraprapraprapraprawnuczka",
]
_sister_level_of_male = [ "", "siostra", "ciotka stryjeczna",
"babcia stryjeczna",
"prababcia stryjeczna",
"praprababcia stryjeczna",
"prapraprababcia stryjeczna",
"praprapraprababcia stryjeczna",
"prapraprapraprababcia stryjeczna",
"praprapraprapraprababcia stryjeczna",
"prapraprapraprapraprababcia stryjeczna",
"praprapraprapraprapraprababcia stryjeczna",
]
_sister_level_of_female = [ "", "siostra", "ciotka",
"babcia cioteczna",
"prababcia cioteczna",
"praprababcia cioteczna",
"prapraprababcia cioteczna",
"praprapraprababcia cioteczna",
"prapraprapraprababcia cioteczna",
"praprapraprapraprababcia cioteczna",
"prapraprapraprapraprababcia cioteczna",
"praprapraprapraprapraprababcia cioteczna",
]
_brother_level_of_male = [ "", "brat", "stryj",
"dziadek stryjeczny",
"pradziadek stryjeczny",
"prapradziadek stryjeczny",
"praprapradziadek stryjeczny",
"prapraprapradziadek stryjeczny",
"praprapraprapradziadek stryjeczny",
"prapraprapraprapradziadek stryjeczny",
"praprapraprapraprapradziadek stryjeczny",
"prapraprapraprapraprapradziadek stryjeczny",
]
_brother_level_of_female = [ "", "brat", "wuj",
"dziadek cioteczny",
"pradziadek cioteczny",
"prapradziadek cioteczny",
"praprapradziadek cioteczny",
"prapraprapradziadek cioteczny",
"praprapraprapradziadek cioteczny",
"prapraprapraprapradziadek cioteczny",
"praprapraprapraprapradziadek cioteczny",
"prapraprapraprapraprapradziadek cioteczny",
]
_nephew_level_of_brothers_son = [ "", "bratanek",
"syn bratanka",
"wnuk bratanka",
"prawnuk bratanka",
"praprawnuk bratanka",
"prapraprawnuk bratanka",
"praprapraprawnuk bratanka",
"prapraprapraprawnuk bratanka",
"praprapraprapraprawnuk bratanka",
"prapraprapraprapraprawnuk bratanka",
]
_nephew_level_of_brothers_daughter = [ "", "bratanica",
"syn bratanicy",
"wnuk bratanicy",
"prawnuk bratanicy",
"praprawnuk bratanicy",
"prapraprawnuk bratanicy",
"praprapraprawnuk bratanicy",
"prapraprapraprawnuk bratanicy",
"praprapraprapraprawnuk bratanicy",
"prapraprapraprapraprawnuk bratanicy",
"praprapraprapraprapraprawnuk bratanicy",
]
_nephew_level_of_sisters_son = [ "", "siostrzeniec",
"syn siostrzeńca",
"wnuk siostrzeńca",
"prawnuk siostrzeńca",
"praprawnuk siostrzeńca",
"prapraprawnuk siostrzeńca",
"praprapraprawnuk siostrzeńca",
"prapraprapraprawnuk siostrzeńca",
"praprapraprapraprawnuk siostrzeńca",
"prapraprapraprapraprawnuk siostrzeńca",
]
_nephew_level_of_sisters_daughter = [ "", "siostrzenica",
"syn siostrzenicy",
"wnuk siostrzenicy",
"prawnuk siostrzenicy",
"praprawnuk siostrzenicy",
"prapraprawnuk siostrzenicy",
"praprapraprawnuk siostrzenicy",
"prapraprapraprawnuk siostrzenicy",
"praprapraprapraprawnuk siostrzenicy",
"prapraprapraprapraprawnuk siostrzenicy",
]
_niece_level_of_brothers_son = [ "", "bratanica",
"córka bratanka",
"wnuczka bratanka",
"prawnuczka bratanka",
"praprawnuczka bratanka",
"prapraprawnuczka bratanka",
"praprapraprawnuczka bratanka",
"prapraprapraprawnuczka bratanka",
"praprapraprapraprawnuczka bratanka",
]
_niece_level_of_brothers_daughter = [ "", "bratanica",
"córka bratanicy",
"wnuczka bratanicy",
"prawnuczka bratanicy",
"praprawnuczka bratanicy",
"prapraprawnuczka bratanicy",
"praprapraprawnuczka bratanicy",
"prapraprapraprawnuczka bratanicy",
"praprapraprapraprawnuczka bratanicy",
]
_niece_level_of_sisters_son = [ "", "siostrzenica",
"córka siostrzeńca",
"wnuczka siostrzeńca",
"prawnuczka siostrzeńca",
"praprawnuczka siostrzeńca",
"prapraprawnuczka siostrzeńca",
"praprapraprawnuczka siostrzeńca",
"prapraprapraprawnuczka siostrzeńca",
"praprapraprapraprawnuczka siostrzeńca",
]
_niece_level_of_sisters_daughter = [ "", "siostrzenica",
"córka siostrzenicy",
"wnuczka siostrzenicy",
"prawnuczka siostrzenicy",
"praprawnuczka siostrzenicy",
"prapraprawnuczka siostrzenicy",
"praprapraprawnuczka siostrzenicy",
"prapraprapraprawnuczka siostrzenicy",
"praprapraprapraprawnuczka siostrzenicy",
]
#-------------------------------------------------------------------------
#
#
#
#-------------------------------------------------------------------------
class RelationshipCalculator(gprime.relationship.RelationshipCalculator):
    """
    RelationshipCalculator Class
    """
    def __init__(self):
        gprime.relationship.RelationshipCalculator.__init__(self)
def get_son(self, level, inlaw=''):
"""
        Returns a string describing how closely a male descendant
        (e.g. a son) is related to the given person.
        """
        # Determine whether the person is adopted/step or a birth relative
        if inlaw == '':
            t_inlaw = ""
        else:
            t_inlaw = "przybrany "
        # TODO: add detection of stepson/stepdaughter (pasierb/pasierbica)
if level >= 0 and level < len(_son_level):
return t_inlaw +_son_level[level]
elif level >= len(_son_level) and (level - 1) < len(_level_name):
return t_inlaw + \
"potomek męski %s pokolenia" % _level_name[level - 1]
else:
return t_inlaw + \
"potomek męski w %d pokoleniu" % level
def get_daughter(self, level, inlaw=''):
"""
        Returns a string describing how closely a female descendant
        (e.g. a daughter) is related to the given person.
        """
        # Determine whether the person is adopted/step or a birth relative
        # + create both forms (masculine and feminine)
        if inlaw == '':
            t_inlaw = ""
            t_inlawM = ""
        else:
            t_inlaw = "przybrana "
            t_inlawM = "przybrany "
        # TODO: add detection of stepson/stepdaughter (pasierb/pasierbica)
if level >= 0 and level < len(_daughter_level):
return t_inlaw + _daughter_level[level]
elif level >= len(_daughter_level) and (level - 1) < len(_level_name):
return t_inlawM + \
"potomek żeński %s pokolenia" % _level_name[level - 1]
else:
return t_inlawM + \
"potomek żeński w %d pokoleniu" % level
def get_child_unknown(self, level, inlaw=''):
"""
        Returns a string describing how closely a descendant
        of unknown gender is related to the given person.
        """
        # Determine whether the person is adopted/step or a birth relative
if inlaw == '':
t_inlaw = ""
else:
t_inlaw = "przybrany "
if level == 1:
if inlaw == '' :
return "dziecko"
else:
return "przybrane dziecko"
elif level >= 1 and (level - 1) < len(_level_name):
return t_inlaw + "potomek %s pokolenia" % _level_name[level - 1]
else:
return t_inlaw + "potomek w %d pokoleniu" % level
def get_sword_distaff(self, level, reltocommon, spacebefore = ""):
"""
PL: Generuje relację po mieczu/po kądzieli
EN: Generate relation 'by sword' or 'by distaff', polish specific
"""
if level <= 1:
return ""
elif level == 2:
            # dziadek/babcia (grandfather/grandmother)
            if reltocommon[0] == self.REL_FATHER:
                # on the birth father's side
                return spacebefore + "po mieczu"
            elif reltocommon[0] == self.REL_MOTHER:
                # on the birth mother's side
                return spacebefore + "po kądzieli"
            else:
                # not a birth relationship
                return ""
        elif level == 3:
            # pradziadek/prababcia (great-grandfather/great-grandmother)
            if (reltocommon[0] == self.REL_FATHER) \
                & (reltocommon[1] == self.REL_FATHER):
                # via the grandfather on the father's side
                return spacebefore + "podwójnego miecza"
            elif (reltocommon[0] == self.REL_FATHER) \
                & (reltocommon[1] == self.REL_MOTHER):
                # via the grandmother on the father's side
                return spacebefore + "raz po mieczu, dalej po kądzieli"
            elif (reltocommon[0] == self.REL_MOTHER) \
                & (reltocommon[1] == self.REL_FATHER):
                # via the grandfather on the mother's side
                return spacebefore + "raz po kądzieli, dalej po mieczu"
            elif (reltocommon[0] == self.REL_MOTHER) \
                & (reltocommon[1] == self.REL_MOTHER):
                # via the grandmother on the mother's side
                return spacebefore + "podwójnej kądzieli"
            else:
                # not a birth relationship
                return ""
elif level == 4:
            # prapradziadek/praprababcia (great-great-grandparents)
            if (reltocommon[0] == self.REL_FATHER) \
                & (reltocommon[1] == self.REL_FATHER) \
                & (reltocommon[2] == self.REL_FATHER):
                # the so-called male line ("by the sword")
                return spacebefore + "potrójnego miecza"
            if (reltocommon[0] == self.REL_MOTHER) \
                & (reltocommon[1] == self.REL_MOTHER) \
                & (reltocommon[2] == self.REL_MOTHER):
                # the so-called female line ("by the distaff")
                return spacebefore + "potrójnej kądzieli"
else:
return ""
else:
return ""
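    # Illustrative examples only (hypothetical calls; reltocommon is a sequence
    # of REL_FATHER/REL_MOTHER markers as used above):
    #   get_sword_distaff(2, [REL_FATHER], ' ')             -> " po mieczu"
    #   get_sword_distaff(3, [REL_MOTHER, REL_MOTHER], ' ') -> " podwójnej kądzieli"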
def get_father(self, level, reltocommon, inlaw=''):
"""
        Returns a string describing how closely a male ancestor
        (e.g. the father) is related to the given person.
"""
if inlaw == '':
t_inlaw = ""
else:
t_inlaw = "przybrany "
if level >= 0 and level < len(_father_level):
            # If a direct name for the relationship is known, use it
            if level == 1:
                # ojciec (father)
                return t_inlaw + _father_level[level]
            elif (level >= 2) & (level <= 4):
                # dziadek, pradziadek, prapradziadek
                # (grandfather, great-grandfather, great-great-grandfather)
return t_inlaw + _father_level[level] \
+ self.get_sword_distaff(level, reltocommon, ' ')
else:
return t_inlaw + _father_level[level]
elif level >= len(_father_level) and (level - 1) < len(_level_name):
            # if an ordinal numeral exists for this number
            return t_inlaw + \
                "przodek męski %s pokolenia" % (_level_name[level - 1])
        else:
            # for the remaining cases express the relationship numerically
return t_inlaw + \
"przodek męski w %d pokoleniu" % level
def get_mother(self, level, reltocommon, inlaw=''):
"""
Podaje tekst zawierający informację, jak bardzo przodek żeński
(np. matka) jest spokrewniony do danej osoby
"""
if inlaw == '':
t_inlaw = ""
else:
t_inlaw = "przybrana "
if level >= 0 and level < len(_mother_level):
# Jeśli znasz bezpośrednią nazwę relacji, to ją zastosuj
if level == 1:
# matka
return t_inlaw + _mother_level[level]
elif (level >= 2) & (level <= 4):
# babcia, prababcia, praprababcia
return t_inlaw + _mother_level[level] \
+ self.get_sword_distaff(level, reltocommon, ' ')
else:
return t_inlaw + _mother_level[level]
elif level >= len(_mother_level) and (level - 1) < len(_level_name):
# jeśli istnieje liczebnik dla danej liczby
return t_inlaw + \
"przodek żeński %s pokolenia" % (_level_name[level - 1])
else:
# dla pozostałych przypadków wypisz relację liczbowo
return t_inlaw +"przodek żeński w %d pokoleniu" % level
def get_parent_unknown(self, level, inlaw=''):
"""
Podaje tekst zawierający informację, jak bardzo przodek
o nieokreślonej płci jest spokrewniony dodanej osoby
"""
if inlaw == '':
t_inlaw = ""
else:
t_inlaw = "przybrany "
if level == 1:
return t_inlaw + "rodzic"
elif level > 1 and (level - 1) < len(_level_name):
if (level >= 2) & (level <= 4):
# babcia, prababcia, praprababcia
# (albo dziadek, pradziadek, prapradziadek)
tmp = t_inlaw +\
"przodek %s pokolenia" % (_level_name[level - 1])
# TODO: try to recognize a gender...
return tmp
# + self.get_sword_distaff(level, reltocommon, ' ')
else:
return t_inlaw + \
"przodek %s pokolenia" % (_level_name[level - 1])
else:
return t_inlaw +"przodek w %d pokoleniu" % level
def get_single_relationship_string(self, Ga, Gb, gender_a, gender_b,
reltocommon_a, reltocommon_b,
only_birth=True,
in_law_a=False, in_law_b=False):
"""
Provide a string that describes the relationsip between a person, and
another person. E.g. "grandparent" or "child".
"""
if only_birth:
step = ''
else:
step = self.STEP
if in_law_a or in_law_b :
inlaw = self.INLAW
else:
inlaw = ''
# b is the same person as a
if Ga == Gb == 0:
rel_str = 'ta sama osoba'
elif Ga == 0:
# b is son/descendant of a
if gender_b == Person.MALE:
if inlaw and Gb == 1 and not step:
rel_str = "zięć"
else:
rel_str = self.get_son(Gb, inlaw)
elif gender_b == Person.FEMALE:
if inlaw and Gb == 1 and not step:
rel_str = "synowa"
else:
rel_str = self.get_daughter(Gb, inlaw)
else:
rel_str = self.get_child_unknown(Gb, inlaw)
elif Gb == 0:
# b is parent/grand parent of a
if gender_b == Person.MALE:
                if inlaw and Ga == 1 and not step:
# TODO: znaleźć odpowiedniki w zależności czy to syn/córka
rel_str = "teść"
else:
rel_str = self.get_father(Ga, reltocommon_a, inlaw)
elif gender_b == Person.FEMALE:
                if inlaw and Ga == 1 and not step:
# TODO: znaleźć odpowiedniki w zależności czy to syn/córka
rel_str = "teściowa"
else:
rel_str = self.get_mother(Ga, reltocommon_a, inlaw)
else:
rel_str = self.get_parent_unknown(Ga, inlaw)
elif Ga == Gb == 1:
# rodzeństwo
if gender_b == Person.MALE:
if inlaw and not step:
rel_str = "brat przyrodni"
else:
rel_str = "brat rodzony"
elif gender_b == Person.FEMALE:
if inlaw and not step:
rel_str = "siostra przyrodnia"
else:
rel_str = "siostra rodzony"
else:
rel_str = "brat/siostra"
elif Gb == 1 and Ga > 1:
# Przyjmij, że nie rozróżniamy osób prawnie i nieprawnie przybranych...
if Ga == 2:
# rodzeństwo rodziców
# brat ojca, czyli stryj
if (gender_b == Person.MALE) \
& (reltocommon_a[0] == self.REL_FATHER):
rel_str = "stryj"
# siostra ojca, czyli ciotka ???
elif (gender_b == Person.FEMALE) \
& (reltocommon_a[0] == self.REL_FATHER):
rel_str = "ciotka (tzw. stryjna)"
# brat matki, czyli wuj/wujek
elif (gender_b == Person.MALE) \
& (reltocommon_a[0] == self.REL_MOTHER):
rel_str = "wuj (wujek)"
# siostra matki, czyli ciotka
elif (gender_b == Person.FEMALE) \
& (reltocommon_a[0] == self.REL_MOTHER):
rel_str = "ciotka"
else:
rel_str = "brat lub siostra rodzica"
elif Ga == 3:
# rodzeństwo dziadków rodziców osoby sprawdzanej
# rodzeństwo dziadka po mieczu (ojca ojca)
if (reltocommon_a[0] == self.REL_FATHER) \
& (reltocommon_a[1] == self.REL_FATHER):
if (gender_b == Person.MALE):
rel_str = "dziadek stryjeczny (tzw przestryj, stary stryj)"
elif (gender_b == Person.FEMALE):
rel_str = "babcia stryjeczna"
else:
rel_str = "rodzeństwo przodka w 2 pokoleniu"
# rodzeństwo babki po mieczu (matki ojca)
elif (reltocommon_a[0] == self.REL_FATHER) \
& (reltocommon_a[1] == self.REL_MOTHER):
# TODO: Należy sprawdzić, czy w staropolszczyźnie nie ma
# dokładniejszych określeń dla tego typu relacji
# TODO: EN: Try to check, whether in old polish language
# are more specific word for this kind of relation
if (gender_b == Person.MALE):
rel_str = "dziadek stryjeczny (tzw przestryj, stary stryj)"
elif (gender_b == Person.FEMALE):
rel_str = "babcia stryjeczna"
else:
rel_str = "rodzeństwo przodka w 2 pokoleniu"
# rodzeństwo dziadka po kądzieli (ojca matki)
elif (reltocommon_a[0] == self.REL_MOTHER) \
& (reltocommon_a[1] == self.REL_FATHER):
# TODO: Należy sprawdzić, czy w staropolszczyźnie nie ma
# dokładniejszych określeń dla tego typu relacji
# TODO: EN: Try to check, whether in old polish language
# are more specific word for this kind of relation
if (gender_b == Person.MALE):
rel_str = "dziadek cioteczny (starop. prapociot)"
elif (gender_b == Person.FEMALE):
rel_str = "babcia cioteczna (starop. praciota)"
else:
rel_str = "rodzeństwo przodka w 2 pokoleniu"
# rodzeństwo babki po kądzieli (matki matki)
elif (reltocommon_a[0] == self.REL_MOTHER) \
& (reltocommon_a[1] == self.REL_MOTHER):
# TODO: Należy sprawdzić, czy w staropolszczyźnie nie ma
# dokładniejszych określeń dla tego typu relacji
# TODO: EN: Try to check, whether in old polish language
# are more specific word for this kind of relation
if (gender_b == Person.MALE):
rel_str = "dziadek cioteczny (starop. prapociot)"
elif (gender_b == Person.FEMALE):
rel_str = "babcia cioteczna (starop. praciota)"
else:
rel_str = "rodzeństwo przodka w 2 pokoleniu"
else:
if (gender_b == Person.MALE):
rel_str = "rodzeństwo dziadka"
elif (gender_b == Person.FEMALE):
rel_str = "rodzeństwo babci"
else:
rel_str = "rodzeństwo przodka w 2 pokoleniu"
elif Ga > 3:
# pradziadkowie... (grandparents)
if (gender_b == Person.MALE) \
& (reltocommon_a[0] == self.REL_FATHER):
if Ga >= 0 and Ga < len(_brother_level_of_male):
rel_str = _brother_level_of_male[Ga]
else:
rel_str = "rodzeństwo przodka męskiego %d pokolenia" % Ga
elif (gender_b == Person.FEMALE) \
& (reltocommon_a[0] == self.REL_FATHER):
if Ga >= 0 and Ga < len(_sister_level_of_male):
rel_str = _sister_level_of_male[Ga]
else:
rel_str = "rodzeństwo przodka żeńskiego %d pokolenia" % Ga
elif (gender_b == Person.MALE) \
& (reltocommon_a[0] == self.REL_MOTHER):
if Ga >= 0 and Ga < len(_brother_level_of_female):
                    rel_str = _brother_level_of_female[Ga]
else:
rel_str = "rodzeństwo przodka męskiego %d pokolenia" % Ga
elif (gender_b == Person.FEMALE) \
& (reltocommon_a[0] == self.REL_MOTHER):
if Ga >= 0 and Ga < len(_sister_level_of_female):
                    rel_str = _sister_level_of_female[Ga]
else:
rel_str = "rodzeństwo przodka żeńskiego %d pokolenia" % Ga
else:
rel_str = "rodzeństwo przodka %d pokolenia" % Ga
else:
            # The program should never get here, but...
            rel_str = "Relacja nieokreślona"
elif Ga ==1 and Gb > 1:
# syn brata
if (gender_b == Person.MALE) \
& (reltocommon_b[0] == self.REL_FATHER):
if Gb < len(_nephew_level_of_brothers_son):
rel_str = _nephew_level_of_brothers_son[Gb]
else:
rel_str = "męski potomek w %d pokoleniu brata" % Gb
# córka brata
elif (gender_b == Person.FEMALE) \
& (reltocommon_b[0] == self.REL_FATHER):
if Gb < len(_nephew_level_of_brothers_daughter):
rel_str = _nephew_level_of_brothers_daughter[Gb]
else:
rel_str = "żeński potomek w %d pokoleniu brata" % Gb
# syn siostry
            elif (gender_b == Person.MALE) \
& (reltocommon_b[0] == self.REL_MOTHER):
if Gb < len(_nephew_level_of_sisters_son):
rel_str = _nephew_level_of_sisters_son[Gb]
else:
rel_str = "męski potomek w %d pokoleniu brata" % Gb
# córka siostry
elif (gender_b == Person.FEMALE) \
& (reltocommon_b[0] == self.REL_MOTHER):
if Gb < len(_nephew_level_of_sisters_daughter):
rel_str = _nephew_level_of_sisters_daughter[Gb]
else:
rel_str = "żeński potomek w %d pokoleniu brata" % Gb
# potomek brata
elif (reltocommon_b[0] == self.REL_FATHER):
rel_str = "potomek w %d pokoleniu brata" % Gb
            # potomek siostry
            elif (reltocommon_b[0] == self.REL_MOTHER):
                rel_str = "potomek w %d pokoleniu siostry" % Gb
else :
rel_str = "potomek w %d pokoleniu rodzeństwa" % Gb
elif Ga > 1 and Gb > 1:
if (gender_b == Person.MALE):
if Ga == 2 and Gb == 2:
rel_str = "kuzyn"
else:
rel_str = "daleki kuzyn (%d. stopień pokrewieństwa)" % (Ga+Gb)
elif (gender_b == Person.FEMALE):
if Ga == 2 and Gb == 2:
rel_str = "kuzynka"
else:
rel_str = "daleka kuzynka (%d. stopień pokrewieństwa)" % (Ga+Gb)
else:
if Ga == 2 and Gb == 2:
rel_str = "kuzyn(ka)"
else:
rel_str = "daleki członek rodziny (%d. stopień pokrewieństwa)" % (Ga+Gb)
else:
            # The program should never get here, but...
rel_str = "nieokreślony stopień pokrewieństwa"
return rel_str
def get_plural_relationship_string(self, Ga, Gb,
reltocommon_a='', reltocommon_b='',
only_birth=True,
in_law_a=False, in_law_b=False):
"""
Generate a text with information, how far away is a group of persons
from a main person
"""
if Ga == Gb == 0:
return 'ta sama osoba'
if 0 == Ga:
if 1 == Gb:
return 'Dzieci'
if 2 == Gb:
return 'Wnuki'
if 3 == Gb:
return 'Prawnuki'
if 4 == Gb:
return 'Praprawnuki'
return 'Praprapra(n)wnuki'
if 0 == Gb:
if 1 == Ga:
return 'Rodzice'
if 2 == Ga:
return 'Dziadkowie'
if 3 == Ga:
return 'Pradziadkowie'
if 4 == Ga:
                return 'Prapradziadkowie'
return 'Praprapra(n)dziadkowie'
if 1 == Ga == Gb:
return 'Rodzeństwo'
if 1 == Gb and Ga > 1:
return 'Wujowie/stryjowie i ciocie'
if 1 < Gb and 1 == Ga:
return 'bratankowie(ice)/siostrzeńcy(nice)'
if 1 < Ga and 1 < Gb:
return 'dalsza rodzina'
return 'relacja nieznana'
def get_sibling_relationship_string(self, sib_type, gender_a, gender_b,
in_law_a=False, in_law_b=False):
if in_law_a or in_law_b :
inlaw = self.INLAW
else:
inlaw = ''
if sib_type == self.NORM_SIB:
if not inlaw:
if gender_b == Person.MALE:
rel_str = 'brat (rodzony)'
elif gender_b == Person.FEMALE:
rel_str = 'siostra (rodzona)'
else:
rel_str = 'brat lub siostra (rodzeni)'
else:
if gender_b == Person.MALE:
# TODO: znaleźć odpowiednik
rel_str = "brat (pasierb)"
elif gender_b == Person.FEMALE:
# TODO: znaleźć odpowiednik
rel_str = "siostra (pasierbica)"
else:
# TODO: znaleźć odpowiednik
rel_str = "brat lub siostra (pasierb/pasierbica)"
elif sib_type == self.UNKNOWN_SIB:
if not inlaw:
if gender_b == Person.MALE:
rel_str = 'brat'
elif gender_b == Person.FEMALE:
rel_str = 'siostra'
else:
rel_str = 'brat lub siostra'
else:
if gender_b == Person.MALE:
# TODO: znaleźć odpowiednik
rel_str = "brat (brat/szwagier)"
elif gender_b == Person.FEMALE:
# TODO: znaleźć odpowiednik
rel_str = "siostra (bratowa/szwagierka)"
else:
# TODO: znaleźć odpowiednik
rel_str = "brat lub siostra (szwagier/szagierka)"
elif sib_type == self.HALF_SIB_FATHER:
if gender_b == Person.MALE:
rel_str = "brat przyrodni"
elif gender_b == Person.FEMALE:
rel_str = "siostra przyrodnia"
else:
rel_str = "brat/siostra przyrodni"
elif sib_type == self.HALF_SIB_MOTHER:
if gender_b == Person.MALE:
rel_str = "brat przyrodni"
elif gender_b == Person.FEMALE:
rel_str = "siostra przyrodnia"
else:
rel_str = "brat/siostra przyrodni"
elif sib_type == self.STEP_SIB:
if gender_b == Person.MALE:
rel_str = "brat przyrodni"
elif gender_b == Person.FEMALE:
rel_str = "siostra przyrodnia"
else:
rel_str = "brat lub siostra przyrodnia"
else:
rel_str = "nieokreślona relacja rodzeństwa"
return rel_str
if __name__ == "__main__":
# Test function. Call it as follows from the command line (so as to find
# imported modules):
# export PYTHONPATH=/path/to/gramps/src
# python src/plugins/rel/rel_pl.py
"""TRANSLATORS, copy this if statement at the bottom of your
rel_xx.py module, and test your work with:
python src/plugins/rel/rel_xx.py
"""
from gprime.relationship import test
RC = RelationshipCalculator()
test(RC, True)
| gpl-2.0 | 8,653,816,077,234,441,000 | 33.513543 | 92 | 0.522382 | false |
chrishokamp/maxent-decoder | phrase_table/Phrase_Table.py | 1 | 1704 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
from collections import deque,defaultdict
class Phrase_Table:
# TODO: unit names should actually be objects with sub-fields
def __init__(self, lines, unit_names):
self.phrase_table = self.build_phrase_table(lines, unit_names)
# store the field names in case we need them later
        self.field_names = unit_names
def build_phrase_table(self, lines, unit_names):
line_units = [line.split('|||') for line in lines]
def strip_list(l):
return deque([u.strip() for u in l])
lists_of_units = [strip_list(x) for x in line_units]
phrase_table = defaultdict(list)
# assume first elem is the key
for entry in lists_of_units:
f_phrase = entry.popleft()
e_phrase = entry.popleft()
# currently unused
counts = entry.pop()
alignment = entry.pop()
# end unused
# split each field on whitespace except target -- there should be a name for every field
flattened = []
for section in entry:
flattened = flattened + re.split('\s+', section)
flattened = [e_phrase] + flattened #TODO: hack
e = { k:v for k,v in zip(unit_names, flattened) }
phrase_table[f_phrase].append(e)
return phrase_table
# TODO: will throw error when item isn't found
def getEntry(self, phrase):
return self.phrase_table[phrase]
def contains(self, phrase):
if self.phrase_table[phrase] != []:
return True
else:
return False
def getTable(self):
return self.phrase_table
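# Minimal usage sketch (illustration only, not part of the original module):
# the sample line and field names below are hypothetical; the number of field
# names just has to cover the target phrase plus the whitespace-separated
# score columns left after the alignment and count fields are popped off.
if __name__ == '__main__':
    sample_lines = ["das Haus ||| the house ||| 0.4 0.6 ||| 0-0 1-1 ||| 3 2"]
    table = Phrase_Table(sample_lines, ['target', 'p_fe', 'p_ef'])
    if table.contains("das Haus"):
        print(table.getEntry("das Haus"))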
| mit | -8,461,846,650,403,378,000 | 30.555556 | 100 | 0.581573 | false |
GoberInfinity/ExampleDjango | modeladmin/migrations/0001_initial.py | 1 | 1439 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-13 08:31
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('crudclassviews', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Animal',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=140)),
('animal', models.CharField(max_length=140)),
('dbirth', models.DateField()),
('owner', models.ManyToManyField(to='crudclassviews.Person')),
],
options={
'ordering': ['animal'],
},
),
migrations.CreateModel(
name='Size',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('animal_size', models.CharField(choices=[('S', 'Small'), ('M', 'Medium'), ('L', 'Large')], max_length=1)),
],
),
migrations.AddField(
model_name='animal',
name='size',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='modeladmin.Size'),
),
]
| mit | 6,050,514,650,440,598,000 | 32.465116 | 123 | 0.539958 | false |
openstack/ceilometer | ceilometer/compute/pollsters/disk.py | 1 | 3344 | #
# Copyright 2012 eNovance <[email protected]>
# Copyright 2012 Red Hat, Inc
# Copyright 2014 Cisco Systems, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ceilometer.compute import pollsters
from ceilometer import sample
class PerDeviceDiskPollster(pollsters.GenericComputePollster):
inspector_method = "inspect_disks"
@staticmethod
def get_resource_id(instance, stats):
return "%s-%s" % (instance.id, stats.device)
@staticmethod
def get_additional_metadata(instance, stats):
return {'disk_name': stats.device}
class PerDeviceReadRequestsPollster(PerDeviceDiskPollster):
sample_name = 'disk.device.read.requests'
sample_unit = 'request'
sample_type = sample.TYPE_CUMULATIVE
sample_stats_key = 'read_requests'
class PerDeviceReadBytesPollster(PerDeviceDiskPollster):
sample_name = 'disk.device.read.bytes'
sample_unit = 'B'
sample_type = sample.TYPE_CUMULATIVE
sample_stats_key = 'read_bytes'
class PerDeviceWriteRequestsPollster(PerDeviceDiskPollster):
sample_name = 'disk.device.write.requests'
sample_unit = 'request'
sample_type = sample.TYPE_CUMULATIVE
sample_stats_key = 'write_requests'
class PerDeviceWriteBytesPollster(PerDeviceDiskPollster):
sample_name = 'disk.device.write.bytes'
sample_unit = 'B'
sample_type = sample.TYPE_CUMULATIVE
sample_stats_key = 'write_bytes'
class PerDeviceDiskLatencyPollster(PerDeviceDiskPollster):
inspector_method = 'inspect_disk_latency'
sample_name = 'disk.device.latency'
sample_unit = 'ms'
sample_stats_key = 'disk_latency'
class PerDeviceDiskIOPSPollster(PerDeviceDiskPollster):
inspector_method = 'inspect_disk_iops'
sample_name = 'disk.device.iops'
sample_unit = 'count/s'
sample_stats_key = 'iops_count'
class PerDeviceCapacityPollster(PerDeviceDiskPollster):
inspector_method = 'inspect_disk_info'
sample_name = 'disk.device.capacity'
sample_unit = 'B'
sample_stats_key = 'capacity'
class PerDeviceAllocationPollster(PerDeviceDiskPollster):
inspector_method = 'inspect_disk_info'
sample_name = 'disk.device.allocation'
sample_unit = 'B'
sample_stats_key = 'allocation'
class PerDevicePhysicalPollster(PerDeviceDiskPollster):
inspector_method = 'inspect_disk_info'
sample_name = 'disk.device.usage'
sample_unit = 'B'
sample_stats_key = 'physical'
class PerDeviceDiskReadLatencyPollster(PerDeviceDiskPollster):
sample_name = 'disk.device.read.latency'
sample_type = sample.TYPE_CUMULATIVE
sample_unit = 'ns'
sample_stats_key = 'rd_total_times'
class PerDeviceDiskWriteLatencyPollster(PerDeviceDiskPollster):
sample_name = 'disk.device.write.latency'
sample_type = sample.TYPE_CUMULATIVE
sample_unit = 'ns'
sample_stats_key = 'wr_total_times'
| apache-2.0 | 8,738,284,241,909,994,000 | 30.252336 | 75 | 0.733553 | false |
rahit/django-mymen | setup.py | 1 | 1102 | import os
from setuptools import setup
README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-rahit-user',
version='0.0.1',
description='Registration and Auth module',
long_description=README,
author='Tahsin Hassan Rahit',
author_email='[email protected]',
url='http://github.com/rahit/django-rahit-user',
packages=['mymen'],
install_requires=['Django >=1.5'],
include_package_data=True,
license='MIT License',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
| mit | -6,098,621,485,870,813,000 | 32.393939 | 78 | 0.625227 | false |
itucsdb1628/itucsdb1628 | suggestion.py | 1 | 3003 | import psycopg2 as dbapi2
from flask import request
from flask_login import current_user, login_required, login_user, logout_user
from datetime import date
from dsn_conf import get_dsn
dsn = get_dsn()
def select_suggestions():
with dbapi2.connect(dsn) as connection:
try:
cursor = connection.cursor()
query = """SELECT SUGGESTION.ID,USERDATA.USERNAME,
SUGGESTION.ARTIST, SUGGESTION.SONGNAME,SUGGESTION.RELEASEDATE,SUGGESTION.SUGGESTIONDATE,
SUGGESTION.STATU
FROM SUGGESTION,USERDATA
WHERE(
USERDATA.ID = SUGGESTION.USERID)
ORDER BY SUGGESTION.STATU DESC"""
cursor.execute(query)
return cursor
except dbapi2.DatabaseError as e:
connection.rollback()
def select_suggestions_user():
with dbapi2.connect(dsn) as connection:
try:
cursor = connection.cursor()
query = """SELECT ID,ARTIST,SONGNAME,RELEASEDATE,SUGGESTIONDATE,STATU
FROM SUGGESTION
WHERE(
SUGGESTION.USERID = %s
)
ORDER BY SUGGESTION.SUGGESTIONDATE""" % current_user.id
cursor.execute(query)
return cursor
except dbapi2.DatabaseError as e:
connection.rollback()
def insert_suggestion(userid,artist,songname,releasedate):
with dbapi2.connect(dsn) as connection:
try:
cursor = connection.cursor()
query = """INSERT INTO SUGGESTION(USERID,ARTIST,SONGNAME,SUGGESTIONDATE,RELEASEDATE,STATU)
VALUES(%s,%s,%s,%s,%s,%s)"""
            cursor.execute(query, (userid, artist, songname, date.today(), releasedate, 2))
connection.commit()
except dbapi2.DatabaseError as e:
connection.rollback()
def delete_suggestion(deleteId):
with dbapi2.connect(dsn) as connection:
try:
cursor = connection.cursor()
cursor.execute("""DELETE FROM SUGGESTION WHERE ID = %s""", (int(deleteId),))
connection.commit()
except dbapi2.DatabaseError as e:
connection.rollback()
def reject_suggestion(updateId):
with dbapi2.connect(dsn) as connection:
try:
cursor = connection.cursor()
query = """UPDATE SUGGESTION SET STATU = 0 WHERE ID = %s"""
cursor.execute(query, (updateId,))
connection.commit()
except dbapi2.DatabaseError as e:
connection.rollback()
def approve_suggestion(updateId):
with dbapi2.connect(dsn) as connection:
try:
cursor = connection.cursor()
query = """UPDATE SUGGESTION SET STATU = 1 WHERE ID = %s"""
cursor.execute(query, (updateId,))
connection.commit()
except dbapi2.DatabaseError as e:
connection.rollback()
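# Illustrative usage sketch (not part of the original module); it assumes a
# reachable PostgreSQL database configured via dsn_conf.get_dsn() and an
# existing SUGGESTION table, and the argument values below are made up:
#   insert_suggestion(userid=1, artist="Artist", songname="Song",
#                     releasedate="2016-05-01")
#   approve_suggestion(42)          # 42 is a hypothetical suggestion id
#   for row in select_suggestions():
#       print(row)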
| gpl-3.0 | 2,680,382,297,286,805,500 | 36.08642 | 102 | 0.589078 | false |
andycavatorta/oratio | Common/attar.py | 1 | 1226 | """
attar is the global logging and reporting system.
When it's more complete, it can be moved to thritybirds
It is used as a global repository for internal messages: debug, trace, exceptions, events
It writes these messages to the terminal ( std.out ) and a log file, and publishes them on ZMQ.
So messages are available in many modes and easier to view, review, and collect.
multiple instances can run simultaneously.
"""
import json
import time
class main():
def __init__(self, hostname, publisher):
        self.hostname = hostname
        self.publisher = publisher
# if /Logs/attar.log does not exist, create it
def log(self, topic, filename, classname, method, message, traceback=""):
# get timestamp
message_d = {
"timestamp":time.strftime("%Y-%m-%d %H:%M:%S:"),
"topic":topic,
"hostname":hostname,
"filename":filename,
"classname":classname,
"method":method,
"message":message,
"traceback":traceback
}
print topic, filename, classname, method, message, traceback
message_j = json.dumps(message_d)
#log message_d
#publish message_d to topic
        self.publisher.send(topic, message_j)
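# Illustrative usage sketch (not part of the original module); "pub" stands for
# any object exposing send(topic, message), e.g. a ZMQ publisher wrapper, and
# the host name and field values below are made up:
#   attar = main("host01", pub)
#   attar.log("debug", "attar.py", "main", "log", "subsystem started")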
| mit | 3,567,286,270,273,221,600 | 26.244444 | 95 | 0.627243 | false |
rechner/Taxidi | signature.py | 1 | 14273 | #!/usr/bin/env python
#*-* coding:utf-8 *-*
# signature.py © 2012 Zac Sturgeon and Nathan Lex
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""
A simple free-drawing area which outputs the drawing as a list
of line segments, suitable for capturing signatures to store in a
database. The module includes an optimised compression algorithm
with a 92% compression ratio.
See http://jkltech.net/taxidi/wiki/Signature_Format
"""
__version__ = '0.1'
__all__ = ['SignaturePad']
import wx
import math
import zlib
import base64
directions = [bin(0x7070563)[2:][i:i+3] for i in range(0,27,3)]
def encode(lines):
if len(lines) == 0: return '0'
# check if 3 points are on the same line, in order
def ison(a, c, b):
within = lambda p, q, r: p <= q <= r or r <= q <= p
return ((b[0] - a[0]) * (c[1] - a[1]) == (c[0] - a[0]) * (b[1] - a[1])
and (within(a[0], c[0], b[0]) if a[0] != b[0] else
within(a[1], c[1], b[1])))
# converts series of lines to 'connect the dots', and looks for single dots
strokes = [[lines[0][0:2]]]; dots = []
for line in lines:
if line[0:2] != strokes[-1][-1]:
if len(strokes[-1]) == 1:
dots += strokes.pop()
strokes += [[line[0:2]]]
if line[2:4] != strokes[-1][-1]:
if len(strokes[-1]) > 1 and \
ison(strokes[-1][-2], strokes[-1][-1], line[2:4]):
strokes[-1][-1] = line[2:4]
else:
strokes[-1] += [line[2:4]]
if len(strokes[-1]) == 1:
dots += strokes.pop()
# big endian, most significant first
def BEVLI4Enc(num):
if num == 0: return '0' * 4
else:
temp = -(-int(math.log(num, 2) + 1) // 3) * 3
temp = [bin(num)[2:].zfill(temp)[i:i+3] for i in range(0, temp, 3)]
return '1'.join([''] + temp[:-1]) + '0' + temp[-1]
# encode dots in binary
data = ''.join(map(BEVLI4Enc, [len(dots)] + [i for d in dots for i in d]))
# convert series of points to deltas, then convert to binary
for stroke in strokes:
prev_point = stroke[0]
data += ''.join(map(BEVLI4Enc, (len(stroke) - 2,) + prev_point))
for point in stroke[1:]:
dx, dy = point[0] - prev_point[0], point[1] - prev_point[1]
prev_point = point
# format: bit 'is this delta more than 1 pixel?', 3xbits direction
# directions: 111 000 001
# 110 # 010
# 101 100 011
isleap = abs(dx) > 1 or abs(dy) > 1
data += ('1' if isleap else '0') + \
directions[cmp(dx, 0) + 1 + (cmp(dy, 0) + 1) * 3]
if isleap:
if abs(dx): data += BEVLI4Enc(abs(dx))
if abs(dy): data += BEVLI4Enc(abs(dy))
# pad to byte boundry, then convert to binary
data = ''.join(map(lambda x: chr(int(x, 2)), \
[data[i:i+8].ljust(8, '0') for i in range(0,len(data),8)]))
# base 95 encoder
def b95btoa(b):
b95 = ''; n = int(('_' + b).encode('hex'), 16)
while n > 0:
b95 += chr(int(n % 95 + 32)); n /= 95
return b95[::-1]
# compress using zlib if it makes it smaller
z = zlib.compress(data)[2:-4]
if len(z) < len(data):
return 'c' + b95btoa(z)
else:
return 'e' + b95btoa(data)
def decode(data):
if data[0] == '0': return []
# dewrapper functions
def inflate(z):
return zlib.decompress(z, -zlib.MAX_WBITS)
def b64atob(b64):
return base64.b64decode(b64 + '=' * (4 - len(b64) % 4))
def b95atob(b95):
n = 0; m = 1
for c in b95[::-1]:
n += (ord(c) - 32) * m; m *= 95
return hex(n)[4:-1].decode('hex')
def unwrap(d):
return {
'a': inflate, # zlib compression
'b': lambda x: x, # raw version 1 format
'c': lambda x: inflate(b95atob(x)), # base 95 encoding, zlib compression
'd': lambda x: inflate(b64atob(x)), # base 64 encoding, zlib compression
'e': b95atob, # base 95 encoding, no compression
'f': b64atob # base 64 encoding, no compression
}[d[0]](d[1:])
# unwrap, break into groups of 4, and convert to 01
data = ''.join([bin(ord(c))[2:].rjust(8, '0') for c in unwrap(data)])
data = [data[i:i+4] for i in range(0, len(data), 4)]
def BEVLI4Dec(arr):
temp = [arr.pop(0)]
while temp[-1][0] == '1':
temp += [arr.pop(0)]
return int(''.join([i[1:4] for i in temp]), 2)
#decode dots
lines = []
for d in range(0, BEVLI4Dec(data)):
x, y = BEVLI4Dec(data), BEVLI4Dec(data)
lines += [(x, y, x, y)]
#decode strokes
num_points = BEVLI4Dec(data)
while num_points > 0:
last_line = (0, 0, BEVLI4Dec(data), BEVLI4Dec(data))
for i in range (0, num_points + 1):
isleap = data[0][0] == '1'
direction = directions.index(data.pop(0)[1:4])
dx, dy = direction % 3 - 1, direction / 3 - 1
last_line = (last_line[2], last_line[3],
last_line[2] + dx * (BEVLI4Dec(data) if isleap and dx != 0 else 1),
last_line[3] + dy * (BEVLI4Dec(data) if isleap and dy != 0 else 1))
lines += [last_line]
num_points = BEVLI4Dec(data) if len(data) > 0 else 0
return lines
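# Illustrative round-trip sketch (not part of the original module). The stroke
# below is made-up sample data in the same (x1, y1, x2, y2) segment format that
# SignaturePad.signature produces; encode() packs it and decode() should give
# back the same segments because no consecutive points are collinear.
def _example_round_trip():
    points = [(10 + i, 20 + (i % 2)) for i in range(20)]
    sample = [points[i] + points[i + 1] for i in range(len(points) - 1)]
    packed = encode(sample)
    print(packed)
    print(decode(packed))
    return packed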
class SignaturePad(wx.Window):
"""Widget for drawing and capturing a signature.
Optimised for a size of 500 x 200."""
def __init__(self, parent, signatureLine=True,
signatureLineText='Sign Here', signatureLineColour='Grey'):
super(SignaturePad, self).__init__(parent,
style=wx.NO_FULL_REPAINT_ON_RESIZE)
self._initDrawing()
self._bindEvents()
self._initBuffer()
self.signature = []
self.debug = False #Set to true to enable debugging output
self.signatureLine = signatureLine
self.signatureLineText = signatureLineText
self.signatureLineColour = signatureLineColour
self.SetMinSize((500, 200))
self.SetMaxSize((500, 200))
def _initDrawing(self):
self.SetBackgroundColour('White')
self.penThickness = 2 #default pen thickness
self.penColour = '#145394' #default colour
self.lines = []
self.previousPosition = (0, 0)
def _bindEvents(self):
for event, handler in [ \
(wx.EVT_LEFT_DOWN, self.onLeftDown), # Start drawing
(wx.EVT_LEFT_UP, self.onLeftUp), # Stop drawing
(wx.EVT_MOTION, self.onMotion), # Draw
(wx.EVT_SIZE, self.onSize), # Prepare for redraw
(wx.EVT_IDLE, self.onIdle), # Redraw
(wx.EVT_PAINT, self.onPaint), # Refresh
(wx.EVT_WINDOW_DESTROY, self.cleanup)]:
self.Bind(event, handler)
def _initBuffer(self):
# Initialize the bitmap used for the display buffer
size = self.GetClientSize()
self.buffer = wx.EmptyBitmap(size.width, size.height)
dc = wx.BufferedDC(None, self.buffer)
dc.SetBackground(wx.Brush(self.GetBackgroundColour()))
dc.Clear()
self.drawLines(dc, *self.lines)
self.reInitBuffer = False #set flag
def SetPenColour(self, colour):
"""Sets the active pen colour. Returns true if changed."""
if (self.penColour == colour): return False
self.penColour = colour
return True
def SetPenThickness(self, thickness):
"""Sets the pen thickness."""
self.penThickness = thickness
#Event handlers:
def onLeftDown(self, event):
"""Called on left button press (pen down)"""
self.currentLine = []
self.previousPosition = event.GetPositionTuple()
self.CaptureMouse()
def onLeftUp(self, event):
"""Called on left button release (pen up)"""
if self.HasCapture():
self.lines.append((self.penColour, self.penThickness,
self.currentLine))
self.currentLine = []
self.ReleaseMouse()
def onMotion(self, event):
"""Called when the mouse moving (pen is being dragged). If the
left button is down while dragging, a line is drawn from the
last event position to the new one. Coordinates are saved
for redrawing and appended to the signature output."""
if event.Dragging() and event.LeftIsDown():
dc = wx.BufferedDC(wx.ClientDC(self), self.buffer)
currentPosition = event.GetPositionTuple()
lineSegment = self.previousPosition + currentPosition
self.signature.append(lineSegment) #Store signature value
self.drawLines(dc, (self.penColour, self.penThickness,
[lineSegment]))
self.currentLine.append(lineSegment)
self.previousPosition = currentPosition
if self.debug:
print self.signature
print len(self.signature)
def onSize(self, event):
"""Enables flag to cause a redraw event if the window is
resized"""
self.reInitBuffer = True
def onIdle(self, event):
"""If the window is resized, the bitmap is recopied to match
the new window size. The buffer is re-initialized while
idle such that a refresh only occurs once when needed."""
if self.reInitBuffer:
self._initBuffer()
self.Refresh(False)
def onPaint(self, event):
"""Paints window and signature line when exposed."""
# Create a buffered paint DC. It will create the real
# wx.PaintDC and then blit the bitmap to it when dc is
# deleted. Since we don't need to draw anything else
# here that's all there is to it.
dc = wx.BufferedPaintDC(self, self.buffer)
#Signature line
if self.signatureLine:
self.drawLines(dc, (self.signatureLineColour,
2, [(20, 150, 480, 150)]))
font = wx.Font(10, wx.FONTFAMILY_SCRIPT,
wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)
dc.SetFont(font)
dc.SetTextForeground(self.signatureLineColour)
dc.DrawText(self.signatureLineText, 20, 155)
def clear(self):
self.currentLine = []
self.signature = []
self.reInitBuffer = True
self._initBuffer()
dc = wx.BufferedDC(wx.ClientDC(self), self.buffer)
dc.Clear()
self.Refresh()
def cleanup(self, event):
#for future use
return True
@staticmethod
def drawLines(dc, *lines):
''' drawLines takes a device context (dc) and a list of lines
as arguments. Each line is a three-tuple: (colour, thickness,
linesegments). linesegments is a list of coordinates: (x1, y1,
x2, y2). '''
dc.BeginDrawing()
for colour, thickness, lineSegments in lines:
pen = wx.Pen(colour, thickness, wx.SOLID)
dc.SetPen(pen)
for lineSegment in lineSegments:
dc.DrawLine(*lineSegment)
dc.EndDrawing()
t_CONTROLS_CANCEL = wx.NewEventType()
CONTROLS_CANCEL = wx.PyEventBinder(t_CONTROLS_CANCEL, 1)
class SignaturePadControls(wx.Panel):
def __init__(self, parent=None):
super(SignaturePadControls, self).__init__(parent)
sizer = wx.BoxSizer(wx.VERTICAL)
bsizer = wx.BoxSizer(wx.HORIZONTAL)
self.CancelButton = wx.Button(self, wx.ID_CANCEL, size=(-1, 50))
self.ClearButton = wx.Button(self, wx.ID_CLEAR, size=(-1, 50))
self.AcceptButton = wx.Button(self, wx.ID_OK, size=(-1, 50))
bsizer.Add(self.ClearButton, 1, wx.EXPAND | wx.ALL, 5)
bsizer.AddStretchSpacer()
bsizer.Add(self.CancelButton, 1, wx.EXPAND | wx.ALL, 5)
bsizer.Add(self.AcceptButton, 1, wx.EXPAND | wx.ALL, 5)
self.sigpad = SignaturePad(self)
sizer.Add(bsizer, 0, wx.EXPAND)
sizer.Add(self.sigpad, 1, wx.EXPAND)
self.SetSizer(sizer)
self.CancelButton.Bind(wx.EVT_BUTTON, self.onCancel)
self.ClearButton.Bind(wx.EVT_BUTTON, self.onClear)
self.AcceptButton.Bind(wx.EVT_BUTTON, self.onAccept)
def onClear(self, event):
self.sigpad.clear()
def onCancel(self, event):
evt2 = wx.PyCommandEvent(t_CONTROLS_CANCEL, self.GetId())
self.GetEventHandler().ProcessEvent(evt2)
event.Skip()
pass
def onAccept(self, event):
if self.sigpad.signature == []:
wx.MessageBox('Signature cannot be blank!',
'Error', wx.OK | wx.ICON_ERROR)
else:
print self.sigpad.signature
encoded = encode(self.sigpad.signature)
print decode(encoded)
class TestFrame(wx.Frame):
def __init__(self, parent=None):
super(TestFrame, self).__init__(parent, title="Signature Pad",
size=(500,260),
style=wx.DEFAULT_FRAME_STYLE^ wx.RESIZE_BORDER)
signature = SignaturePadControls(self)
signature.Bind(CONTROLS_CANCEL, self.onCancel)
self.Centre()
def onCancel(self, event):
self.Close()
if __name__ == '__main__':
app = wx.App()
frame = TestFrame()
frame.Show()
app.MainLoop()
| gpl-3.0 | 134,584,395,785,613,250 | 35.974093 | 80 | 0.568316 | false |
rdkit/rdkit-orig | Contrib/mmpa/cansmirk.py | 2 | 3001 | # Copyright (c) 2012, GlaxoSmithKline Research & Development Ltd.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of GlaxoSmithKline Research & Development Ltd.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Created by Jameed Hussain, September 2012
import sys
import re
from rdkit import Chem
from indexing import cansmirk
if __name__=='__main__':
if (len(sys.argv) >= 2):
print "Program that canonicalises an input SMIRKS so its in same format as MMP identification program.\n";
print "USAGE: ./cansmirks.py <file_of_smirks\n";
sys.exit(1)
#read the STDIN
for line in sys.stdin:
line = line.rstrip()
line_fields = re.split('\s|,',line)
smirks = line_fields[0]
if(len(line_fields) == 1):
id=""
else:
id=line_fields[1]
lhs,rhs = smirks.split(">>")
l = Chem.MolFromSmiles( lhs )
if(l == None):
sys.stderr.write("Can't generate mol for: %s\n" % (lhs) )
continue
r = Chem.MolFromSmiles( rhs )
if(r == None):
sys.stderr.write("Can't generate mol for: %s\n" % (rhs) )
continue
clhs = Chem.MolToSmiles( l, isomericSmiles=True )
crhs = Chem.MolToSmiles( r, isomericSmiles=True )
#just need to take care of [*H:1]
if(clhs == '[*H:1]'):
clhs = '[*:1][H]'
if(crhs == '[*H:1]'):
crhs = '[*:1][H]'
#print clhs
#print crhs
csmirk,context = cansmirk(clhs,crhs,"")
print "%s %s" % (csmirk,id)
| bsd-3-clause | -579,565,610,952,190,800 | 34.305882 | 107 | 0.656115 | false |
nmdp-bioinformatics/service-gfe-submission | client-python/setup.py | 1 | 2627 | # coding: utf-8
"""
Gene Feature Enumeration Service
The Gene Feature Enumeration (GFE) Submission service provides an API for converting raw sequence data to GFE. It provides both a RESTful API and a simple user interface for converting raw sequence data to GFE results. Sequences can be submitted one at a time or as a fasta file. This service uses <a href=\"https://github.com/nmdp-bioinformatics/service-feature\">nmdp-bioinformatics/service-feature</a> for encoding the raw sequence data and <a href=\"https://github.com/nmdp-bioinformatics/HSA\">nmdp-bioinformatics/HSA</a> for aligning the raw sequence data. The code is open source, and available on <a href=\"https://github.com/nmdp-bioinformatics/service-gfe-submission\">GitHub</a>.<br><br>Go to <a href=\"http://service-gfe-submission.readthedocs.io\">service-gfe-submission.readthedocs.io</a> for more information
OpenAPI spec version: 1.0.7
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import sys
from setuptools import setup, find_packages
NAME = "swagger_client"
VERSION = "1.0.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = ["urllib3 >= 1.15", "six >= 1.10", "certifi", "python-dateutil"]
setup(
name=NAME,
version=VERSION,
description="Gene Feature Enumeration Service",
author_email="[email protected]",
url="",
keywords=["Swagger", "Gene Feature Enumeration Service"],
install_requires=REQUIRES,
packages=find_packages(),
include_package_data=True,
long_description="""\
The Gene Feature Enumeration (GFE) Submission service provides an API for converting raw sequence data to GFE. It provides both a RESTful API and a simple user interface for converting raw sequence data to GFE results. Sequences can be submitted one at a time or as a fasta file. This service uses <a href=\"https://github.com/nmdp-bioinformatics/service-feature\">nmdp-bioinformatics/service-feature</a> for encoding the raw sequence data and <a href=\"https://github.com/nmdp-bioinformatics/HSA\">nmdp-bioinformatics/HSA</a> for aligning the raw sequence data. The code is open source, and available on <a href=\"https://github.com/nmdp-bioinformatics/service-gfe-submission\">GitHub</a>.<br><br>Go to <a href=\"http://service-gfe-submission.readthedocs.io\">service-gfe-submission.readthedocs.io</a> for more information
"""
)
| gpl-3.0 | 1,899,160,545,371,149,600 | 63.073171 | 947 | 0.744956 | false |
superfluidity/RDCL3D | code/lib/nemo/nemo_parser.py | 1 | 1987 | import json
import pyaml
import yaml
from lib.util import Util
from lib.parser import Parser
import logging
import traceback
import glob
import os
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger('NemoParser')
class NemoParser(Parser):
"""Parser methods for nemo project type
"""
def __init__(self):
super(NemoParser, self).__init__()
@classmethod
def importprojectdir(cls,dir_project, file_type):
"""Imports all descriptor files under a given folder
this method is specific for Nemo project type
"""
project = {
'intent':{},
'nodemodel':{},
'positions': {}
}
for desc_type in project:
cur_type_path = os.path.join(dir_project, desc_type.upper())
log.debug(cur_type_path)
if os.path.isdir(cur_type_path):
for file in glob.glob(os.path.join(cur_type_path, '*.'+file_type)):
if file_type == 'nemo':
project[desc_type][os.path.basename(file).split('.')[0]] = Util.openfile(file).read()
for vertices_file in glob.glob(os.path.join(dir_project, '*.json')):
if os.path.basename(vertices_file) == 'vertices.json':
project['positions']['vertices'] = Util.loadjsonfile(vertices_file)
return project
@classmethod
def importprojectfiles(cls, file_dict):
"""Imports descriptors (extracted from the new project POST)
The keys in the dictionary are the file types
"""
project = {
'intent':{},
'nodemodel':{},
}
print "Importing project file"
for desc_type in project:
if desc_type in file_dict:
files_desc_type = file_dict[desc_type]
for file in files_desc_type:
project[desc_type][os.path.splitext(file.name)[0]] = file.read()
return project
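# Illustrative usage sketch (not part of the original module); the directory
# layout and file names below are hypothetical examples of what
# importprojectdir() expects (one upper-case folder per descriptor type plus an
# optional vertices.json holding saved node positions):
#
#   my_project/
#       INTENT/my_intent.nemo
#       NODEMODEL/my_node.nemo
#       vertices.json
#
#   project = NemoParser.importprojectdir('my_project', 'nemo')
#   print(project['intent'].keys())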
| apache-2.0 | 8,360,074,849,449,196,000 | 26.985915 | 109 | 0.573729 | false |
rizaon/limbo | limbo/plugins/stock.py | 1 | 1262 | """$<ticker symbol> for a quote on a stock price"""
from __future__ import print_function
import logging
import re
try:
from urllib import quote
except ImportError:
from urllib.request import quote
from bs4 import BeautifulSoup
import requests
logger = logging.getLogger(__name__)
def stockprice(ticker):
url = "https://www.google.com/finance?q={0}"
soup = BeautifulSoup(requests.get(url.format(quote(ticker))).text)
try:
company, ticker = re.findall(u"^(.+?)\xa0\xa0(.+?)\xa0", soup.text, re.M)[0]
except IndexError:
logging.info("Unable to find stock {0}".format(ticker))
return ""
price = soup.select("#price-panel .pr span")[0].text
change, pct = soup.select("#price-panel .nwp span")[0].text.split()
    pct = pct.strip('()')
emoji = ":chart_with_upwards_trend:" if change.startswith("+") else ":chart_with_downwards_trend:"
return "{0} {1} {2}: {3} {4} {5} {6}".format(emoji, company, ticker, price, change, pct, emoji)
def on_message(msg, server):
text = msg.get("text", "")
matches = re.findall(r"^\$[a-zA-Z]\w{0,3}", text)
if not matches:
return
prices = [stockprice(ticker[1:].encode("utf8")) for ticker in matches]
return "\n".join(p for p in prices if p)
| mit | 8,730,622,750,421,006,000 | 30.55 | 102 | 0.637876 | false |
datastreaming/mflow_nodes | mflow_nodes/test_tools/m_generate_test_stream.py | 1 | 2509 | from argparse import ArgumentParser
import numpy as np
from types import SimpleNamespace
from mflow_nodes.stream_tools.mflow_forwarder import MFlowForwarder
def generate_frame_data(frame_shape, frame_number):
"""
Generate a frame that is filled with the frame number value.
:param frame_shape: Shape of the frame to generate.
:param frame_number: Number to fill the frame with.
"""
return np.full(shape=frame_shape, fill_value=frame_number, dtype=np.int32)
def generate_test_array_stream(binding_address="tcp://127.0.0.1:40000", frame_shape=(4, 4), number_of_frames=16):
"""
Generate an array-1.0 stream of shape [4,4] and the specified number of frames.
The values for each cell in the frame corresponds to the frame number.
:param frame_shape: Shape (number of cells) of the frames to send.
:param number_of_frames: Number of frames to send.
:param binding_address: Address to bind the stream to.
"""
print("Preparing to send %d frames of shape %s." % (number_of_frames, str(frame_shape)))
mflow_forwarder = MFlowForwarder()
mflow_forwarder.start(binding_address)
# Test stream is of array type.
header = {"htype": "array-1.0",
"type": "int32",
"shape": list(frame_shape)}
# Send 16 4x4 frames. The values of each array cell is equal to the frame number.
for frame_number in range(number_of_frames):
header["frame"] = frame_number
data = generate_frame_data(frame_shape, frame_number)
print("Sending frame %d" % frame_number)
message = SimpleNamespace()
message.data = {"header": header, "data": [data]}
mflow_forwarder.forward(message)
mflow_forwarder.stop()
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("binding_address", type=str, help="Binding address for mflow connection.\n"
"Example: tcp://127.0.0.1:40001")
parser.add_argument("--n_frames", type=int, default=16, help="Number of frames to generate.")
parser.add_argument("--frame_size", type=int, default=4, help="Number of values X and Y direction, per frame.")
input_args = parser.parse_args()
try:
generate_test_array_stream(input_args.binding_address, number_of_frames=input_args.n_frames,
frame_shape=(input_args.frame_size, input_args.frame_size))
except KeyboardInterrupt:
print("Terminated by user.")
| gpl-3.0 | 47,031,052,100,983,640 | 39.467742 | 115 | 0.656038 | false |
SublimeGit/SublimeGit | sgit/checkout.py | 1 | 9251 | # coding: utf-8
from functools import partial
import sublime
from sublime_plugin import WindowCommand, TextCommand
from .util import noop
from .cmd import GitCmd
from .helpers import GitStatusHelper, GitBranchHelper, GitErrorHelper, GitLogHelper, GitRemoteHelper
from .helpers import GitTagHelper
GIT_BRANCH_EXISTS_MSG = "The branch %s already exists. Do you want to overwrite it?"
NO_REMOTES = u"No remotes have been configured. Remotes can be added with the Git: Add Remote command. Do you want to add a remote now?"
class GitCheckoutWindowCmd(GitCmd, GitBranchHelper, GitLogHelper, GitErrorHelper):
pass
class GitCheckoutBranchCommand(WindowCommand, GitCheckoutWindowCmd):
"""
Check out an existing branch.
This command allows you to select a branch from the quick bar
to check out. The currently active branch (if any) is marked with an
asterisk (*) to the left of its name.
"""
def run(self):
repo = self.get_repo()
if not repo:
return
branches = self.get_branches(repo)
choices = []
for current, name in branches:
choices.append('%s %s' % ('*' if current else ' ', name))
self.window.show_quick_panel(choices, partial(self.on_done, repo, branches), sublime.MONOSPACE_FONT)
def on_done(self, repo, branches, idx):
if idx == -1:
return
current, branch = branches[idx]
if current:
return
exit, stdout, stderr = self.git(['checkout', branch], cwd=repo)
if exit == 0:
panel = self.window.get_output_panel('git-checkout')
panel.run_command('git_panel_write', {'content': stderr})
self.window.run_command('show_panel', {'panel': 'output.git-checkout'})
else:
sublime.error_message(self.format_error_message(stderr))
self.window.run_command('git_status', {'refresh_only': True})
class GitCheckoutTagCommand(WindowCommand, GitCheckoutWindowCmd, GitTagHelper):
"""
Check out a specific tag.
This command allows you to check out a specific tag. A list of
available tags will be presented in the quick bar.
After checkout, you will be in a detached head state.
"""
def run(self, repo=None, tag=None):
repo = repo or self.get_repo()
if not repo:
return
if tag:
self.on_tag(repo, tag)
else:
tags = self.get_tags(repo)
if not tags:
sublime.error_message("This repo does not contain any tags. Run Git: Add Tag to add one.")
return
choices = self.format_quick_tags(tags)
def on_done(idx):
if idx != -1:
tag = choices[idx][0]
self.on_tag(repo, tag)
self.window.show_quick_panel(choices, on_done)
def on_tag(self, repo, tag):
exit, stdout, stderr = self.git(['checkout', 'tags/%s' % tag], cwd=repo)
if exit == 0:
panel = self.window.get_output_panel('git-checkout')
panel.run_command('git_panel_write', {'content': stderr})
self.window.run_command('show_panel', {'panel': 'output.git-checkout'})
else:
sublime.error_message(self.format_error_message(stderr))
self.window.run_command('git_status', {'refresh_only': True})
class GitCheckoutCommitCommand(WindowCommand, GitCheckoutWindowCmd):
"""
Check out a specific commit.
This command allows you to check out a specific commit. The list
of commits will be presented in the quick bar, containing the first
line of the commit message, the abbreviated sha1, as well as a relative
and absolute date in the local timezone.
After checkout, you will be in a detached head state.
"""
def run(self):
repo = self.get_repo()
if not repo:
return
log = self.get_quick_log(repo)
hashes, choices = self.format_quick_log(log)
self.window.show_quick_panel(choices, partial(self.on_done, repo, hashes))
def on_done(self, repo, hashes, idx):
if idx == -1:
return
commit = hashes[idx]
exit, stdout, stderr = self.git(['checkout', commit], cwd=repo)
if exit == 0:
panel = self.window.get_output_panel('git-checkout')
panel.run_command('git_panel_write', {'content': stderr})
self.window.run_command('show_panel', {'panel': 'output.git-checkout'})
else:
sublime.error_message(self.format_error_message(stderr))
self.window.run_command('git_status', {'refresh_only': True})
class GitCheckoutNewBranchCommand(WindowCommand, GitCheckoutWindowCmd):
"""
Create a new branch from the current HEAD and switch to it.
This command will show an input panel allowing you to name your new
branch. After giving the branch a name, pressing enter will create
the new branch and check it out. Pressing esc will cancel.
If a branch with the given name already exists, you will be asked if
you want to overwrite the branch. Selecting cancel will exit silently,
without making any changes.
"""
def run(self):
repo = self.get_repo()
if not repo:
return
self.window.show_input_panel("Branch:", "", partial(self.on_done, repo), noop, noop)
def on_done(self, repo, branch):
branch = branch.strip()
if not branch:
return
b = '-b'
branches = [n for _, n in self.get_branches(repo)]
if branch in branches:
if sublime.ok_cancel_dialog(GIT_BRANCH_EXISTS_MSG % branch, 'Overwrite'):
b = '-B'
else:
return
exit, stdout, stderr = self.git(['checkout', b, branch], cwd=repo)
if exit == 0:
panel = self.window.get_output_panel('git-checkout')
panel.run_command('git_panel_write', {'content': stderr})
self.window.run_command('show_panel', {'panel': 'output.git-checkout'})
else:
sublime.error_message(self.format_error_message(stderr))
self.window.run_command('git_status', {'refresh_only': True})
class GitCheckoutRemoteBranchCommand(WindowCommand, GitCheckoutWindowCmd, GitRemoteHelper):
"""Checkout a remote branch."""
def run(self, repo=None):
        repo = repo or self.get_repo()
if not repo:
return
remotes = self.get_remotes(repo)
if not remotes:
if sublime.ok_cancel_dialog(NO_REMOTES, 'Add Remote'):
self.window.run_command('git_remote_add')
return
choices = self.format_quick_remotes(remotes)
self.window.show_quick_panel(choices, partial(self.remote_panel_done, repo, choices))
def remote_panel_done(self, repo, choices, idx):
if idx != -1:
remote = choices[idx][0]
remote_branches = self.get_remote_branches(repo, remote)
if not remote_branches:
return sublime.error_message("No branches on remote %s" % remote)
formatted_remote_branches = self.format_quick_branches(remote_branches)
local_branches = [b for _, b in self.get_branches(repo)]
remote_only_branches = [b for b in formatted_remote_branches if b[0] not in frozenset(local_branches)]
if not remote_only_branches:
return sublime.error_message("All remote branches are already present locally")
def on_remote():
self.window.show_quick_panel(remote_only_branches, partial(self.remote_branch_panel_done, repo, remote_only_branches))
sublime.set_timeout(on_remote, 50)
def remote_branch_panel_done(self, repo, branches, idx):
if idx != -1:
branch = branches[idx][0]
exit, stdout, stderr = self.git(['checkout', branch], cwd=repo)
if exit == 0:
panel = self.window.get_output_panel('git-checkout')
panel.run_command('git_panel_write', {'content': stderr})
self.window.run_command('show_panel', {'panel': 'output.git-checkout'})
else:
sublime.error_message(self.format_error_message(stderr))
self.window.run_command('git_status', {'refresh_only': True})
class GitCheckoutCurrentFileCommand(TextCommand, GitCmd, GitStatusHelper):
"""
Documentation coming soon.
"""
def run(self, edit):
filename = self.view.file_name()
if not filename:
sublime.error_message("Cannot checkout an unsaved file.")
return
repo = self.get_repo()
if not repo:
return
if not self.file_in_git(repo, filename):
sublime.error_message("The file %s is not tracked by git.")
return
exit, stdout, stderr = self.git(['checkout', '--quiet', '--', filename], cwd=repo)
if exit == 0:
sublime.status_message('Checked out %s' % filename)
view = self.view
sublime.set_timeout(partial(view.run_command, 'revert'), 50)
else:
sublime.error_message('git error: %s' % stderr)
| mit | 7,732,864,790,283,200,000 | 34.856589 | 136 | 0.611177 | false |
textclf/data-handler | nlpdatahandlers/base_handler.py | 1 | 6414 | import numpy as np
import util.misc
class DataHandlerException(Exception):
pass
class BaseDataHandler(object):
DATA_ALL = 1
DATA_TRAIN = 2
DATA_VALIDATION = 4
DATA_TEST = 3
def __init__(self, source):
self.source = source
def get_data(self, type=DATA_ALL):
"""
Process the data from its source and returns two lists: texts and labels, ready for a classifier to be used
"""
raise NotImplementedError()
@staticmethod
def shuffle_data(train_values, labels):
combined_lists = zip(train_values, labels)
np.random.shuffle(combined_lists)
return zip(*combined_lists)
@staticmethod
def to_word_level_vectors(texts_list, wv_container, words_per_text=None):
"""
Receives a list of texts. For each text, it converts the text into a list of word vectors
given by a vector container (Glove, WordToVec) for direct use as input
If words_per_text is specified, each text representation can have as many
as words_per_text words. Hence texts will be cut or zero-padded.
"""
from util.language import tokenize_text
tokenized_texts = util.misc.parallel_run(tokenize_text, texts_list)
text_wvs_indices = [wv_container.get_indices(text) for text in tokenized_texts]
del tokenized_texts
text_wvs = [wv_container[text_indices] for text_indices in text_wvs_indices]
del text_wvs_indices
if words_per_text is not None:
text_wvs = BaseDataHandler.__pad_sequence_word_vectors(text_wvs, words_per_text)
return text_wvs
@staticmethod
def __pad_sequence_word_vectors(text_wvs, maxlen=None):
"""
Given a list of lists of word vectors (this is, wvs for texts), it zero-pads
or reduces the number of words up to maxlen if specified. Otherwise, it pads
everything up to the maximum text size
"""
lengths = [len(s) for s in text_wvs]
nb_samples = len(text_wvs)
if maxlen is None:
maxlen = np.max(lengths)
wv_dim = text_wvs[0].shape[1]
x = np.zeros((nb_samples, maxlen, wv_dim)).astype('float32')
for idx, s in enumerate(text_wvs):
x[idx, :lengths[idx]] = s[:maxlen]
return x
@staticmethod
def to_char_level_idx(texts_list, char_container, chars_per_word=None, words_per_document=None, prepend=False):
"""
Receives a list of texts. For each text, it converts the text into a list of indices of a characters
for later use in the embedding of a neural network.
Texts are padded (or reduced) up to chars_per_word
char_container is assumed to be a method that converts characters to indices using a method
called get_indices()
"""
from util.language import tokenize_text
texts_list = util.misc.parallel_run(tokenize_text, texts_list)
if words_per_document is not None:
text_with_indices = [BaseDataHandler.__normalize(char_container.get_indices(txt), chars_per_word, prepend) for txt in texts_list]
text_with_indices = BaseDataHandler.__normalize(text_with_indices, size=words_per_document, filler=[0] * chars_per_word)
else:
text_with_indices = char_container.get_indices(texts_list)
return text_with_indices
@staticmethod
def to_word_level_idx(texts_list, wv_container, words_per_document=None, prepend=False):
"""
Receives a list of texts. For each text, it converts the text into indices of a word
vector container (Glove, WordToVec) for later use in the embedding of a neural network.
Texts are padded (or reduced) up to words_per_document
"""
from util.language import tokenize_text
texts_list = util.misc.parallel_run(tokenize_text, texts_list)
if words_per_document is not None:
text_with_indices = BaseDataHandler.__normalize(wv_container.get_indices(texts_list), words_per_document, prepend)
else:
text_with_indices = wv_container.get_indices(texts_list)
return text_with_indices
@staticmethod
def to_sentence_level_idx(texts_list, sentences_per_paragraph, words_per_sentence, wv_container, prepend=False):
"""
Receives a list of texts. For each text, it converts the text into sentences and converts the words into
indices of a word vector container (Glove, WordToVec) for later use in the embedding of a neural network.
Sentences are padded (or reduced) up to words_per_sentence elements.
Texts ("paragraphs") are padded (or reduced) up to sentences_per_paragraph
If prepend = True, padding is added at the beginning
Ex: [[This might be cumbersome. Hopefully not.], [Another text]]
to
        [ [[5, 24, 3, 223], [123, 25, 0, 0]], [[34, 25, 0, 0], [0, 0, 0, 0]] ]
        using sentences_per_paragraph = 2, words_per_sentence = 4
"""
from util.language import parse_paragraph
texts_list = util.misc.parallel_run(parse_paragraph, texts_list)
text_with_normalized_sentences = [BaseDataHandler.__normalize(review, size=words_per_sentence, prepend=prepend)
for review in wv_container.get_indices(texts_list)]
text_padded_paragraphs = BaseDataHandler.__normalize(text_with_normalized_sentences,
size=sentences_per_paragraph, filler=[0] * words_per_sentence)
return text_padded_paragraphs
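    # Usage sketch (names and sizes are assumptions, not from the original file):
    #     idx = BaseDataHandler.to_sentence_level_idx(reviews, 16, 50, glove_box)
    # gives, for every review, a 16 x 50 nested list of word indices -- one row per
    # sentence, zero-padded -- ready for an embedding layer.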
@staticmethod
def __normalize(sq, size=30, filler=0, prepend=False):
"""
        Take a list of lists `sq` and ensure that each sublist has length `size`
        Args:
        -----
        sq: a non-generator iterable of lists
        size: integer, the size that each sublist should be normalized to
        filler: obj -- what should be added to fill out the size?
        prepend: should `filler` be added to the front or the back of the list?
"""
if not prepend:
def _normalize(e, sz):
return e[:sz] if len(e) >= sz else e + [filler] * (sz - len(e))
else:
def _normalize(e, sz):
return e[-sz:] if len(e) >= sz else [filler] * (sz - len(e)) + e
return [_normalize(e, size) for e in sq]
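    # Behaviour sketch (added comment, directly mirroring the code above):
    #     __normalize([[1, 2], [3, 4, 5, 6]], size=3)      -> [[1, 2, 0], [3, 4, 5]]
    #     __normalize([[1, 2]], size=3, prepend=True)      -> [[0, 1, 2]]
    # i.e. every sublist is cut or filled with `filler` until it has exactly `size` items.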
| mit | 4,222,095,093,282,551,300 | 42.04698 | 141 | 0.631743 | false |
yt-project/unyt | unyt/tests/test_unit_systems.py | 1 | 4322 | """
Test unit systems.
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2018, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the LICENSE file, distributed with this software.
# -----------------------------------------------------------------------------
import pytest
import unyt.unit_symbols as us
from unyt.exceptions import IllDefinedUnitSystem, MissingMKSCurrent
from unyt.unit_object import Unit
from unyt.unit_systems import (
UnitSystem,
cgs_unit_system,
mks_unit_system,
unit_system_registry,
)
from unyt.unit_registry import UnitRegistry
from unyt import dimensions
def test_unit_systems():
goofy_unit_system = UnitSystem(
"goofy",
"ly",
"lbm",
"hr",
temperature_unit="R",
angle_unit="arcsec",
current_mks_unit="mA",
luminous_intensity_unit="cd",
)
assert goofy_unit_system["temperature"] == Unit("R")
assert goofy_unit_system[dimensions.solid_angle] == Unit("arcsec**2")
assert goofy_unit_system["energy"] == Unit("lbm*ly**2/hr**2")
goofy_unit_system["energy"] = "eV"
assert goofy_unit_system["energy"] == Unit("eV")
assert goofy_unit_system["magnetic_field_mks"] == Unit("lbm/(hr**2*mA)")
assert "goofy" in unit_system_registry
def test_unit_system_id():
reg1 = UnitRegistry()
reg2 = UnitRegistry()
assert reg1.unit_system_id == reg2.unit_system_id
reg1.modify("g", 2.0)
assert reg1.unit_system_id != reg2.unit_system_id
reg1 = UnitRegistry()
reg1.add("dinosaurs", 12.0, dimensions.length)
assert reg1.unit_system_id != reg2.unit_system_id
reg1 = UnitRegistry()
reg1.remove("g")
assert reg1.unit_system_id != reg2.unit_system_id
reg1.add("g", 1.0e-3, dimensions.mass, prefixable=True)
assert reg1.unit_system_id == reg2.unit_system_id
def test_bad_unit_system():
with pytest.raises(IllDefinedUnitSystem):
UnitSystem("atomic", "nm", "fs", "nK", "rad")
with pytest.raises(IllDefinedUnitSystem):
UnitSystem("atomic", "nm", "fs", "nK", "rad", registry=UnitRegistry())
with pytest.raises(IllDefinedUnitSystem):
UnitSystem("atomic", us.nm, us.fs, us.nK, us.rad)
with pytest.raises(IllDefinedUnitSystem):
UnitSystem("atomic", us.nm, us.fs, us.nK, us.rad, registry=UnitRegistry())
def test_code_unit_system():
ureg = UnitRegistry()
ureg.add("code_length", 2.0, dimensions.length)
ureg.add("code_mass", 3.0, dimensions.mass)
ureg.add("code_time", 4.0, dimensions.time)
ureg.add("code_temperature", 5.0, dimensions.temperature)
code_unit_system = UnitSystem(
"my_unit_system",
"code_length",
"code_mass",
"code_time",
"code_temperature",
registry=ureg,
)
assert code_unit_system["length"] == Unit("code_length", registry=ureg)
assert code_unit_system["length"].base_value == 2
assert code_unit_system["mass"] == Unit("code_mass", registry=ureg)
assert code_unit_system["mass"].base_value == 3
assert code_unit_system["time"] == Unit("code_time", registry=ureg)
assert code_unit_system["time"].base_value == 4
assert code_unit_system["temperature"] == Unit("code_temperature", registry=ureg)
assert code_unit_system["temperature"].base_value == 5
def test_mks_current():
with pytest.raises(MissingMKSCurrent):
cgs_unit_system[dimensions.current_mks]
with pytest.raises(MissingMKSCurrent):
cgs_unit_system[dimensions.magnetic_field]
with pytest.raises(MissingMKSCurrent):
cgs_unit_system[dimensions.current_mks] = "foo"
with pytest.raises(MissingMKSCurrent):
cgs_unit_system[dimensions.magnetic_field] = "bar"
assert cgs_unit_system.has_current_mks is False
assert mks_unit_system.has_current_mks is True
def test_create_unit_system_from_unit_objects():
s = UnitSystem("test_units", us.Mpc, us.Msun, us.s)
assert s["length"] == us.Mpc
assert s["mass"] == us.Msun
assert s["time"] == us.s
def test_create_unit_system_from_quantity():
s = UnitSystem("test_units", us.Mpc, 3 * us.Msun, us.s)
assert s["length"] == us.Mpc
assert s["mass"] == Unit("3*Msun")
assert s["time"] == us.s
| bsd-3-clause | 683,659,337,923,796,000 | 33.576 | 85 | 0.633272 | false |
arunkgupta/gramps | gramps/gen/filters/rules/person/_isdescendantfamilyof.py | 1 | 3903 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....ggettext import gettext as _
try:
set()
except NameError:
from sets import Set as set
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .. import Rule
#-------------------------------------------------------------------------
#
# IsDescendantFamilyOf
#
#-------------------------------------------------------------------------
class IsDescendantFamilyOf(Rule):
"""Rule that checks for a person that is a descendant or the spouse
of a descendant of a specified person"""
labels = [ _('ID:'), _('Inclusive:') ]
name = _('Descendant family members of <person>')
category = _('Descendant filters')
description = _("Matches people that are descendants or the spouse "
"of a descendant of a specified person")
def prepare(self,db):
self.db = db
self.matches = set()
self.root_person = db.get_person_from_gramps_id(self.list[0])
self.add_matches(self.root_person)
try:
if int(self.list[1]):
inclusive = True
else:
inclusive = False
except IndexError:
inclusive = True
if not inclusive:
self.exclude()
def reset(self):
self.matches = set()
def apply(self,db,person):
return person.handle in self.matches
def add_matches(self,person):
if not person:
return
# Add self
self.matches.add(person.handle)
for family_handle in person.get_family_handle_list():
family = self.db.get_family_from_handle(family_handle)
if family:
# Add every child recursively
for child_ref in family.get_child_ref_list():
if child_ref:
self.add_matches(self.db.get_person_from_handle(child_ref.ref))
# Add spouse
if person.handle == family.get_father_handle():
spouse_handle = family.get_mother_handle()
else:
spouse_handle = family.get_father_handle()
self.matches.add(spouse_handle)
def exclude(self):
# This removes root person and his/her spouses from the matches set
if not self.root_person: return
self.matches.remove(self.root_person.handle)
for family_handle in self.root_person.get_family_handle_list():
family = self.db.get_family_from_handle(family_handle)
if family:
if self.root_person.handle == family.get_father_handle():
spouse_handle = family.get_mother_handle()
else:
spouse_handle = family.get_father_handle()
self.matches.remove(spouse_handle)
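    # Illustrative note (not part of the original rule): the filter is driven by
    # self.list -- e.g. self.list = ["I0042", "0"] (a hypothetical Gramps ID) would
    # match descendants of that person and their spouses, while excluding the person
    # and his/her own spouses because the second ("Inclusive") entry is false.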
| gpl-2.0 | -1,071,615,286,370,999,000 | 34.481818 | 87 | 0.540354 | false |
jongyookim/IQA_BIECON_release | IQA_BIECON_release/data_load/data_loader_IQA.py | 1 | 39562 | from __future__ import absolute_import, division, print_function
import os
import sys
import timeit
import numpy as np
from scipy import misc
from scipy.ndimage.filters import convolve
from .dataset import Dataset
from ..ssim import ssim
FR_MET_BASEPATH = 'FR_met/'
FR_MET_SUBPATH_LIVE = 'LIVE/LIVE IQA DB/'
FR_MET_SUBPATH_TID2008 = 'TID2008/'
FR_MET_SUBPATH_TID2013 = 'TID2013/'
class DataLoader(object):
"""
Arguments
db_config: database configuration dictionary
"""
def __init__(self, db_config):
print('DataLoader')
self.patch_size = None
self.patch_step = None
self.base_path = None
self.n_images = 0
self.n_patches = 0
self.npat_img_list = []
self.d_pat_set = []
self.r_pat_set = []
self.loc_met_set = []
self.score_list = None
self.db_config = db_config
if db_config is not None:
self.set_config(db_config)
def set_config(self, db_config):
# Database
self.sel_data = db_config.get('sel_data', None)
self.scenes = db_config.get('scenes', 'all')
self.dist_types = db_config.get('dist_types', 'all')
self.select_database(self.sel_data, self.scenes, self.dist_types)
# Initialize patch size
self.patch_size = self.init_patch_size(
db_config.get('patch_size', None))
# Random crops
self.random_crops = int(db_config.get('random_crops', 0))
# If even patch division
self.patch_step = self.init_patch_step(
db_config.get('patch_step', None))
self.patch_mode = db_config.get('patch_mode', None)
assert self.patch_mode in ['both_side', 'shift_center', None]
# Pre-processing
self.color = db_config.get('color', 'gray')
assert self.color in ['gray', 'rgb', 'ycbcr']
self.num_ch = 1 if self.color == 'gray' else 3
self.local_norm = db_config.get('local_norm', False)
# etc.
self.horz_ref = db_config.get('horz_ref', False)
self.std_filt_r = db_config.get('std_filt_r', 1.0)
self.train_size = db_config.get('train_size', 0.8)
assert self.train_size > 0 and self.train_size < 1, \
'train_size(%.2f) is not within 0 and 1' % (self.train_size)
self.shuffle = db_config.get('shuffle', True)
self.reverse_mos = db_config.get('reverse_mos', False)
# Local metric score
self.fr_met = db_config.get('fr_met', None)
# self.fr_met_path = db_config.get('fr_met_path', None)
self.fr_met_path = FR_MET_BASEPATH
self.fr_met_scale = float(db_config.get('fr_met_scale', 1.0))
assert self.fr_met_scale > 0
if self.fr_met:
assert self.fr_met_path is not None
assert self.fr_met_scale is not None
self.select_fr_met(self.fr_met)
self.fr_met_avg = db_config.get('fr_met_avg', False)
def init_patch_size(self, patch_size):
# initialize patch size and step
if patch_size:
if isinstance(patch_size, int):
patch_size_new = (patch_size, patch_size)
elif isinstance(patch_size, (list, tuple)):
assert len(patch_size) == 2
patch_size_new = tuple(patch_size)
else:
raise ValueError('Wrong patch_size: {0}'.format(patch_size))
self.use_original_size = False
else:
patch_size_new = None
self.use_original_size = True
return patch_size_new
def init_patch_step(self, patch_step):
# initialize patch step
if not self.use_original_size:
if isinstance(patch_step, int):
patch_step_new = (patch_step, patch_step)
elif isinstance(patch_step, (list, tuple)):
assert len(patch_step) == 2
patch_step_new = tuple(patch_step)
elif patch_step is None:
assert self.patch_size is not None
patch_step_new = self.patch_size
else:
raise ValueError('Wrong patch_step:', patch_step)
else:
patch_step_new = (1, 1)
return patch_step_new
def select_database(self, sel_data, scenes, dist_types):
"""
Select database to be loaded, and check scenes and dist_types.
"""
if sel_data == 'LIVE':
from . import LIVE
self.DB_module = LIVE
elif sel_data == 'TID2008':
from . import TID2008
self.DB_module = TID2008
elif sel_data == 'TID2013':
from . import TID2013
self.DB_module = TID2013
else:
raise ValueError('Improper sel_data: {0}'.format(sel_data))
self.make_image_list_func = self.DB_module.make_image_list
if scenes == 'all' or scenes is None:
scenes = self.DB_module.ALL_SCENES
if dist_types == 'all' or dist_types is None:
dist_types = self.DB_module.ALL_DIST_TYPES
self.scenes = scenes
self.dist_types = dist_types
self.sel_data = sel_data
return scenes, dist_types
def select_fr_met(self, fr_met):
"""
Select FR-IQA metric for local metric score
"""
self.fr_met = fr_met
if fr_met is not None:
self.fr_met_ext = '.bin'
met_switcher = {
'SSIM': '.ssim',
'GMS': '.gms',
'FSIM': '.fsim',
'FSIMc': '.fsimc',
'VSI': '.vsi',
'SSIM_now': '.ssim',
}
self.fr_met_suffix = met_switcher.get(fr_met, None)
if self.fr_met_suffix is None:
raise ValueError('Select the proper name for fr_met: '
'SSIM / GMS / FSIM / FSIMc / VSI')
if self.sel_data == 'LIVE':
self.fr_met_subpath = FR_MET_SUBPATH_LIVE
elif self.sel_data == 'TID2008':
self.fr_met_subpath = FR_MET_SUBPATH_TID2008
elif self.sel_data == 'TID2013':
self.fr_met_subpath = FR_MET_SUBPATH_TID2013
else:
raise ValueError('Improper sel_data: {0}'.format(self.sel_data))
###########################################################################
def get_setting_dic(self):
config_dict = {
'sel_data': self.sel_data,
'dist_types': self.dist_types,
'patch_size': self.patch_size,
'patch_step': self.patch_step,
'random_crops': self.random_crops,
'std_filt_r': self.std_filt_r,
'horz_ref': self.horz_ref,
'color': self.color,
'local_norm': self.local_norm,
'fr_met': self.fr_met,
'shuffle': self.shuffle,
'train_size': self.train_size,
'reverse_mos': self.reverse_mos,
'n_images': self.n_images,
'n_patches': self.n_patches,
}
return config_dict
###########################################################################
def show_info(self):
if self.patch_size is not None:
print(' - Patch Size: (%d, %d)' % (
self.patch_size[0], self.patch_size[1]))
if self.random_crops > 0:
print(' - Number of random crops: %d' % self.random_crops)
else:
print(' - Patch Step = (%d, %d)' % (
self.patch_step[0], self.patch_step[1]), end='')
print(' / mode: %s' % self.patch_mode)
print(' - Color: %s' % self.color, end='')
if self.local_norm:
print(' (Local norm.)')
else:
print('')
if self.fr_met:
print(' - FR-IQA metric: %s' % self.fr_met, end='')
print(' (scale: 1/%.2f' % (1. / self.fr_met_scale), end='')
if self.fr_met_avg:
print(', averaged)')
else:
print(')')
if self.std_filt_r < 1.0 and self.random_crops == 0:
print(' - Patch sel. ratio (STD) =', self.std_filt_r)
print(' - Augmentation :', end='')
if self.horz_ref:
print(' Horz. flip')
else:
print(' None')
print(' - Reverse subj. score =', self.reverse_mos)
###########################################################################
# Data loader interface
def load_data_tr_te(self, tr_te_file=None, dataset_obj=False,
imagewise=True):
"""
Load IQA database and divide into training and testing sets.
"""
print(' (Load training/testing data (shared ref.))')
print(' - DB = %s' % (self.sel_data))
train_scenes, test_scenes = self.divide_tr_te_wrt_ref(
self.scenes, self.train_size, tr_te_file)
self.show_info()
# Get train set
print('\n (Load training data)')
data_dict = self.make_image_list_func(train_scenes, self.dist_types)
self.load_ref_dis_images(data_dict)
if self.horz_ref:
self.data_augmentation_horz_refl()
train_dataset = Dataset()
train_dataset.put_data(
self.d_pat_set, self.r_pat_set, self.dis2ref_idx,
loc_data=self.loc_met_set,
score_data=self.score_list,
npat_img_list=self.npat_img_list,
filt_idx_list=self.filt_idx_list,
imagewise=imagewise, shuffle=self.shuffle)
train_dataset.set_patch_config(
self.patch_step, self.random_crops)
# Get test set
print('\n (Load testing data)')
data_dict = self.make_image_list_func(test_scenes, self.dist_types)
self.load_ref_dis_images(data_dict)
test_dataset = Dataset()
test_dataset.put_data(
self.d_pat_set, self.r_pat_set, self.dis2ref_idx,
loc_data=self.loc_met_set,
score_data=self.score_list,
npat_img_list=self.npat_img_list,
filt_idx_list=self.filt_idx_list,
imagewise=imagewise, shuffle=self.shuffle)
test_dataset.set_patch_config(
self.patch_step, self.random_crops)
return train_dataset, test_dataset
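    # Usage sketch (hypothetical configuration values; only keys read by set_config
    # are used):
    #     loader = DataLoader({'sel_data': 'LIVE', 'patch_size': 112,
    #                          'patch_step': 80, 'color': 'gray',
    #                          'local_norm': True, 'train_size': 0.8})
    #     train_set, test_set = loader.load_data_tr_te(tr_te_file='splits/live_00.txt')
    # The tr_te_file path is an assumption -- any readable/writable location works.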
def load_data_rand(self, number, dataset_obj=False, imagewise=True):
"""
Load IQA database (random images) and
divide into training and testing sets.
"""
print(' (Load training/testing data)', end='')
print(' %d random images (shared ref.)' % number)
print(' - DB = %s' % (self.sel_data))
scenes = self.DB_module.ALL_SCENES
n_train_refs = int(np.ceil(number * self.train_size))
n_test_refs = number - n_train_refs
rand_seq = np.random.permutation(number)
scenes_sh = [scenes[elem] for elem in rand_seq]
train_scenes = sorted(scenes_sh[:n_train_refs])
test_scenes = sorted(scenes_sh[n_train_refs:])
print((' - Refs.: training = %d / testing = %d (Ratio = %.2f)' %
(n_train_refs, n_test_refs, self.train_size)))
self.show_info()
# Get train set
data_dict = self.make_image_list_func(train_scenes, self.dist_types)
self.load_ref_dis_images(data_dict)
if self.horz_ref:
self.data_augmentation_horz_refl()
train_dataset = Dataset()
train_dataset.put_data(
self.d_pat_set, self.r_pat_set, self.dis2ref_idx,
loc_data=self.loc_met_set,
score_data=self.score_list,
npat_img_list=self.npat_img_list,
filt_idx_list=self.filt_idx_list,
imagewise=imagewise, shuffle=self.shuffle)
train_dataset.set_patch_config(
self.patch_step, self.random_crops)
# Get test set
data_dict = self.make_image_list_func(test_scenes, self.dist_types)
self.load_ref_dis_images(data_dict)
test_dataset = Dataset()
test_dataset.put_data(
self.d_pat_set, self.r_pat_set, self.dis2ref_idx,
loc_data=self.loc_met_set,
score_data=self.score_list,
npat_img_list=self.npat_img_list,
filt_idx_list=self.filt_idx_list,
imagewise=imagewise, shuffle=self.shuffle)
test_dataset.set_patch_config(
self.patch_step, self.random_crops)
return train_dataset, test_dataset
def load_data_for_test(self, tr_te_file, dataset_obj=False):
"""
Load data with MOS and image data.
There are no overlapping reference images between
training and testing sets.
"""
print(' - (Load testing data)')
train_scenes, test_scenes = self.divide_tr_te_wrt_ref(
self.scenes, self.train_size, tr_te_file)
self.show_info()
# Get test set
data_dict = self.make_image_list_func(test_scenes, self.dist_types)
self.load_ref_dis_images(data_dict)
test_dataset = Dataset()
test_dataset.put_data(
self.d_pat_set, self.r_pat_set, self.dis2ref_idx,
loc_data=self.loc_met_set,
score_data=self.score_list,
npat_img_list=self.npat_img_list,
filt_idx_list=self.filt_idx_list,
imagewise=True, shuffle=False)
test_dataset.set_patch_config(
self.patch_step, self.random_crops)
return test_dataset
def load_toy_data_tr_te(self, n_images=10, imagewise=True):
""" Load toy IQA database and divide into training and testing sets.
"""
print(' - (Load toy data: train/test)')
# Get train set
self.make_toy_examples(n_images=n_images)
train_dataset = Dataset()
train_dataset.put_data(
self.d_pat_set, self.r_pat_set, self.dis2ref_idx,
loc_data=self.loc_met_set,
score_data=self.score_list,
npat_img_list=self.npat_img_list,
filt_idx_list=self.filt_idx_list,
imagewise=imagewise, shuffle=self.shuffle)
train_dataset.set_patch_config(
self.patch_step, self.random_crops)
# Get test set
self.make_toy_examples(n_images=n_images)
test_dataset = Dataset()
test_dataset.put_data(
self.d_pat_set, self.r_pat_set, self.dis2ref_idx,
loc_data=self.loc_met_set,
score_data=self.score_list,
npat_img_list=self.npat_img_list,
filt_idx_list=self.filt_idx_list,
imagewise=imagewise, shuffle=self.shuffle)
test_dataset.set_patch_config(
self.patch_step, self.random_crops)
self.show_info()
return train_dataset, test_dataset
def divide_tr_te_wrt_ref(self, scenes, train_size=0.8, tr_te_file=None):
"""
        Divide data with respect to scenes
"""
tr_te_file_loaded = False
if tr_te_file is not None and os.path.isfile(tr_te_file):
# Load tr_te_file and divide scenes accordingly
tr_te_file_loaded = True
with open(tr_te_file, 'r') as f:
train_scenes = f.readline().strip().split()
train_scenes = [int(elem) for elem in train_scenes]
test_scenes = f.readline().strip().split()
test_scenes = [int(elem) for elem in test_scenes]
n_train_refs = len(train_scenes)
n_test_refs = len(test_scenes)
train_size = (len(train_scenes) /
(len(train_scenes) + len(test_scenes)))
else:
# Divide scenes randomly
# Get the numbers of training and testing scenes
n_scenes = len(scenes)
n_train_refs = int(np.ceil(n_scenes * train_size))
n_test_refs = n_scenes - n_train_refs
# Randomly divide scenes
rand_seq = np.random.permutation(n_scenes)
scenes_sh = [scenes[elem] for elem in rand_seq]
train_scenes = sorted(scenes_sh[:n_train_refs])
test_scenes = sorted(scenes_sh[n_train_refs:])
# Write train-test idx list into file
if tr_te_file is not None:
fpath, fname = os.path.split(tr_te_file)
if not os.path.isdir(fpath):
os.makedirs(fpath)
with open(tr_te_file, 'w') as f:
for idx in range(n_train_refs):
f.write('%d ' % train_scenes[idx])
f.write('\n')
for idx in range(n_scenes - n_train_refs):
f.write('%d ' % test_scenes[idx])
f.write('\n')
print(' - Refs.: training = %d / testing = %d (Ratio = %.2f)' %
(n_train_refs, n_test_refs, train_size), end='')
if tr_te_file_loaded:
print(' (Loaded %s)' % (tr_te_file))
else:
print('')
return train_scenes, test_scenes
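    # File format sketch (inferred from the read/write code above): tr_te_file is a
    # plain-text file with two lines of space-separated scene indices, e.g.
    #     1 3 4 7    <- reference scenes used for training
    #     2 5 6      <- reference scenes used for testing
    # The concrete numbers here are illustrative only.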
###########################################################################
def load_ref_dis_images(self, data_dict):
self.score_list = data_dict['score_list']
if self.reverse_mos and self.score_list is not None:
self.score_list = 1.0 - self.score_list
self.n_images = data_dict['n_images']
base_path = data_dict['base_path']
d_img_list = data_dict['d_img_list']
r_img_list = data_dict['r_img_list']
r_idx_list = data_dict['r_idx_list']
scenes = data_dict['scenes']
res = self.load_ref_images(
base_path, r_img_list, r_idx_list, scenes)
ref_img2pat_idx, ref_top_left_set, ref_npat_img_list, inv_scenes = res
self.load_dis_images(
base_path, d_img_list, r_idx_list, inv_scenes,
ref_img2pat_idx, ref_top_left_set, ref_npat_img_list)
def load_ref_images(self, base_path, r_img_list, r_idx_list, scenes):
"""
Actual routine of loading reference images.
"""
self.n_ref_images = len(scenes)
n_dis_images = len(r_img_list)
# make a list of reference index
ref_idx_idx_list = []
for ref_idx in scenes:
for idx, this_ref_idx in enumerate(r_idx_list):
if ref_idx == this_ref_idx:
ref_idx_idx_list.append(idx)
break
if idx == n_dis_images - 1:
raise ValueError('@ No %d index in r_idx_list' % ref_idx)
new_r_img_list = []
for idx in ref_idx_idx_list:
new_r_img_list.append(r_img_list[idx])
inv_scenes = np.ones(max(scenes) + 1, dtype='int32') * -1
for idx, scn in enumerate(scenes):
inv_scenes[scn] = idx
patch_size = self.patch_size
patch_step = self.patch_step
n_ref_patches = 0
ref_npat_img_list = []
ref_img2pat_idx = []
ref_top_left_set = []
r_pat_set = []
#######################################################################
# Read images
start_time = timeit.default_timer()
pass_list = []
for im_idx in range(self.n_ref_images):
# Show progress
show_progress(float(im_idx) / self.n_ref_images)
# Read ref. and dist. images
r_img_raw = misc.imread(
os.path.join(base_path, new_r_img_list[im_idx]))
cur_h = r_img_raw.shape[0]
cur_w = r_img_raw.shape[1]
if self.use_original_size:
patch_size = (cur_h, cur_w)
# Gray or RGB
r_img = convert_color2(r_img_raw, self.color)
# Local normalization
if self.local_norm:
if self.color == 'gray':
# faster
r_img_norm = local_normalize_1ch(r_img)
else:
r_img_norm = local_normalize(r_img, self.num_ch)
else:
r_img_norm = r_img.astype('float32') / 255.
if self.color == 'gray':
r_img_norm = r_img_norm[:, :, None]
# numbers of patches along y and x axes
ny = (cur_h - patch_size[0]) // patch_step[0] + 1
nx = (cur_w - patch_size[1]) // patch_step[1] + 1
npat = int(ny * nx)
ref_npat_img_list.append((npat, ny, nx))
ref_img2pat_idx.append(n_ref_patches + np.arange(npat))
n_ref_patches += npat
if npat == 0:
pass_list.append(im_idx)
ref_top_left_set.append(False)
continue
# get non-covered length along y and x axes
cov_height = patch_step[0] * (ny - 1) + patch_size[0]
cov_width = patch_step[1] * (nx - 1) + patch_size[1]
nc_height = cur_h - cov_height
nc_width = cur_w - cov_width
# Shift center
if self.patch_mode == 'shift_center':
shift = [(nc_height + 1) // 2, (nc_width + 1) // 2]
if shift[0] % 2 == 1:
shift[0] -= 1
if shift[1] % 2 == 1:
shift[1] -= 1
shift = tuple(shift)
else:
shift = (0, 0)
# generate top_left_set of patches
top_left_set = np.zeros((nx * ny, 2), dtype=np.int)
for yidx in range(ny):
for xidx in range(nx):
top = (yidx * patch_step[0] + shift[0])
left = (xidx * patch_step[1] + shift[1])
top_left_set[yidx * nx + xidx] = [top, left]
ref_top_left_set.append(top_left_set)
# Crop the images to patches
for idx in range(ny * nx):
[top, left] = top_left_set[idx]
if top + patch_size[0] > cur_h:
print('\n@Error: imidx=%d, pat=%d' % (im_idx, idx), end='')
print(' (%d > %d)' % (top + patch_size[0], cur_h))
if left + patch_size[1] > cur_w:
print('\n@Error: imidx=%d, pat=%d' % (im_idx, idx), end='')
print(' (%d > %d)' % (left + patch_size[1], cur_w))
r_crop_norm = r_img_norm[top:top + patch_size[0],
left:left + patch_size[1]]
r_pat_set.append(r_crop_norm)
# Show 100% progress bar
show_progress(1.0)
minutes, seconds = divmod(timeit.default_timer() - start_time, 60)
print(' - It took {:02.0f}:{:05.2f}'.format(minutes, seconds))
print(' - Loaded num of ref. patches: {:,}'.format(n_ref_patches))
if len(pass_list) > 0:
self.n_images -= len(pass_list)
print(' - Ignored ref. images due to small size: %s' %
', '.join(str(i) for i in pass_list))
self.n_ref_patches = n_ref_patches
self.ref_npat_img_list = ref_npat_img_list
self.ref_top_left_set = ref_top_left_set
self.ref_img2pat_idx = ref_img2pat_idx
self.r_pat_set = r_pat_set
return ref_img2pat_idx, ref_top_left_set, ref_npat_img_list, inv_scenes
def load_dis_images(self, base_path, d_img_list, r_idx_list, inv_scenes,
ref_img2pat_idx, ref_top_left_set, ref_npat_img_list):
"""
Actual routine of loading distorted images.
"""
self.n_images = len(d_img_list)
d_img_list = d_img_list
assert self.n_images > 0, \
'n_images(%d) is not positive number' % (self.n_images)
patch_size = self.patch_size
n_patches = 0
npat_img_list = []
d_pat_set = []
loc_met_set = []
filt_idx_list = []
dis2ref_idx = []
#######################################################################
# Read images
start_time = timeit.default_timer()
pat_idx = 0
pass_list = []
for im_idx in range(self.n_images):
ref_idx = inv_scenes[r_idx_list[im_idx]]
assert ref_idx >= 0
if ref_top_left_set[ref_idx] is False:
continue
# Show progress
show_progress(float(im_idx) / self.n_images)
# Read ref. and dist. images
d_img_raw = misc.imread(
os.path.join(base_path, d_img_list[im_idx]))
cur_h = d_img_raw.shape[0]
cur_w = d_img_raw.shape[1]
if self.use_original_size:
patch_size = (cur_h, cur_w)
if cur_h < patch_size[0] or cur_w < patch_size[1]:
pass_list.append(im_idx)
continue
# Gray or RGB
d_img = convert_color2(d_img_raw, self.color)
# Read local metric scores
if self.fr_met:
ext = int(1. / self.fr_met_scale) - 1
met_size = (int((cur_h + ext) * self.fr_met_scale),
int((cur_w + ext) * self.fr_met_scale))
met_pat_size = (int((patch_size[0] + ext) * self.fr_met_scale),
int((patch_size[1] + ext) * self.fr_met_scale))
if self.fr_met == 'SSIM_now':
# d_img_ds = misc.imresize(d_img, met_size, interp='bicubic')
# r_img_ds = misc.imresize(r_img, met_size, interp='bicubic')
# loc_q_map = ssim(d_img_ds, r_img_ds)
raise NotImplementedError()
else:
met_s_fname = (d_img_list[im_idx] +
self.fr_met_suffix + self.fr_met_ext)
loc_q_map = np.fromfile(
os.path.join(self.fr_met_path, self.fr_met_subpath,
met_s_fname),
dtype='float32')
loc_q_map = loc_q_map.reshape(
(met_size[1], met_size[0])).transpose()
# Local normalization
if self.local_norm:
if self.color == 'gray':
# faster
d_img_norm = local_normalize_1ch(d_img)
else:
d_img_norm = local_normalize(d_img, self.num_ch)
else:
d_img_norm = d_img.astype('float32') / 255.
if self.color == 'gray':
d_img_norm = d_img_norm[:, :, None]
top_left_set = ref_top_left_set[ref_idx]
cur_n_patches = top_left_set.shape[0]
if self.random_crops > 0:
if self.random_crops < cur_n_patches:
n_crops = self.random_crops
rand_perm = np.random.permutation(cur_n_patches)
sel_patch_idx = sorted(rand_perm[:n_crops])
top_left_set = top_left_set[sel_patch_idx].copy()
else:
n_crops = cur_n_patches
sel_patch_idx = np.arange(cur_n_patches)
npat_filt = n_crops
npat_img_list.append((npat_filt, 1, npat_filt))
n_patches += npat_filt
idx_set = list(range(npat_filt))
filt_idx_list.append(idx_set)
else:
# numbers of patches along y and x axes
npat, ny, nx = ref_npat_img_list[ref_idx]
npat_filt = int(npat * self.std_filt_r)
npat_img_list.append((npat_filt, ny, nx))
n_patches += npat_filt
if self.std_filt_r < 1.0:
std_set = np.zeros((nx * ny))
for idx, top_left in enumerate(top_left_set):
top, left = top_left
std_set[idx] = np.std(
d_img[top:top + patch_size[0],
left:left + patch_size[1]])
# Filter the patches with low std
if self.std_filt_r < 1.0:
idx_set = sorted(list(range(len(std_set))),
key=lambda x: std_set[x], reverse=True)
idx_set = sorted(idx_set[:npat_filt])
else:
idx_set = list(range(npat_filt))
filt_idx_list.append(idx_set)
# Crop the images to patches
for idx in idx_set:
[top, left] = top_left_set[idx]
if top + patch_size[0] > cur_h:
print('\n@Error: imidx=%d, pat=%d' % (im_idx, idx), end='')
print(' (%d > %d)' % (top + patch_size[0], cur_h))
if left + patch_size[1] > cur_w:
print('\n@Error: imidx=%d, pat=%d' % (im_idx, idx), end='')
print(' (%d > %d)' % (left + patch_size[1], cur_w))
d_crop_norm = d_img_norm[top:top + patch_size[0],
left:left + patch_size[1]]
d_pat_set.append(d_crop_norm)
if self.random_crops > 0:
dis2ref_idx.append(
ref_img2pat_idx[ref_idx][sel_patch_idx[idx]])
else:
dis2ref_idx.append(ref_img2pat_idx[ref_idx][idx])
# Crop the local metric scores
if self.fr_met:
ext = int(1. / self.fr_met_scale) - 1
top_r = int((top + ext) * self.fr_met_scale)
left_r = int((left + ext) * self.fr_met_scale)
if top_r + met_pat_size[0] > met_size[0]:
print('\n@Error (FR metric size):', end='')
print(' imidx=%d, pat=%d' % (im_idx, idx), end='')
print(' (%d > %d)' % (
top_r + met_pat_size[0], met_size[0]))
if left_r + met_pat_size[1] > met_size[1]:
print('\n@Error (FR metric size):', end='')
print(' imidx=%d, pat=%d' % (im_idx, idx), end='')
print(' (%d > %d)' % (
left_r + met_pat_size[1], met_size[1]))
loc_met_crop = loc_q_map[top_r:top_r + met_pat_size[0],
left_r:left_r + met_pat_size[1]]
if loc_met_crop.shape != met_pat_size:
                        print('\n@Error (loc_met_crop.shape != met_pat_size)')
print("@ image (%d-%d):" % (im_idx, idx),
d_img_list[im_idx])
print("@ loc_met_crop.shape:", loc_met_crop.shape)
print("@ met_size:", met_size)
print("@ top_r:", top_r)
print("@ left_r:", left_r)
os.system("pause")
if self.fr_met_avg:
loc_met_set.append(
np.mean(loc_met_crop, keepdims=True))
else:
loc_met_set.append(loc_met_crop)
pat_idx += 1
# Show 100 % progress bar
show_progress(1.0)
minutes, seconds = divmod(timeit.default_timer() - start_time, 60)
print(' - It took {:02.0f}:{:05.2f}'.format(minutes, seconds))
print(' - Loaded num of patches: {:,}'.format(n_patches))
if len(pass_list) > 0:
self.n_images -= len(pass_list)
print(' - Ignored image list due to small size: %s' %
', '.join(str(i) for i in pass_list))
self.n_patches = n_patches
self.npat_img_list = npat_img_list
self.d_pat_set = d_pat_set
if self.fr_met:
self.loc_met_set = loc_met_set
self.filt_idx_list = filt_idx_list
self.dis2ref_idx = dis2ref_idx
def make_toy_examples(self, patch_size=None, n_images=10):
if patch_size is None:
patch_size = self.patch_size
if self.patch_size is None:
patch_size = [64, 64]
self.patch_size = patch_size
n_ch = 1 if self.color == 'gray' else 3
ny = 2
nx = 2
score_list = np.zeros(n_images, dtype='float32')
npat_img_list = []
filt_idx_list = []
n_patches = 0
for im_idx in range(n_images):
npat = ny * nx
npat_img_list.append((npat, ny, nx))
n_patches += npat
filt_idx_list.append(list(range(npat)))
d_pat_set = []
pat_shape = (patch_size[0], patch_size[1], n_ch)
dummy_pat = np.ones(pat_shape, dtype='float32') * 0.5
for idx in range(n_patches):
d_pat_set.append(dummy_pat)
print(' - Generated toy examples: %d x' % n_patches, pat_shape)
loc_met_set = []
dis2ref_idx = []
for idx in range(n_patches):
loc_met_set.append(np.mean(dummy_pat, keepdims=True))
dis2ref_idx.append(idx)
self.n_images = n_images
self.score_list = score_list
self.n_patches = n_patches
self.npat_img_list = npat_img_list
self.d_pat_set = d_pat_set
self.r_pat_set = d_pat_set
self.loc_met_set = loc_met_set
self.filt_idx_list = filt_idx_list
self.dis2ref_idx = dis2ref_idx
def data_augmentation_horz_refl(self):
# Patches augmentation
pat_idx = 0
for pat_idx in range(self.n_patches):
# Flip horizontal
self.d_pat_set.append(self.d_pat_set[pat_idx][:, ::-1])
if self.fr_met:
self.loc_met_set.append(self.loc_met_set[pat_idx][:, ::-1])
self.dis2ref_idx.append(
self.dis2ref_idx[pat_idx] + self.n_ref_patches)
for pat_idx in range(self.n_ref_patches):
# Flip horizontal
self.r_pat_set.append(self.r_pat_set[pat_idx][:, ::-1])
# Image data augmentation
if self.score_list is not None:
self.score_list = np.tile(self.score_list, 2)
self.npat_img_list += self.npat_img_list
self.filt_idx_list += self.filt_idx_list
self.n_patches *= 2
self.n_images *= 2
self.n_ref_patches *= 2
if self.score_list is not None:
assert self.score_list.shape[0] == self.n_images, (
'n_score_list: %d != n_images: %d' %
(self.score_list.shape[0], self.n_images))
assert len(self.npat_img_list) == self.n_images, (
'n_npat_img_list: %d != n_images: %d' %
(len(self.npat_img_list), self.n_images))
assert len(self.filt_idx_list) == self.n_images, (
'n_filt_idx_list: %d != n_images: %d' %
(len(self.filt_idx_list), self.n_images))
print(' - Augmented patches: {0:,}'.format(self.n_patches), end=' ')
print('(x2 horz. reflection)')
def show_progress(percent):
hashes = '#' * int(round(percent * 20))
spaces = ' ' * (20 - len(hashes))
sys.stdout.write("\r - Load images: [{0}] {1}%".format(
hashes + spaces, int(round(percent * 100))))
sys.stdout.flush()
def convert_color(img, color):
""" Convert image into gray or RGB or YCbCr.
"""
assert len(img.shape) in [2, 3]
if color == 'gray':
# if d_img_raw.shape[2] == 1:
if len(img.shape) == 2: # if gray
img_ = img[:, :, None]
elif len(img.shape) == 3: # if RGB
if img.shape[2] > 3:
img = img[:, :, :3]
img_ = rgb2gray(img)[:, :, None]
elif color == 'rgb':
if len(img.shape) == 2: # if gray
img_ = gray2rgb(img)
elif len(img.shape) == 3: # if RGB
if img.shape[2] > 3:
img = img[:, :, :3]
img_ = img
elif color == 'ycbcr':
if len(img.shape) == 2: # if gray
img_ = rgb2ycbcr(gray2rgb(img))
elif len(img.shape) == 3: # if RGB
if img.shape[2] > 3:
img = img[:, :, :3]
img_ = rgb2ycbcr(img)
else:
raise ValueError("Improper color selection: %s" % color)
return img_
def convert_color2(img, color):
""" Convert image into gray or RGB or YCbCr.
(In case of gray, dimension is not increased for
the faster local normalization.)
"""
assert len(img.shape) in [2, 3]
if color == 'gray':
# if d_img_raw.shape[2] == 1:
if len(img.shape) == 3: # if RGB
if img.shape[2] > 3:
img = img[:, :, :3]
img_ = rgb2gray(img)
elif color == 'rgb':
if len(img.shape) == 2: # if gray
img_ = gray2rgb(img)
elif len(img.shape) == 3: # if RGB
if img.shape[2] > 3:
img = img[:, :, :3]
img_ = img
elif color == 'ycbcr':
if len(img.shape) == 2: # if gray
img_ = rgb2ycbcr(gray2rgb(img))
elif len(img.shape) == 3: # if RGB
if img.shape[2] > 3:
img = img[:, :, :3]
img_ = rgb2ycbcr(img)
else:
raise ValueError("Improper color selection: %s" % color)
return img_
def gray2rgb(im):
w, h = im.shape
ret = np.empty((w, h, 3), dtype=np.uint8)
ret[:, :, :] = im[:, :, np.newaxis]
return ret
def rgb2gray(rgb):
assert rgb.shape[2] == 3
return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])
def rgb2ycbcr(rgb):
xform = np.array([[.299, .587, .114],
[-.1687, -.3313, .5],
[.5, -.4187, -.0813]])
ycbcr = np.dot(rgb[..., :3], xform.T)
ycbcr[:, :, [1, 2]] += 128
return ycbcr
def ycbcr2rgb(ycbcr):
xform = np.array([[1, 0, 1.402],
[1, -0.34414, -.71414],
[1, 1.772, 0]])
rgb = ycbcr.astype('float32')
rgb[:, :, [1, 2]] -= 128
return rgb.dot(xform.T)
k = np.float32([1, 4, 6, 4, 1])
k = np.outer(k, k)
kern = k / k.sum()
def local_normalize_1ch(img, const=127.0):
mu = convolve(img, kern, mode='nearest')
mu_sq = mu * mu
im_sq = img * img
tmp = convolve(im_sq, kern, mode='nearest') - mu_sq
sigma = np.sqrt(np.abs(tmp))
structdis = (img - mu) / (sigma + const)
# Rescale within 0 and 1
# structdis = (structdis + 3) / 6
structdis = 2. * structdis / 3.
return structdis
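# Minimal usage sketch (illustrative, not in the original file):
#     img = (np.random.rand(64, 64) * 255).astype('float32')   # stand-in grayscale image
#     norm = local_normalize_1ch(img)
# Each pixel becomes (img - local_mean) / (local_std + const), with the local moments
# taken over the 5x5 binomial kernel `kern` defined above, then scaled by 2/3.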
def local_normalize(img, num_ch=1, const=127.0):
if num_ch == 1:
mu = convolve(img[:, :, 0], kern, mode='nearest')
mu_sq = mu * mu
im_sq = img[:, :, 0] * img[:, :, 0]
tmp = convolve(im_sq, kern, mode='nearest') - mu_sq
sigma = np.sqrt(np.abs(tmp))
structdis = (img[:, :, 0] - mu) / (sigma + const)
# Rescale within 0 and 1
# structdis = (structdis + 3) / 6
structdis = 2. * structdis / 3.
norm = structdis[:, :, None]
elif num_ch > 1:
norm = np.zeros(img.shape, dtype='float32')
for ch in range(num_ch):
mu = convolve(img[:, :, ch], kern, mode='nearest')
mu_sq = mu * mu
im_sq = img[:, :, ch] * img[:, :, ch]
tmp = convolve(im_sq, kern, mode='nearest') - mu_sq
sigma = np.sqrt(np.abs(tmp))
structdis = (img[:, :, ch] - mu) / (sigma + const)
# Rescale within 0 and 1
# structdis = (structdis + 3) / 6
structdis = 2. * structdis / 3.
norm[:, :, ch] = structdis
return norm
| mit | 1,415,385,897,606,707,200 | 35.939309 | 81 | 0.496411 | false |
tommyogden/maxwellbloch | maxwellbloch/tests/test_ob_solve.py | 1 | 8224 | # -*- coding: utf-8 -*-
"""
Unit tests for the OBSolve class.
Thomas Ogden <[email protected]>
"""
import os
import unittest
import numpy as np
from maxwellbloch import ob_solve, t_funcs
# Absolute path of tests/json directory, so that tests can be called from
# different directories.
JSON_DIR = os.path.abspath(os.path.join(__file__, '../', 'json'))
JSON_STR_02 = (
'{'
' "atom": {'
' "decays": ['
' { "channels": [[0,1], [1,2]], '
' "rate": 1.0'
' }'
' ],'
' "energies": [],'
' "fields": ['
' {'
' "coupled_levels": ['
' [0, 1]'
' ],'
' "detuning": 0.0,'
' "detuning_positive": true,'
' "label": "probe",'
' "rabi_freq": 5.0,'
' "rabi_freq_t_args": {},'
' "rabi_freq_t_func": null'
' },'
' {'
' "coupled_levels": ['
' [1, 2]'
' ],'
' "detuning": 0.0,'
' "detuning_positive": false,'
' "label": "coupling",'
' "rabi_freq": 10.0,'
' "rabi_freq_t_args": {},'
' "rabi_freq_t_func": null'
' }'
' ],'
' "num_states": 3'
' },'
' "t_min": 0.0,'
' "t_max": 1.0,'
' "t_steps": 100,'
' "method": "mesolve",'
' "opts": {}'
'}'
)
class TestSetFieldRabiTFunc(unittest.TestCase):
""" Test setting custom Rabi frequency time functions. """
def test_set_field_rabi_t_func_1(self):
""" Test that a custom double pulse Rabi freq time functions can be
set.
"""
ob_solve_02 = ob_solve.OBSolve().from_json_str(JSON_STR_02)
two_pulse_t_func = lambda t, args: (t_funcs.gaussian(0)(t, args) +
t_funcs.gaussian(1)(t, args))
two_pulse_t_args = {"ampl_0": 1.0, "centre_0": 0.0, "fwhm_0": 0.1,
"ampl_1": 2.0, "centre_1": 0.5, "fwhm_1": 0.1, }
ob_solve_02.set_field_rabi_freq_t_func(0, two_pulse_t_func)
ob_solve_02.set_field_rabi_freq_t_args(0, two_pulse_t_args)
field_0 = ob_solve_02.atom.fields[0]
self.assertAlmostEqual(field_0.rabi_freq_t_func(0.0,
field_0.rabi_freq_t_args), 1.0)
self.assertAlmostEqual(field_0.rabi_freq_t_func(0.5,
field_0.rabi_freq_t_args), 2.0)
self.assertAlmostEqual(field_0.rabi_freq_t_func(1.0,
field_0.rabi_freq_t_args), 0.0)
class TestSolve(unittest.TestCase):
def test_two_level_rabi_oscillations(self):
""" Solve the optical Bloch equations for the two-level atom.
Notes:
See https://en.wikipedia.org/wiki/Rabi_cycle
"""
RABI_FREQ = 5.0
atom_dict = {"fields": [{"coupled_levels": [[0, 1]],
"rabi_freq": RABI_FREQ}], "num_states": 2}
obs = ob_solve.OBSolve(atom=atom_dict, t_min=0.0, t_max=1.0,
t_steps=100)
obs.solve()
# Get the populations
pop_0 = np.absolute(obs.states_t()[:, 0, 0])
pop_1 = np.absolute(obs.states_t()[:, 1, 1])
# The solution is known, we should have Rabi cycling at the frequency.
known_0 = np.cos(2.0*np.pi*RABI_FREQ*obs.tlist/2.0)**2
known_1 = np.sin(2.0*np.pi*RABI_FREQ*obs.tlist/2.0)**2
self.assertTrue(np.allclose(pop_0, known_0, rtol=1.e-5, atol=1.e-5))
self.assertTrue(np.allclose(pop_1, known_1, rtol=1.e-5, atol=1.e-5))
# If you want to take a look
# import matplotlib.pyplot as plt
# plt.plot(obs.tlist, pop_0)
# plt.plot(obs.tlist, known_0, ls='dashed')
# plt.plot(obs.tlist, pop_1)
# plt.plot(obs.tlist, known_1, ls='dashed')
# plt.show()
def test_two_level_with_opts(self):
""" Same as test_two_level_rabi_oscillations() but with opts set such
            that the tolerances are looser. The results will be less
accurate.
"""
RABI_FREQ = 5.0
atom_dict = {"fields": [{"coupled_levels": [[0, 1]],
"rabi_freq": RABI_FREQ}], "num_states": 2,
"initial_state": [1., 0.]}
obs = ob_solve.OBSolve(atom=atom_dict, t_min=0.0, t_max=1.0,
t_steps=100, opts={'atol': 1e-6, 'rtol': 1e-4})
obs.solve()
# Get the populations
pop_0 = np.absolute(obs.states_t()[:, 0, 0])
pop_1 = np.absolute(obs.states_t()[:, 1, 1])
# The solution is known, we should have Rabi cycling at the frequency.
known_0 = np.cos(2.0 * np.pi * RABI_FREQ * obs.tlist / 2.0)**2
known_1 = np.sin(2.0 * np.pi * RABI_FREQ * obs.tlist / 2.0)**2
# Compared with test_two_level_rabi_oscillations() we can only assert
# a lower tolerance to the known solution.
self.assertTrue(np.allclose(pop_0, known_0, rtol=1.e-3, atol=1.e-3))
self.assertTrue(np.allclose(pop_1, known_1, rtol=1.e-3, atol=1.e-3))
# If you want to take a look
# import matplotlib.pyplot as plt
# plt.plot(obs.tlist, pop_0)
# plt.plot(obs.tlist, known_0, ls='dashed')
# plt.plot(obs.tlist, pop_1)
# plt.plot(obs.tlist, known_1, ls='dashed')
# plt.show()
def test_two_level_with_inital_state(self):
""" Same as test_two_level_rabi_oscillations() but with the initial
state set so that the population starts in the upper level.
"""
RABI_FREQ = 5.0
atom_dict = {"fields": [{"coupled_levels": [[0, 1]],
"rabi_freq": RABI_FREQ}], "num_states": 2,
"initial_state": [0., 1.]}
obs = ob_solve.OBSolve(atom=atom_dict, t_min=0.0, t_max=1.0,
t_steps=100)
obs.solve()
# Get the populations
pop_0 = np.absolute(obs.states_t()[:, 0, 0])
pop_1 = np.absolute(obs.states_t()[:, 1, 1])
# The solution is as test_two_level_rabi_oscillations() but swapped
known_0 = np.sin(2.0*np.pi*RABI_FREQ*obs.tlist/2.0)**2
known_1 = np.cos(2.0*np.pi*RABI_FREQ*obs.tlist/2.0)**2
self.assertTrue(np.allclose(pop_0, known_0, rtol=1.e-5, atol=1.e-5))
self.assertTrue(np.allclose(pop_1, known_1, rtol=1.e-5, atol=1.e-5))
class TestJSON(unittest.TestCase):
def test_to_from_json_str_00(self):
ob_solve_00 = ob_solve.OBSolve()
ob_solve_01 = ob_solve.OBSolve.from_json_str(ob_solve_00.to_json_str())
self.assertEqual(ob_solve_00.to_json_str(), ob_solve_01.to_json_str())
def test_from_json_str(self):
ob_solve_02 = ob_solve.OBSolve().from_json_str(JSON_STR_02)
self.assertEqual(ob_solve_02.t_min, 0.0)
self.assertEqual(ob_solve_02.t_max, 1.0)
self.assertEqual(ob_solve_02.t_steps, 100)
self.assertEqual(ob_solve_02.method, "mesolve")
def test_to_from_json_str_03(self):
json_path = os.path.join(JSON_DIR, "ob_solve_03.json")
obs = ob_solve.OBSolve().from_json(json_path)
obs_test = ob_solve.OBSolve.from_json_str(obs.to_json_str())
self.assertEqual(obs.to_json_str(), obs_test.to_json_str())
def test_to_from_json(self):
import os
filepath = "test_ob_solve_02.json"
ob_solve_02 = ob_solve.OBSolve().from_json_str(JSON_STR_02)
ob_solve_02.to_json(filepath)
ob_solve_03 = ob_solve.OBSolve().from_json(filepath)
os.remove(filepath)
self.assertEqual(ob_solve_02.to_json_str(),
ob_solve_03.to_json_str())
class TestSaveLoad(unittest.TestCase):
""" Tests for the OBSolve save and load methods."""
def test_save_load_01(self):
""" Solve a basic OBSolve problem. Save the results to file. Set the
results in the OBSolve object to null. Load the results from file
and check that they match the original values.
"""
json_path = os.path.join(JSON_DIR, "ob_solve_02.json")
ob_solve_02 = ob_solve.OBSolve().from_json(json_path)
states_t = ob_solve_02.solve()
states_t_loaded = ob_solve_02.solve(recalc=False)
self.assertTrue((states_t == states_t_loaded).all())
| mit | -7,425,014,225,698,490,000 | 32.567347 | 79 | 0.542437 | false |
chenchiyuan/hawaii | hawaii/apps/weixin/models/apps.py | 1 | 2860 | # -*- coding: utf-8 -*-
# __author__ = chenchiyuan
from __future__ import division, unicode_literals, print_function
from django.db import models
from libs.models.models import SingletonModel
from django.conf import settings
from libs.uuids import get_uuid
import requests
import json
class App(SingletonModel):
class Meta:
app_label = "weixin"
db_table = "weixin_app"
verbose_name_plural = verbose_name = u"账号设置"
name = models.CharField("微信名", max_length=64, default="", blank=True, null=True)
app_url = models.CharField("微信回调地址", max_length=256, blank=True, null=True)
app_token = models.CharField("微信Token", max_length=64, blank=True, null=True)
app_key = models.CharField("app_key", max_length=64, blank=True, null=True)
app_id = models.CharField("app_secret", max_length=64, blank=True, null=True)
def __unicode__(self):
return bool(self.name) and self.name or self.owner.email
@property
def subscribe_rule(self):
subscribe = self.subscribeitem_set.all()
if not subscribe.count():
return None
else:
return subscribe[0].rule
def get_app_url(self):
return "%s/weixin/callback/" % settings.APP_HOST_NAME
def save(self, force_insert=False, force_update=False, using=None):
if force_insert and force_update:
raise ValueError("Cannot force both insert and updating in model saving.")
if not self.app_url:
self.app_url = self.get_app_url()
if not self.app_token:
self.app_token = get_uuid()
if self.app_key and self.app_id:
self.delete_menus()
self.create_menus()
super(App, self).save(force_insert, force_update, using)
def get_access_token(self):
if not any([self.app_key, self.app_id]):
raise Exception(u"必须申请app_key和app_secret".encode("utf-8"))
url = "https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid=%s&secret=%s" \
% (self.app_key, self.app_id)
response = requests.get(url)
json_data = json.loads(response.content)
return json_data['access_token']
def create_menus(self):
from hawaii.apps.weixin.models.menus import MenuItem
token = self.get_access_token()
post_dict = MenuItem.get_menus_by_app(self)
headers = {'content-type': 'application/json'}
url = "https://api.weixin.qq.com/cgi-bin/menu/create?access_token=%s" % token
return requests.post(url, data=json.dumps(post_dict, ensure_ascii=False).encode("utf-8"), headers=headers)
def delete_menus(self):
token = self.get_access_token()
url = "https://api.weixin.qq.com/cgi-bin/menu/delete?access_token=%s" % token
return requests.get(url) | bsd-3-clause | 1,870,618,801,998,368,800 | 35.636364 | 114 | 0.639362 | false |
Azure/azure-sdk-for-python | sdk/azureadb2c/azure-mgmt-azureadb2c/azure/mgmt/azureadb2c/v2019_01_01_preview/_cpim_configuration_client.py | 1 | 3068 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.mgmt.core import ARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional
from azure.core.credentials import TokenCredential
from ._configuration import CPIMConfigurationClientConfiguration
from .operations import B2CTenantsOperations
from .operations import Operations
from . import models
class CPIMConfigurationClient(object):
"""CPIM Configuration Client.
:ivar b2_ctenants: B2CTenantsOperations operations
:vartype b2_ctenants: $(python-base-namespace).v2019_01_01_preview.operations.B2CTenantsOperations
:ivar operations: Operations operations
:vartype operations: $(python-base-namespace).v2019_01_01_preview.operations.Operations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
:type subscription_id: str
:param str base_url: Service URL
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
def __init__(
self,
credential, # type: "TokenCredential"
subscription_id, # type: str
base_url=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
if not base_url:
base_url = 'https://management.azure.com'
self._config = CPIMConfigurationClientConfiguration(credential, subscription_id, **kwargs)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self.b2_ctenants = B2CTenantsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.operations = Operations(
self._client, self._config, self._serialize, self._deserialize)
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> CPIMConfigurationClient
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
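# Usage sketch (added for illustration; the credential class and subscription ID are
# placeholders, not part of this generated file):
#     from azure.identity import DefaultAzureCredential
#     with CPIMConfigurationClient(DefaultAzureCredential(), "<subscription-id>") as client:
#         ...  # call client.b2_ctenants / client.operations as needed
# Using the client as a context manager ensures the underlying pipeline client is
# closed on exit.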
| mit | 7,716,659,260,923,566,000 | 40.459459 | 172 | 0.66395 | false |
leleobhz/phonetica | high_experimental/mvuorine/four1.py | 1 | 1236 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from math import sin
# Replaces data[1..2*nn] by its discrete Fourier transform, if isign
# is input as 1; or replaces data[1..2*nn] by nn times its inverse
# discrete Fourier transform, if isign is input as -1. data is a
# complex array of length nn or, equivalently, a real array of length
# 2*nn. nn MUST be an integer power of 2 (this is not checked for!).
def four1(data, nn, isign):
n=nn << 1
j=1
for i in range(1, n, 2):
if (j > i):
tmp = data[j]
data[j] = data[i]
data[i] = tmp
tmp = data[j+1]
data[j+1] = data[i+1]
data[i+1] = tmp
m=n >> 1
while (m >= 2 and j > m):
j -= m
m >>= 1
j += m;
mmax=2
while(n > mmax):
istep=mmax << 1
theta=isign*(6.28318503717959/mmax)
wtemp=sin(0.5*theta)
wpr = -2.0*wtemp*wtemp
wpi=sin(theta)
wr=1.0
wi=0.0
for m in range(1, mmax, 2):
for i in range(m, n+1, istep):
j=i+mmax
tempr=wr*data[j]-wi*data[j+1]
tempi=wr*data[j+1]+wi*data[j]
data[j]=data[i]-tempr
data[j+1]=data[i+1]-tempi
data[i] += tempr
data[i+1]+= tempi
wtemp=wr
wr=wtemp*wpr-wi*wpi+wr
wi=wi*wpr+wtemp*wpi+wi
mmax=istep
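# Usage sketch (added comment, not in the original file): the routine keeps the
# Numerical Recipes 1-based convention, so a complex signal of nn points is packed
# as [unused, re0, im0, re1, im1, ...] with 2*nn + 1 entries, e.g.
#     data = [0.0] + [v for pair in zip(re, im) for v in pair]  # re, im: length-nn lists
#     four1(data, nn, 1)    # forward DFT, in place
#     four1(data, nn, -1)   # inverse DFT times nn, per the header comment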
| gpl-2.0 | -3,151,591,414,976,966,000 | 22.72 | 72 | 0.562298 | false |
concordusapps/django-identity | src/identity/saml/views/provider.py | 1 | 5398 | # -*- coding: utf-8 -*-
""" \file identity/saml/views/provider.py
\brief Implements the SAML endpoints for providers.
\author Erich Healy (cactuscommander) [email protected]
\author Ryan Leckey (mehcode) [email protected]
\copyright Copyright 2012 © Concordus Applications, Inc.
All Rights Reserved.
"""
from .. import models
from ...models import Provider, Consumer
from ..client import binding
from lxml import etree
import saml.schema.saml
import saml.schema.samlp
from saml import schema
from django.shortcuts import get_object_or_404, redirect
from django.core.urlresolvers import reverse
from uuid import uuid4
from urllib import urlencode
from django.contrib.auth.decorators import login_required
from datetime import datetime, timedelta
def sso(request, *args, **kwargs):
"""Single sign on (SSO)."""
# Get ACS profile instance
acs = models.Profile.objects.get(slug='acs')
# Verify that the provider described in the URL exists
provider = get_object_or_404(Provider, slug=kwargs['slug'])
# Determine if we are from the login form or not
from_login = request.method == 'GET' and len(request.GET) == 1
if not from_login:
# Generate request identifier to namespace variables stored in session
# storage
identifier = uuid4().hex
else:
# Grab the identifier from the GET paramter passed back from the login
# form
identifier = request.GET['id']
# Template to pull namespaced items out of the session storage
storage = '{}:saml:{{}}'.format(identifier)
if not from_login:
# Decode and deserialize the message
message, state = binding.Binding.receive(request, 'request')
xml = etree.XML(message)
obj = schema.samlp.AuthenticationRequest.deserialize(xml)
# Verify that the issuing consumer is known to identity
consumer = get_object_or_404(Consumer, name=obj.issuer.text)
else:
# Get the consumer from the identifier passed from the login form
consumer = get_object_or_404(
Consumer,
slug=request.session[storage.format('consumer')]
)
# Query for a list of services provided by the requester that we know
# about; if we cannot find one for ACS, then return a 404
# TODO: Handle the case of more than one acs
# NOTE: This is a redundant query; it is merely a sanity check so that
# if the service doesn't exist the user won't get any farther
# in the authn process.
service = get_object_or_404(
models.Service,
resource=consumer,
profile=acs
)
if not from_login:
# TODO: Perform validation of message
pass
# Store items in namespaced session storage
request.session[storage.format('provider')] = provider.slug
request.session[storage.format('consumer')] = consumer.slug
request.session[storage.format('message:id')] = obj.id
request.session[storage.format('state')] = state
if not request.user.is_authenticated():
# Send user off to get authenticated;
# redirect to login page
return redirect('{}?{}'.format(
reverse('login'),
urlencode({'id': identifier})
))
else:
# Assign subject id to user if not already assigned
if 'saml:subject' not in request.session:
request.session['saml:subject'] = uuid4().hex
# Construct SAML response
# FIXME: This should go in `python-saml`, perhaps?
obj = schema.samlp.Response(
issuer=schema.saml.Issuer(provider.name),
status=schema.samlp.Status(
code=schema.samlp.StatusCode(
value=schema.samlp.StatusCode.Value.SUCCESS
)
),
assertion=schema.saml.Assertion(
issuer=schema.saml.Issuer(provider.name),
subject=schema.saml.Subject(
id=schema.saml.NameID(request.session['saml:subject']),
confirm=schema.saml.SubjectConfirmation(
data=schema.saml.SubjectConfirmationData(
in_response_to=consumer.name
)
)
),
statements=[
schema.saml.AuthenticationStatement(
context=schema.saml.AuthenticationContext(
reference=schema.saml.AuthenticationContext.
Reference.PREVIOUS_SESSION
)
),
schema.saml.AttributeStatement(
attributes=[
schema.saml.Attribute(
name='uid',
values=request.user.username
),
]
),
]
)
)
# Serialize message to a string
message = etree.tostring(schema.samlp.Response.serialize(obj))
print(message)
# Send off
return binding.Redirect.send(
service.get_absolute_url(),
message,
request.session[storage.format('state')],
'response'
)
def slo(request, *args, **kwargs):
"""Single log on (SLO)"""
pass
| mit | -1,617,509,357,031,689,700 | 34.045455 | 78 | 0.592181 | false |
imanoleizaguirre/mitologias | conf.py | 1 | 7760 | # -*- coding: utf-8 -*-
#
# Mitología documentation build configuration file, created by
# sphinx-quickstart on Wed Sep 19 11:47:40 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
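# As an illustrative (commented-out) sketch only: a project wanting API docs or
# math rendering might enable some of the extensions bundled with Sphinx, e.g.
#extensions = ['sphinx.ext.autodoc', 'sphinx.ext.mathjax', 'sphinx.ext.viewcode']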
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Mitología'
copyright = u'2012, Imanol Eizaguirre'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'es'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
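# As an illustrative (commented-out) sketch only, using sidebar templates that
# ship with Sphinx:
#html_sidebars = {'**': ['localtoc.html', 'relations.html', 'searchbox.html']}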
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Mitologadoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'Mitologa.tex', u'Mitología Documentation',
   u'Imanol Eizaguirre', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'mitologa', u'Mitología Documentation',
     [u'Imanol Eizaguirre'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
  ('index', 'Mitologa', u'Mitología Documentation',
   u'Imanol Eizaguirre', 'Mitologa', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| bsd-3-clause | -3,158,373,626,509,274,600 | 31.045455 | 80 | 0.704707 | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.