repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (15 classes) | hash (int64) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool)
---|---|---|---|---|---|---|---|---|---|---
ayosec/pyslide | Pyslide/Presentation/Pages.py | 1 | 9277 | # -*- coding: latin1 -*-
#
# Copyright (C) 2003, 2004 Ayose Cazorla León
#
# Authors
# Ayose Cazorla <[email protected]>
#
# This file is part of Pyslide.
#
# Pyslide is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Pyslide is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pyslide; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import pygame
def renderbackground(surface, background):
'''renderbackground(surface, background)
Renders background on surface. background can be:
* a pygame.Surface object (use blit)
* a tuple, interpreted as a color (use fill)
    * None: do nothing
'''
if isinstance(background, tuple):
surface.fill(background)
elif isinstance(background, pygame.Surface):
surface.blit(background, (0,0))
elif background is not None:
raise TypeError, 'background has to be Surface, tuple or None'
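# Illustrative usage of renderbackground (a sketch, not part of the original
# module; the surface size and colours are arbitrary assumptions):
#
#     screen = pygame.Surface((640, 480))
#     renderbackground(screen, (0, 0, 0))                    # fill with a colour
#     renderbackground(screen, pygame.Surface((640, 480)))   # blit another surface
#     renderbackground(screen, None)                         # no-op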
def createbackground(attrs):
'''createbackground(attrs) -> background
    Create a background for these attributes. "background" can be:
* a pygame.Surface object
* a tuple: this is a color used to fill the background
    * None: there is no background for these attributes.
'''
from Pyslide import misc
from Pyslide.Presentation import CurrentSurface
size = CurrentSurface.width, CurrentSurface.height
if attrs.has_key('bggrad'):
try:
top, bottom = attrs['bggrad'].split('-', 1)
except ValueError:
raise misc.PyslideError, 'Invalid value for bggrad: ' + attrs['bggrad']
top = misc.parse_color(top)
bottom = misc.parse_color(bottom)
if None in (top, bottom):
raise misc.PyslideError, 'Invalid gradient value: ' + attrs['bggrad']
bg = pygame.Surface(size)
grad = misc.RenderGradient(bg, top, bottom)
pygame.surfarray.blit_array(bg, grad)
elif attrs.has_key('bg'):
scale = attrs.get('bgscale', 'yes') == 'yes'
from Pyslide.Main.Images import imageloader
bg = imageloader.loadfile(attrs['bg'])
if scale:
bg = pygame.transform.scale(bg, size).convert()
else:
s = bg.get_size()
if (s[0] < size[0]) or (s[1] < size[1]):
i = pygame.Surface(size).convert()
i.fill(0)
i.blit(bg, (0,0))
bg = i
else:
bg = bg.convert()
elif attrs.has_key('bgcolor'):
bg = misc.parse_color(attrs['bgcolor'])
if bg is None:
raise misc.PyslideError, 'Invalid color: ' + attrs['bgcolor']
else:
bg = None
return bg
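# Sketch of the attribute dictionaries accepted above (keys follow the checks in
# createbackground; the concrete values are invented examples):
#
#     createbackground({'bggrad': '000000-ffffff'})        # colour gradient, top to bottom
#     createbackground({'bg': 'bg.png', 'bgscale': 'no'})   # image, padded if too small
#     createbackground({'bgcolor': 'ff0000'})               # plain colour fill
#     createbackground({})                                  # -> None, no background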
def applycss(item, group):
    '''applycss(item, group) -> item
Get attributes for the item. Returns the item type and its attributes
'''
from Pyslide.misc import PyslideError
parentobj = group.parent().parent()
# its own attributes
newattrs = item['attrs'].copy()
itemtype = item['type']
# class attributes
if newattrs.has_key('class'):
c = newattrs['class']
del newattrs['class']
try:
classattrs = parentobj.cssclass[c].items()
except KeyError:
raise PyslideError, 'Unknown class: ' + c
for key, val in classattrs:
if not newattrs.has_key(key):
newattrs[key] = val
# alias attributes
if parentobj.cssalias.has_key(itemtype):
alias = parentobj.cssalias[itemtype]
if itemtype not in ('text', 'image', 'system', 'shape', 'list'):
try:
itemtype = alias['item-type']
except:
raise PyslideError, \
'Invalid alias "%s": item-type attribute not present' % itemtype
for key, val in alias.items():
if not newattrs.has_key(key):
newattrs[key] = val
    # group attributes
for key, val in group.attrs.items():
if not newattrs.has_key(key):
newattrs[key] = val
    # apply and remove for- attributes where necessary
posgroup = group.getposition(item) + 1
for key in newattrs.keys():
if key.startswith('for-'):
place = key.split('-')[1]
put = False
try:
# is it a number?
put = (int(place) == posgroup)
except ValueError:
place = place.lower()
v = ['first', 'second', 'third']
if place in v:
put = (v.index(place) + 1) == posgroup
elif (place == 'even' and (posgroup % 2) == 0) \
or (place == 'odd' and (posgroup % 2) == 1):
put = True
elif place == 'last':
put = group.isthelast(item)
if put:
k = '-'.join(key.split('-')[2:])
if not newattrs.has_key(k):
newattrs[k] = newattrs[key]
del newattrs[key]
# THE item!
return {'type': itemtype, 'content': item['content'], 'attrs': newattrs}
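# Example of the 'for-' resolution above (attribute names and values invented): an
# item that is second in its group and carries {'for-second-color': 'red',
# 'for-odd-size': '20'} ends up with {'color': 'red'}; the 'for-odd-*' key is
# dropped because position 2 is not odd, and every 'for-*' key is removed from the
# returned attributes.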
class CreatedPage:
def __init__(self, attrs, groups):
self.groups = groups
self.attrs = attrs
self.stage = 0
self.__bg = None
if attrs.has_key('ttl'):
try:
self.ttl = int(attrs['ttl']) / 10.
            except ValueError:
                # PyslideError is not imported at module level, so import it here
                from Pyslide.misc import PyslideError
                raise PyslideError, 'Invalid TTL value: ' + str(attrs['ttl'])
else:
self.ttl = None
def currentgroups(self):
return self.groups[:self.stage+1]
def getcurrentgroup(self):
return self.groups[self.stage]
def nextstage(self):
if self.stage < len(self.groups) - 1:
self.stage += 1
return self.groups[self.stage]
return None
def prevstage(self):
if self.stage > 0:
self.stage -= 1
return self.groups[self.stage + 1]
return None
def setstage(self, n):
if n < 0:
self.stage = len(self.groups) + n
else:
self.stage = n
def getbackground(self):
if self.__bg is None:
self.__bg = createbackground(self.attrs)
return self.__bg
def iskeepalways(item, group):
'''iskeepalways(item, group) -> bool
Returns True if item is a keep-always item
'''
def i():
yield item['attrs']
yield group.attrs
p = group.parent().parent()
if p.cssalias.has_key(item['type']):
yield p.cssalias[item['type']]
if item['attrs'].has_key('class'):
c = item['attrs']['class']
if p.cssclass.has_key(c):
yield p.cssclass[c]
for attrs in i():
if attrs.has_key('keep-always'):
return attrs['keep-always'] == 'yes'
return False
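# Lookup order used by iskeepalways (as generated by i() above): the item's own
# attrs, then the group's attrs, then a matching css alias, then a matching css
# class; the first of these that defines 'keep-always' decides, and only the
# value 'yes' makes the result True.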
class Page:
def __init__ (self, page):
self.page = page
def getkeepalwaysitems(self):
        # We have to create all the items that precede the keep-always
        # items, because those items may need the LastPoint info.
# First, create a flat list of items
from copy import copy as C
items = []
for g in self.page.groups():
x = C(g.items())
for i in x: i['parent-group'] = g
items += x
# find the last keep-always item
last = -1
keepalwaysitems = []
for n, i in enumerate(items):
if iskeepalways(i, i['parent-group']):
last = n
keepalwaysitems.append(i)
from Pyslide import Items
result = []
lp = Items.LastPoint()
if last >= 0:
for item in items[:last+1]:
i = self.createitem(item, item['parent-group'], lp)
if item in keepalwaysitems:
result.append(i)
return result
def createitem(origitem, group, lp):
from Pyslide import Items
from copy import copy as C
item = applycss(origitem, group)
try:
itemtype = Items.getitemtype(item['type'])
except KeyError:
from Pyslide import misc
raise misc.PyslideError, 'invalid item: ' + item['type']
try:
i = itemtype((origitem, group), C(item['content']), C(item['attrs']), lp)
except Items.ReplaceThisItem, (t,):
i = t
return i
createitem = staticmethod(createitem)
def createpage(self):
from Pyslide import Items
groups = []
lp = Items.LastPoint()
for group in self.page.groups():
groups.append([self.createitem(i, group, lp) for i in group.items()])
return CreatedPage(self.page.get_attrs(), groups)
| gpl-2.0 | -1,627,606,097,163,363,300 | 27.283537 | 85 | 0.556214 | false |
FrankNagel/qlc | src/webapp/quanthistling/scripts/annotations/annotations_for_jakway2008.py | 1 | 8208 | # -*- coding: utf8 -*-
import sys, os
sys.path.append(os.path.abspath('.'))
import re
from operator import attrgetter
import difflib
# Pylons model init sequence
import pylons.test
import logging
from quanthistling.config.environment import load_environment
from quanthistling.model.meta import Session, metadata
from quanthistling import model
import quanthistling.dictdata.books
from paste.deploy import appconfig
import functions
from manualannotations_for_jakway2008 import manual_entries
def get_bold_annotations(entry):
sorted_annotations = [ a for a in entry.annotations if a.value=='bold']
sorted_annotations = sorted(sorted_annotations, key=attrgetter('start'))
last_bold_end = -1
at_start = True
last_bold_start = sorted_annotations[0].start
head_starts = []
head_ends = []
for a in sorted_annotations:
if (a.start <= (last_bold_end + 1)):
last_bold_end = a.end
else:
head_starts.append(last_bold_start)
head_ends.append(last_bold_end)
last_bold_start = a.start
last_bold_end = a.end
head_starts.append(last_bold_start)
head_ends.append(last_bold_end)
return head_starts, head_ends
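# Worked example of the merging above (positions invented): bold annotations
# spanning (0, 4), (5, 9) and (20, 25) give head_starts == [0, 20] and
# head_ends == [9, 25], because runs that touch within one character are merged.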
def annotate_head(entry):
# delete head annotations
head_annotations = [ a for a in entry.annotations if a.value=='head' or a.value=="iso-639-3" or a.value=="doculect"]
for a in head_annotations:
Session.delete(a)
head = None
heads = []
sorted_annotations = [ a for a in entry.annotations if a.value=='bold']
sorted_annotations = sorted(sorted_annotations, key=attrgetter('start'))
head_starts, head_ends = get_bold_annotations(entry)
heads = []
for i in range(len(head_starts)):
head_start_pos = head_starts[i]
head_end_pos = head_ends[i]
#head_end_pos = functions.get_last_bold_pos_at_start(entry)
#head_start_pos = 0
if head_end_pos > -1:
start = head_start_pos
substr = entry.fullentry[head_start_pos:head_end_pos]
for match in re.finditer(r', ?', substr):
end = match.start(0) + head_start_pos
inserted_head = functions.insert_head(entry, start, end)
#entry.append_annotation(start, end, u'head', u'dictinterpretation')
heads.append(inserted_head)
start = match.end(0) + head_start_pos
end = head_end_pos
inserted_head = functions.insert_head(entry, start, end)
#entry.append_annotation(start, end, u'head', u'dictinterpretation')
heads.append(inserted_head)
else:
print "no head"
print entry.fullentry.encode('utf-8')
return heads
def annotate_head_without_comma(entry):
# delete head annotations
head_annotations = [ a for a in entry.annotations if a.value=='head' or a.value=="iso-639-3" or a.value=="doculect"]
for a in head_annotations:
Session.delete(a)
head = None
heads = []
sorted_annotations = [ a for a in entry.annotations if a.value=='bold']
sorted_annotations = sorted(sorted_annotations, key=attrgetter('start'))
head_starts, head_ends = get_bold_annotations(entry)
heads = []
for i in range(len(head_starts)):
head_start_pos = head_starts[i]
head_end_pos = head_ends[i]
#head_end_pos = functions.get_last_bold_pos_at_start(entry)
#head_start_pos = 0
if head_end_pos > -1:
inserted_head = functions.insert_head(entry, head_start_pos, head_end_pos)
heads.append(inserted_head)
else:
print "no head"
print entry.fullentry.encode('utf-8')
return heads
def annotate_translations(entry):
# delete translation annotations
trans_annotations = [ a for a in entry.annotations if a.value=='translation']
for a in trans_annotations:
Session.delete(a)
#head_end_pos = functions.get_last_bold_pos_at_start(entry)
head_starts, head_ends = get_bold_annotations(entry)
for i in range(len(head_starts)):
trans_start_pos = head_ends[i]
if len(head_starts) > i+1:
trans_end_pos = head_starts[i+1]
else:
trans_end_pos = len(entry.fullentry)
if trans_start_pos > -1:
substr = entry.fullentry[trans_start_pos:trans_end_pos]
start = trans_start_pos
for match in re.finditer(r'(?:, ?|; ?|\d\) )', substr):
mybreak = False
# are we in a bracket?
for m in re.finditer(r'\(.*?\)', substr):
if match.start(0) >= m.start(0) and match.end(0) <= m.end(0):
mybreak = True
if not mybreak:
end = match.start(0) + trans_start_pos
if end > start and not re.match(r' +$', entry.fullentry[start:end]):
functions.insert_translation(entry, start, end)
start = match.end(0) + trans_start_pos
end = trans_end_pos
if end > start and not re.match(r'^ +$', entry.fullentry[start:end]):
functions.insert_translation(entry, start, end)
def main(argv):
bibtex_key = u"jakway2008"
if len(argv) < 2:
print "call: annotations_for%s.py ini_file" % bibtex_key
exit(1)
ini_file = argv[1]
conf = appconfig('config:' + ini_file, relative_to='.')
if not pylons.test.pylonsapp:
load_environment(conf.global_conf, conf.local_conf)
# Create the tables if they don't already exist
metadata.create_all(bind=Session.bind)
dictdatas = Session.query(model.Dictdata).join(
(model.Book, model.Dictdata.book_id==model.Book.id)
).filter(model.Book.bibtex_key==bibtex_key).all()
for dictdata in dictdatas:
entries = Session.query(model.Entry).filter_by(dictdata_id=dictdata.id).all()
#entries = Session.query(model.Entry).filter_by(dictdata_id=dictdata.id,startpage=109,pos_on_page=18).all()
#entries = []
startletters = set()
for e in entries:
if dictdata.startpage == 129:
heads = annotate_head_without_comma(e)
else:
heads = annotate_head(e)
if not e.is_subentry:
for h in heads:
if len(h) > 0:
startletters.add(h[0].lower())
annotate_translations(e)
dictdata.startletters = unicode(repr(sorted(list(startletters))))
Session.commit()
for e in manual_entries:
dictdata = model.meta.Session.query(model.Dictdata).join(
(model.Book, model.Dictdata.book_id==model.Book.id)
).filter(model.Book.bibtex_key==bibtex_key).filter("startpage<=:pagenr and endpage>=:pagenr").params(pagenr=int(e["startpage"])).first()
entry_db = Session.query(model.Entry).filter_by(dictdata_id=dictdata.id, startpage=e["startpage"], pos_on_page=e["pos_on_page"]).first()
if difflib.SequenceMatcher(None, e["fullentry"].decode('utf-8'), entry_db.fullentry).ratio() > 0.95:
entry_db.fullentry = e["fullentry"].decode('utf-8')
# delete all annotations in db
for a in entry_db.annotations:
Session.delete(a)
# insert new annotations
for a in e["annotations"]:
entry_db.append_annotation(a["start"], a["end"], a["value"].decode('utf-8'), a["type"].decode('utf-8'), a["string"].decode('utf-8'))
else:
print "We have a problem, manual entry on page %i pos %i seems not to be the same entry as in db, it was not inserted to db. Please correct the problem." % (e["startpage"], e["pos_on_page"])
Session.commit()
if __name__ == "__main__":
main(sys.argv) | gpl-3.0 | -4,918,240,981,853,853,000 | 35.488584 | 202 | 0.577851 | false |
heejongahn/hjlog | hjlog/models/post.py | 1 | 1123 | from datetime import datetime
from hjlog import db
# Tag helper table
tags = db.Table(
'tags',
db.Column('tag_id', db.Integer, db.ForeignKey('tag.id')),
db.Column('post_id', db.Integer, db.ForeignKey('post.id')))
class Post(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(120), index=True, unique=True)
body = db.Column(db.Text)
datetime = db.Column(db.DateTime)
category = db.Column(db.String(20))
author_id = db.Column(db.Integer, db.ForeignKey('user.id'))
private = db.Column(db.Boolean)
tags = db.relationship(
'Tag',
secondary=tags,
backref=db.backref('describes', lazy='dynamic'))
photos = db.relationship('Photo', backref='original')
def __init__(self, title, body, category, author, private, tags):
self.title = title
self.body = body
self.category = category
self.author = author
self.tags = tags
self.private = private
self.datetime = datetime.now()
def is_invisible_by(self, user):
return self.private and self.author != user
| mit | -7,618,970,135,233,268,000 | 30.194444 | 69 | 0.631345 | false |
kirillmorozov/youbot_control | scripts/client_gui.py | 1 | 32532 | #!/usr/bin/python2
# -*- coding: utf-8 -*-
"""Client GUI to control youBot robot."""
import Tkinter as tk
import ttk
import tkMessageBox
import rospyoubot
from math import radians, degrees
class MainApplication(ttk.Frame):
u"""Основное окно приложения."""
def __init__(self, parent, *args, **kwargs):
u"""Конструктор основного окна."""
ttk.Frame.__init__(self, parent, *args, **kwargs)
self.grid(sticky='nswe')
self.columnconfigure(0, weight=1)
# self.columnconfigure(1, weight=1)
self.style = ttk.Style()
self.style.theme_use('clam')
self.notebook = ttk.Notebook(self)
self.notebook.grid(column=0, row=0, sticky='nswe')
self.manual_controls = ControlsPage(self.notebook)
self.notebook.add(self.manual_controls,
text='Ручное управление',
sticky='nswe')
self.automatic_controls = AutomaticControls(self.notebook)
self.notebook.add(self.automatic_controls,
text='Автоматическое управление',
sticky='nswe')
class ControlsPage(ttk.Frame):
u"""Вкладка управления."""
def __init__(self, parent):
u"""Конструктор класса."""
ttk.Frame.__init__(self, parent)
self.columnconfigure(0, weight=0)
self.columnconfigure(1, weight=1)
# Arm joints controls
self.joints_controls = JointsControlsFrame(self)
self.joints_controls.grid(column=1, row=0, sticky='nswe')
# Odometry
self.odometry = OdometryFrame(self)
self.odometry.grid(column=1, row=1, sticky='nswe')
# Base controls
self.base_control = BaseControl(self)
self.base_control.grid(column=1, row=2, sticky='nswe')
# Padding
for child in self.winfo_children():
child.grid_configure(padx=5, pady=5)
class OdometryFrame(ttk.LabelFrame):
u"""Фрейм одометрии."""
def __init__(self, parent):
u"""Инициализация класса."""
ttk.LabelFrame.__init__(self, parent, text='Одометрия:')
self.columnconfigure(0, weight=1)
self.columnconfigure(1, weight=1)
self.odom_x = tk.StringVar()
self.odom_x.set('x')
self.odom_y = tk.StringVar()
self.odom_y.set('y')
self.odom_z = tk.StringVar()
self.odom_z.set('z')
ttk.Label(self, text='X:', width=5, anchor=tk.E).grid(column=0, row=0)
ttk.Label(self,
textvariable=ODOMETRY[0],
width=6,
anchor=tk.W).grid(column=1, row=0)
ttk.Label(self, text='Y:', width=5, anchor=tk.E).grid(column=0, row=1)
ttk.Label(self,
textvariable=ODOMETRY[1],
width=6,
anchor=tk.W).grid(column=1, row=1)
ttk.Label(self, text=u'\u03c6:', width=5, anchor=tk.E).grid(column=0,
row=2)
ttk.Label(self,
textvariable=ODOMETRY[2],
width=6,
anchor=tk.W).grid(column=1, row=2)
for child in self.winfo_children():
child.grid_configure(padx=2, pady=2)
class JointsControlsFrame(ttk.LabelFrame):
u"""Фрейм управления степенями подвижности."""
def __init__(self, parent):
u"""Инициализация класса."""
ttk.LabelFrame.__init__(self, parent, text='Управление манипулятором:')
self.columnconfigure(0, weight=1)
self.columnconfigure(1, weight=1)
self.a1_joint = JointControl(self, 1)
self.a1_joint.grid(row=0, columnspan=2, sticky='nswe')
self.a2_joint = JointControl(self, 2)
self.a2_joint.grid(row=1, columnspan=2, sticky='nswe')
self.a3_joint = JointControl(self, 3)
self.a3_joint.grid(row=2, columnspan=2, sticky='nswe')
self.a4_joint = JointControl(self, 4)
self.a4_joint.grid(row=3, columnspan=2, sticky='nswe')
self.a5_joint = JointControl(self, 5)
self.a5_joint.grid(row=4, columnspan=2, sticky='nswe')
self.gripper = GripperControl(self)
self.gripper.grid(row=5, columnspan=2, sticky='nswe')
self.home_button = ttk.Button(self, text='Домой', width=6)
self.home_button.grid(row=6, column=0, sticky='nswe')
self.home_button.bind('<Button-1>', self.go_home)
self.home_button = ttk.Button(self, text='Свеча', width=6)
self.home_button.grid(row=6, column=1, sticky='nswe')
self.home_button.bind('<Button-1>', self.go_candle)
for child in self.winfo_children():
child.grid_configure(padx=2, pady=2)
def go_home(self, *args):
u"""Отправляет манипулятор в домашнюю позицию."""
R1.arm.set_joints_angles(0.016,
0.04,
-0.072,
0.0432,
2.839)
def go_candle(self, *args):
u"""Приводит манипулятор в положение свечки."""
R1.arm.set_joints_angles(2.9400474018133402,
1.1251030074812907,
-2.5235000069592695,
1.769468876296561,
2.838871440356912)
class JointControl(ttk.Frame):
u"""Фрейм управления отдельной степенью."""
def __init__(self, parent, joint):
u"""Инициализация класса."""
ttk.Frame.__init__(self, parent)
self.columnconfigure(0, weight=1)
self.columnconfigure(1, weight=1)
self.columnconfigure(2, weight=1)
self.columnconfigure(3, weight=1)
self.joint = joint
self.label = 'A{}:'.format(joint)
self.angle = tk.StringVar()
ttk.Label(self, text=self.label, width=6, anchor='e').grid(column=0,
row=0,
sticky=tk.E)
self.minus_button = ttk.Button(self, text='-', width=7)
self.minus_button.grid(column=1, row=0)
self.minus_button.bind('<Button-1>', self.minus_button_press)
self.minus_button.bind('<ButtonRelease-1>', key_released)
self.state_label = ttk.Label(self,
textvariable=ARM_JOINTS_ANGLES[joint-1],
width=5,
anchor=tk.CENTER)
self.state_label.grid(column=2, row=0, sticky='nswe')
self.plus_button = ttk.Button(self, text='+', width=7)
self.plus_button.grid(column=3, row=0)
self.plus_button.bind('<Button-1>', self.plus_button_press)
self.plus_button.bind('<ButtonRelease-1>', key_released)
def plus_button_press(self, *args):
u"""Задаёт скорость оси, при нажатии на кнопку '+'."""
vel = ARM_VELOCITY
arm_velocities = [vel if x == self.joint - 1 else 0 for x in range(5)]
R1.arm.set_joints_velocities(*arm_velocities)
def minus_button_press(self, *args):
u"""Задаёт скорость оси, при нажатии на кнопку '-'."""
vel = -1 * ARM_VELOCITY
arm_velocities = [vel if x == self.joint - 1 else 0 for x in range(5)]
R1.arm.set_joints_velocities(*arm_velocities)
class BaseControl(ttk.LabelFrame):
u"""Фрейм управления движением базы."""
def __init__(self, parent):
u"""Инициализация класса."""
ttk.LabelFrame.__init__(self, parent, text='Управление платформой:')
self.columnconfigure(0, weight=1)
self.columnconfigure(1, weight=1)
self.columnconfigure(2, weight=1)
controls_style = ttk.Style()
controls_style.configure('base.TButton', font=('TkDefaultFont', 20))
# Rotate left
self.rl_button = ttk.Button(self,
text=u'\u21b6',
width=2,
style='base.TButton')
self.rl_button.grid(column=0, row=0, sticky=tk.SE)
self.rl_button.bind('<Button-1>', self.rl_button_press)
self.rl_button.bind('<ButtonRelease-1>', key_released)
# Forward
self.f_button = ttk.Button(self,
text=u'\u2191',
width=2,
style='base.TButton')
self.f_button.grid(column=1, row=0, sticky=tk.S)
self.f_button.bind('<Button-1>', self.f_button_press)
self.f_button.bind('<ButtonRelease-1>', key_released)
# Rotate right
self.rr_button = ttk.Button(self,
text=u'\u21b7',
width=2,
style='base.TButton')
self.rr_button.grid(column=2, row=0, sticky=tk.SW)
self.rr_button.bind('<Button-1>', self.rr_button_press)
self.rr_button.bind('<ButtonRelease-1>', key_released)
# Left
self.l_button = ttk.Button(self,
text=u'\u2190',
width=2,
style='base.TButton')
self.l_button.grid(column=0, row=1, sticky=tk.NE)
self.l_button.bind('<Button-1>', self.l_button_press)
self.l_button.bind('<ButtonRelease-1>', key_released)
# Backwards
self.b_button = ttk.Button(self,
text=u'\u2193',
width=2,
style='base.TButton')
self.b_button.grid(column=1, row=1, sticky=tk.N)
self.b_button.bind('<Button-1>', self.b_button_press)
self.b_button.bind('<ButtonRelease-1>', key_released)
# Right
self.r_button = ttk.Button(self,
text=u'\u2192',
width=2,
style='base.TButton')
self.r_button.grid(column=2, row=1, sticky=tk.NW)
self.r_button.bind('<Button-1>', self.r_button_press)
self.r_button.bind('<ButtonRelease-1>', key_released)
for child in self.winfo_children():
child.grid_configure(padx=2, pady=2)
def rl_button_press(self, *args):
u"""Обрабатыевает нажатие на кнопку RL."""
R1.base.set_velocity(ang_z=BASE_VELOCITY)
def f_button_press(self, *args):
u"""Обрабатыевает нажатие на кнопку F."""
R1.base.set_velocity(lin_x=BASE_VELOCITY)
def l_button_press(self, *args):
u"""Обрабатыевает нажатие на кнопку L."""
R1.base.set_velocity(lin_y=BASE_VELOCITY)
def r_button_press(self, *args):
u"""Обрабатыевает нажатие на кнопку R."""
R1.base.set_velocity(lin_y=-BASE_VELOCITY)
def b_button_press(self, *args):
u"""Обрабатыевает нажатие на кнопку B."""
R1.base.set_velocity(lin_x=-BASE_VELOCITY)
def rr_button_press(self, *args):
u"""Обрабатыевает нажатие на кнопку RR."""
R1.base.set_velocity(ang_z=-BASE_VELOCITY)
class GripperControl(ttk.Frame):
u"""Фрейм управления гриппером."""
def __init__(self, parent):
u"""Инициализация класса."""
ttk.Frame.__init__(self, parent)
self.columnconfigure(0, weight=1)
self.columnconfigure(1, weight=1)
self.columnconfigure(2, weight=1)
self.columnconfigure(3, weight=1)
self.gripper_state = tk.StringVar()
ttk.Label(self,
text='Схват:',
width=6,
anchor='e').grid(column=0,
row=0,
sticky='e')
self.close_button = ttk.Button(self, text='Закрыть', width=7)
self.close_button.grid(column=1, row=0)
self.close_button.bind('<Button-1>', self.close_gripper)
ttk.Label(self,
textvariable=self.gripper_state,
anchor=tk.CENTER,
width=5).grid(column=2, row=0, sticky=(tk.W, tk.E))
self.open_button = ttk.Button(self, text='Открыть', width=7)
self.open_button.grid(column=3, row=0)
self.open_button.bind('<Button-1>', self.open_gripper)
def close_gripper(self, *args):
u"""Закрывает гриппер и записывает 'Closed' в его статус."""
self.gripper_state.set('Закрыт')
R1.arm.gripper.set_gripper_state(False)
def open_gripper(self, *args):
u"""Открывает гриппер и записывает 'Opened' в его статус."""
self.gripper_state.set('Открыт')
R1.arm.gripper.set_gripper_state(True)
class AutomaticControls(ttk.Frame):
u"""Фрейм автоматического управления."""
def __init__(self, parent):
u"""Инициализация класса."""
ttk.Frame.__init__(self, parent)
self.columnconfigure(0, weight=1)
self.columnconfigure(1, weight=1)
self.pt_list = tk.StringVar()
# Points Listbox
self.points_list = tk.Listbox(self,
height=29,
selectmode='browse',
listvariable=self.pt_list)
self.points_list.grid(column=0,
row=0,
sticky='nswe',
rowspan=2,
columnspan=2)
# Buttons frame
self.buttons_frame = ttk.Frame(self)
self.buttons_frame.grid(column=2, row=0, sticky='n')
# Add base button
self.add_base_button = ttk.Button(self.buttons_frame,
text=u'Платформа',
width=9)
self.add_base_button.grid(column=0, row=0, columnspan=2)
self.add_base_button.bind('<Button-1>', self.add_to_list)
# Add arm button
self.add_arm_button = ttk.Button(self.buttons_frame,
text=u'Манипулятор',
width=9)
self.add_arm_button.grid(column=0, row=1, columnspan=2)
self.add_arm_button.bind('<Button-1>', self.add_arm_point)
# Edit button
# ttk.Button(self.buttons_frame,
# text=u'Редактировать',
# width=9).grid(column=0, row=1)
# Remove button
self.grip_open_button = ttk.Button(self.buttons_frame,
text=u'Откр',
width=3)
self.grip_open_button.grid(column=0, row=2)
self.grip_open_button.bind('<Button-1>', self.open_gripper)
self.grip_close_button = ttk.Button(self.buttons_frame,
text=u'Закр',
width=3)
self.grip_close_button.grid(column=1, row=2)
self.grip_close_button.bind('<Button-1>', self.close_gripper)
self.remove_button = ttk.Button(self.buttons_frame,
text=u'Удалить',
width=9)
self.remove_button.grid(column=0, row=3, columnspan=2)
self.remove_button.bind('<Button-1>', self.remove_point)
# Start button
ttk.Button(self.buttons_frame,
text=u'Старт',
width=9,
command=self.start).grid(column=0, row=4, columnspan=2)
# Stop button
ttk.Button(self.buttons_frame,
text=u'Стоп',
width=9).grid(column=0, row=5, columnspan=2)
# Up button
ttk.Button(self, text=u'Вверх', command=self.moveup).grid(column=0, row=2)
# Down button
ttk.Button(self, text=u'Вниз', command=self.movedown).grid(column=1, row=2)
for child in self.winfo_children():
child.grid_configure(padx=5, pady=5)
for child in self.buttons_frame.winfo_children():
child.grid_configure(padx=5, pady=5)
def add_to_list(self, event):
u"""Добавляет движение в список движений."""
BaseMotionAddition(self)
def add_arm_point(self, event):
u"""Span window to add Arm point."""
ArmMotionAddition(self)
def remove_point(self, event):
u"""Удаляет выбранное движение из списка."""
if len(self.points_list.curselection()) > 0:
index = int(self.points_list.curselection()[0])
points = listbox_to_list(self.pt_list.get())
POINTS_DICT.pop(points.pop(index))
listbox_string = ' '.join(points)
self.pt_list.set(listbox_string)
def start(self):
u"""Запускает выполнение программы движения робота."""
for name in listbox_to_list(self.pt_list.get()):
if name.startswith('Base'):
R1.base.set_velocity(0, 0, 0)
R1.base.lin(*POINTS_DICT[name])
R1.base.set_velocity(0, 0, 0)
elif name.startswith('Arm'):
R1.arm.ptp(POINTS_DICT[name])
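    # Illustrative shape of POINTS_DICT consumed above (names and values invented):
    #     POINTS_DICT = {
    #         'Base:dock': (1.0, 0.5, 0.0),                  # x, y, phi in radians
    #         'Arm:pick': (2.94, 1.13, -2.52, 1.77, 2.84),   # five joint angles
    #     }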
def moveup(self):
index = int(self.points_list.curselection()[0])
points = listbox_to_list(self.pt_list.get())
if index >= 1:
item = points.pop(index)
points.insert(index-1, item)
listbox_string = ' '.join(points)
self.pt_list.set(listbox_string)
def movedown(self):
index = int(self.points_list.curselection()[0])
points = listbox_to_list(self.pt_list.get())
if index <= len(points)-1:
item = points.pop(index)
points.insert(index+1, item)
listbox_string = ' '.join(points)
self.pt_list.set(listbox_string)
def close_gripper(self, *args):
pass
def open_gripper(self, *args):
pass
class BaseMotionAddition(tk.Toplevel):
u"""Окно добавления движения."""
def __init__(self, parent):
u"""Инициализация класса."""
tk.Toplevel.__init__(self, parent)
self.parent = parent
self.title(u'Движение платформы')
self.resizable(0, 0)
self.frm = ttk.Frame(self)
self.frm.grid(column=0, row=0, sticky='nswe')
ttk.Label(self.frm,
text=u'Имя точки:').grid(column=0, row=0, sticky='e')
# Point's name
self.point_name = tk.StringVar()
ttk.Entry(self.frm,
textvariable=self.point_name).grid(column=1,
row=0,
sticky='w')
# X coordinate
ttk.Label(self.frm,
text=u'X:',
width=3).grid(column=3, row=0, sticky='e')
self.X = ttk.Entry(self.frm)
self.X.grid(column=4, row=0, sticky='w')
# Y coordinate
ttk.Label(self.frm,
text=u'Y:',
width=3).grid(column=3, row=1, sticky='e')
self.Y = ttk.Entry(self.frm)
self.Y.grid(column=4, row=1, sticky='w')
# Orientation
ttk.Label(self.frm,
text=u'\u03c6:',
width=3).grid(column=3, row=2, sticky='e')
self.Phi = ttk.Entry(self.frm)
self.Phi.grid(column=4, row=2, sticky='w')
# Touch Up! button
ttk.Button(self.frm,
text='Touch Up',
command=self.touch_up).grid(column=4, row=3)
# Save button
save_button = ttk.Button(self.frm, text=u'Сохранить', command=self.save)
save_button.grid(row=3, column=0)
# Cancel button
cancel_button = ttk.Button(self.frm,
text=u'Отмена',
command=self.cancel)
cancel_button.grid(row=3, column=1)
for child in self.frm.winfo_children():
child.grid_configure(padx=5, pady=5)
def cancel(self):
u"""Закрывает окно, не сохраняя результат."""
self.destroy()
def save(self):
u"""Сохраняет точку в список точек."""
points_list = listbox_to_list(self.parent.pt_list.get())
name = 'Base:{}'.format(self.point_name.get())
x = self.X.get()
y = self.Y.get()
phi = self.Phi.get()
if self.input_is_valid(name, x, y, phi):
POINTS_DICT[name] = (float(x), float(y), radians(float(phi)))
points_list.append(name)
listbox_string = ' '.join(points_list)
self.parent.pt_list.set(listbox_string)
self.destroy()
else:
tkMessageBox.showerror(u"Ошибка добавления точки.",
u"Проверьте поля ввода.")
def touch_up(self):
u"""Записывает текущие координаты базы в поля ввода координат."""
odometry = R1.base.get_odometry()
self.X.insert(0, odometry[0])
self.Y.insert(0, odometry[1])
self.Phi.insert(0, degrees(odometry[2]))
def input_is_valid(self, name, x, y, phi):
u"""Check input data for validity."""
name_ok = name not in POINTS_DICT.keys()
x_ok = isfloat(self.X.get())
y_ok = isfloat(self.Y.get())
phi_ok = isfloat(self.Phi.get())
if name_ok and x_ok and y_ok and phi_ok:
return True
else:
return False
class ArmMotionAddition(tk.Toplevel):
u"""Window that add arm motion to points lists."""
def __init__(self, parent):
u"""Class constructor."""
tk.Toplevel.__init__(self, parent)
self.parent = parent
self.title(u'Движение манипулятора')
self.resizable(0, 0)
frame = ttk.Frame(self)
frame.grid(row=0, column=0, sticky='nswe')
# Coordinates
coordinates = ttk.LabelFrame(frame,
text=u"Введите координаты и углы ориентации")
coordinates.grid(row=0, column=0, columnspan=3, sticky='nswe')
# X
self.X = tk.StringVar()
ttk.Label(coordinates, text=u"X:").grid(row=0, column=0)
x_input = ttk.Entry(coordinates, textvariable=self.X)
x_input.grid(row=0, column=1)
# Y
self.Y = tk.StringVar()
ttk.Label(coordinates, text=u"Y:").grid(row=1, column=0)
y_input = ttk.Entry(coordinates, textvariable=self.Y)
y_input.grid(row=1, column=1)
# Z
self.Z = tk.StringVar()
ttk.Label(coordinates, text=u"Z:").grid(row=2, column=0)
z_input = ttk.Entry(coordinates, textvariable=self.Z)
z_input.grid(row=2, column=1)
# W
self.W = tk.StringVar()
ttk.Label(coordinates, text=u"W:").grid(row=0, column=2)
w_input = ttk.Entry(coordinates, textvariable=self.W)
w_input.grid(row=0, column=3)
# O
self.O = tk.StringVar()
ttk.Label(coordinates, text=u"O:").grid(row=1, column=2)
o_input = ttk.Entry(coordinates, textvariable=self.O)
o_input.grid(row=1, column=3)
# Name
self.point_name = tk.StringVar()
ttk.Label(coordinates, text=u"Имя:").grid(row=2, column=2)
name_input = ttk.Entry(coordinates, textvariable=self.point_name)
name_input.grid(row=2, column=3)
# Configuration
configuration = ttk.LabelFrame(frame, text=u"Выберите конфигурацию")
configuration.grid(row=1, column=0, columnspan=3, sticky='nswe')
self.elbow = tk.IntVar()
self.oriset = tk.IntVar()
ttk.Radiobutton(configuration,
text=u"Локоть вверх",
variable=self.elbow,
value=0).grid(row=0, column=0)
ttk.Radiobutton(configuration,
text=u"Локоть вниз",
variable=self.elbow,
value=1).grid(row=1, column=0)
ttk.Radiobutton(configuration,
text=u"Прямое плечо",
variable=self.oriset,
value=0).grid(row=0, column=1)
ttk.Radiobutton(configuration,
text=u"Обратное плечо",
variable=self.oriset,
value=1).grid(row=1, column=1)
ttk.Button(configuration,
text="Текущие координаты",
command=self.touch_up).grid(row=0,
column=2)
ttk.Button(frame,
text="Move Arm",
command=self.move_arm).grid(row=2,
column=1)
# Save
ttk.Button(frame,
text=u"Сохранить",
command=self.save).grid(row=2,
column=2)
# Cancel
ttk.Button(frame,
text=u"Отмена",
command=self.cancel).grid(row=2,
column=0)
for child in self.winfo_children():
child.grid_configure(padx=5, pady=5)
for child in frame.winfo_children():
child.grid_configure(padx=5, pady=5)
for child in coordinates.winfo_children():
child.grid_configure(padx=5, pady=5)
for child in configuration.winfo_children():
child.grid_configure(padx=5, pady=5)
def save(self):
u"""Save arm position to points list."""
if self.input_is_valid():
points_list = listbox_to_list(self.parent.pt_list.get())
name = 'Arm:{}'.format(self.point_name.get())
x = self.X.get()
y = self.Y.get()
z = self.Z.get()
w = self.W.get()
ori = self.oriset.get()
elbow = self.elbow.get()
try:
point = rospyoubot._joints_angles_for_pose(x, y, z, w, ori,
elbow)
POINTS_DICT[name] = point
points_list.append(name)
listbox_string = ' '.join(points_list)
self.parent.pt_list.set(listbox_string)
self.destroy()
except ValueError:
tkMessageBox.showerror(u"Ошибка добавления точки.",
u"Точка недостижима")
else:
tkMessageBox.showerror(u"Ошибка добавления точки.",
u"Проверьте поля ввода.")
def move_arm(self):
u"""Move arm to entered coordinates."""
pass
def touch_up(self):
u"""Save current joints angles as point."""
joints = R1.arm.get_current_joints_positions()
x, y, z, w, o = rospyoubot._joints_positions_to_cartesian(self.oriset.get(),
*joints)
self.X.set(x)
self.Y.set(y)
self.Z.set(z)
self.W.set(w)
self.O.set(o)
def cancel(self):
u"""Закрывает окно, не сохраняя результат."""
self.destroy()
def input_is_valid(self):
u"""Check if all inputs are valid."""
x_ok = isfloat(self.X.get())
y_ok = isfloat(self.Y.get())
z_ok = isfloat(self.Z.get())
w_ok = isfloat(self.W.get())
o_ok = isfloat(self.O.get())
name = 'Arm:' + self.point_name.get()
name_not_empty = self.point_name.get() != ''
name_ok = name not in POINTS_DICT.keys()
everything_ok = (x_ok and y_ok and z_ok and w_ok and o_ok and name_ok
and name_not_empty)
if everything_ok:
return True
else:
return False
def key_pressed(event):
u"""Обрабатывает нажатие на кнопку клавиатуры."""
# Base movement
if event.char == 'i':
R1.base.set_velocity(lin_x=BASE_VELOCITY)
elif event.char == 'k':
R1.base.set_velocity(lin_x=-BASE_VELOCITY)
elif event.char == 'j':
R1.base.set_velocity(lin_y=BASE_VELOCITY)
elif event.char == 'l':
R1.base.set_velocity(lin_y=-BASE_VELOCITY)
elif event.char == 'u':
R1.base.set_velocity(ang_z=BASE_VELOCITY)
elif event.char == 'o':
R1.base.set_velocity(ang_z=-BASE_VELOCITY)
# Arm movement
if event.char == 'q':
R1.arm.set_joints_velocities(1, 0, 0, 0, 0)
elif event.char == 'Q':
R1.arm.set_joints_velocities(-1, 0, 0, 0, 0)
if event.char == 'w':
R1.arm.set_joints_velocities(0, 1, 0, 0, 0)
elif event.char == 'W':
R1.arm.set_joints_velocities(0, -1, 0, 0, 0)
if event.char == 'e':
R1.arm.set_joints_velocities(0, 0, 1, 0, 0)
elif event.char == 'E':
R1.arm.set_joints_velocities(0, 0, -1, 0, 0)
if event.char == 'r':
R1.arm.set_joints_velocities(0, 0, 0, 1, 0)
elif event.char == 'R':
R1.arm.set_joints_velocities(0, 0, 0, -1, 0)
if event.char == 't':
R1.arm.set_joints_velocities(0, 0, 0, 0, 1)
elif event.char == 'T':
R1.arm.set_joints_velocities(0, 0, 0, 0, -1)
if event.char == 'G':
R1.arm.gripper.set_gripper_state(True)
if event.char == 'g':
R1.arm.gripper.set_gripper_state(False)
def key_released(event):
u"""Обрабатывает отпускание кнопки клавиатуры."""
R1.base.set_velocity()
R1.arm.set_joints_velocities(0, 0, 0, 0, 0)
def update_joints_labels():
u"""бновляет данные о текущем угле поворота осей и одометрии базы."""
current_joints_positions = list(R1.arm.get_current_joints_positions())
odom = R1.base.get_odometry()
for index, value in enumerate(odom):
ODOMETRY[index].set(round(value, 3))
for index, value in enumerate(current_joints_positions):
ARM_JOINTS_ANGLES[index].set(round(degrees(value), 3))
ROOT.after(100, update_joints_labels)
def listbox_to_list(listbox_str):
u"""Convert listbox string into list."""
string = listbox_str[1:-1]
list_from_string = string.split()
striped_list = [item.strip(",'") for item in list_from_string]
return striped_list
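# Example (illustrative): with two stored points the StringVar typically reads back
# as "('Base:dock', 'Arm:pick')"; dropping the outer characters, splitting on
# whitespace and trimming quotes/commas yields ['Base:dock', 'Arm:pick'].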
def isfloat(string):
u"""Return True if string can be converted into float."""
try:
float(string)
return True
except ValueError:
return False
if __name__ == '__main__':
ROOT = tk.Tk()
ROOT.title("youBot control")
ROOT.resizable(1, 0)
ROOT.columnconfigure(0, weight=1)
BASE_VELOCITY = 0.2
ARM_VELOCITY = 1
R1 = rospyoubot.YouBot()
ARM_JOINTS_ANGLES = [tk.StringVar() for _ in range(5)]
ODOMETRY = [tk.StringVar() for _ in range(3)]
POINTS_DICT = {}
MAINFRAME = MainApplication(ROOT)
ROOT.update()
ROOT.minsize(ROOT.winfo_width(), ROOT.winfo_height())
ROOT.bind('<Key>', key_pressed)
ROOT.bind('<KeyRelease>', key_released)
ROOT.after(100, update_joints_labels)
ROOT.mainloop()
| bsd-2-clause | 1,213,052,310,638,447,400 | 38.376276 | 84 | 0.535551 | false |
jwodder/inplace | test/test_symlinks.py | 1 | 2816 | from operator import attrgetter
import os
from os.path import relpath
import platform
import pytest
from in_place import InPlace
from test_in_place_util import TEXT
pytestmark = pytest.mark.xfail(
platform.system() == "Windows" and platform.python_implementation() == "PyPy",
reason="Symlinks are not implemented on PyPy on Windows as of v7.3.3",
)
def test_symlink_nobackup(tmp_path):
assert list(tmp_path.iterdir()) == []
realdir = tmp_path / "real"
realdir.mkdir()
real = realdir / "realfile.txt"
real.write_text(TEXT)
linkdir = tmp_path / "link"
linkdir.mkdir()
link = linkdir / "linkfile.txt"
target = relpath(real, linkdir)
link.symlink_to(target)
with InPlace(str(link)) as fp:
for line in fp:
fp.write(line.swapcase())
assert list(realdir.iterdir()) == [real]
assert list(linkdir.iterdir()) == [link]
assert link.is_symlink()
assert os.readlink(str(link)) == target
assert link.read_text() == TEXT.swapcase()
assert real.read_text() == TEXT.swapcase()
def test_symlink_backup_ext(tmp_path):
assert list(tmp_path.iterdir()) == []
realdir = tmp_path / "real"
realdir.mkdir()
real = realdir / "realfile.txt"
real.write_text(TEXT)
linkdir = tmp_path / "link"
linkdir.mkdir()
link = linkdir / "linkfile.txt"
target = relpath(real, linkdir)
link.symlink_to(target)
with InPlace(str(link), backup_ext="~") as fp:
for line in fp:
fp.write(line.swapcase())
assert list(realdir.iterdir()) == [real]
assert sorted(linkdir.iterdir(), key=attrgetter("name")) == [
link,
link.with_suffix(".txt~"),
]
assert link.is_symlink()
assert os.readlink(str(link)) == target
assert link.with_suffix(".txt~").read_text() == TEXT
assert link.read_text() == TEXT.swapcase()
assert real.read_text() == TEXT.swapcase()
def test_symlink_backup(tmp_path):
assert list(tmp_path.iterdir()) == []
realdir = tmp_path / "real"
realdir.mkdir()
real = realdir / "realfile.txt"
real.write_text(TEXT)
linkdir = tmp_path / "link"
linkdir.mkdir()
link = linkdir / "linkfile.txt"
target = relpath(real, linkdir)
link.symlink_to(target)
bkp = tmp_path / "backup.txt"
with InPlace(str(link), backup=str(bkp)) as fp:
for line in fp:
fp.write(line.swapcase())
assert sorted(tmp_path.iterdir(), key=attrgetter("name")) == [
bkp,
linkdir,
realdir,
]
assert list(realdir.iterdir()) == [real]
assert list(linkdir.iterdir()) == [link]
assert link.is_symlink()
assert os.readlink(str(link)) == target
assert bkp.read_text() == TEXT
assert link.read_text() == TEXT.swapcase()
assert real.read_text() == TEXT.swapcase()
| mit | 1,346,868,938,396,955,400 | 30.640449 | 82 | 0.626065 | false |
adrienpacifico/openfisca-france | openfisca_france/model/prelevements_obligatoires/prelevements_sociaux/cotisations_sociales/exonerations.py | 1 | 24644 | # -*- coding: utf-8 -*-
from __future__ import division
from numpy import datetime64, maximum as max_, minimum as min_, round as round_, timedelta64
from ....base import * # noqa analysis:ignore
from .base import apply_bareme_for_relevant_type_sal
class jei_date_demande(Variable):
column = DateCol(default = date(2099, 12, 31))
entity_class = Individus
label = u"Date de demande (et d'octroi) du statut de jeune entreprise innovante (JEI)"
class exoneration_cotisations_employeur_geographiques(Variable):
column = FloatCol
entity_class = Individus
label = u"Exonérations de cotisations employeur dépendant d'une zone géographique"
url = "https://www.apce.com/pid815/aides-au-recrutement.html?espace=1&tp=1"
def function(self, simulation, period):
exoneration_cotisations_employeur_zfu = simulation.calculate_add('exoneration_cotisations_employeur_zfu',
period)
exoneration_cotisations_employeur_zrd = simulation.calculate_add('exoneration_cotisations_employeur_zrd',
period)
exoneration_cotisations_employeur_zrr = simulation.calculate_add('exoneration_cotisations_employeur_zrr',
period)
exonerations_geographiques = (exoneration_cotisations_employeur_zfu + exoneration_cotisations_employeur_zrd +
exoneration_cotisations_employeur_zrr)
return period, exonerations_geographiques
class exoneration_cotisations_employeur_jei(Variable):
column = FloatCol
entity_class = Individus
label = u"Exonrérations de cotisations employeur pour une jeune entreprise innovante"
url = "http://www.apce.com/pid1653/jeune-entreprise-innovante.html?pid=1653&pagination=2"
def function(self, simulation, period):
period = period.this_month
assiette_allegement = simulation.calculate('assiette_allegement', period)
jei_date_demande = simulation.calculate('jei_date_demande', period)
jeune_entreprise_innovante = simulation.calculate('jeune_entreprise_innovante', period)
plafond_securite_sociale = simulation.calculate('plafond_securite_sociale', period)
smic_proratise = simulation.calculate('smic_proratise', period)
type_sal = simulation.calculate('type_sal', period)
bareme_by_type_sal_name = simulation.legislation_at(period.start).cotsoc.cotisations_employeur
bareme_names = ['vieillesse_deplafonnee', 'vieillesse_plafonnee', 'maladie', 'famille']
exoneration = smic_proratise * 0.0
for bareme_name in bareme_names:
exoneration += apply_bareme_for_relevant_type_sal(
bareme_by_type_sal_name = bareme_by_type_sal_name,
bareme_name = bareme_name,
type_sal = type_sal,
base = min_(assiette_allegement, 4.5 * smic_proratise),
plafond_securite_sociale = plafond_securite_sociale,
round_base_decimals = 2,
)
exoneration_relative_year_passed = exoneration_relative_year(period, jei_date_demande)
rate_by_year_passed = {
0: 1,
1: 1,
2: 1,
3: 1,
4: 1,
5: 1,
6: 1,
7: 1,
} # TODO: move to legislation parameters file
for year_passed, rate in rate_by_year_passed.iteritems():
if (exoneration_relative_year_passed == year_passed).any():
                exoneration[exoneration_relative_year_passed == year_passed] *= rate
return period, - exoneration * jeune_entreprise_innovante
class exoneration_cotisations_employeur_zfu(Variable):
column = FloatCol
entity_class = Individus
label = u"Exonrérations de cotisations employeur pour l'embauche en zone franche urbaine (ZFU)"
url = "http://www.apce.com/pid553/exoneration-dans-les-zfu.html?espace=1&tp=1&pagination=2"
# TODO
# Ce dispositif d'exonération sociale est fermé depuis le 1er janvier 2015 mais reste applicable aux entreprises qui
# en bénéficiaient avant cette date.
# - ne pas être détenues à plus de 25 % par des entreprises employant plus de 250 salariés et dont le chiffre d'affaires
# ou dont le bilan excède 50 M€ ou 43 M€,
# - disposer des éléments d'exploitation ou des stocks nécessaires à l'activité des salariés,
# - être à jour de ses cotisations sociales ou avoir souscrit à un engagement d'apurement progressif de ses dettes.
#
# Secteurs d'activité concernés
#
# L'exonération est applicable, quel que soit le secteur d'activité.
# Toutefois, les entreprises exerçant une activité principale dans les secteurs de la construction automobile,
# construction navale, fabrication de fibres textiles artificielles ou synthétiques, sidérurgie ou des transports
# routiers de marchandises, ne pourront pas bénéficier de cette exonération.
# Embauche de résidents (clause d'embauche locale)
# Pour les entreprises qui se créent ou s'implantent dans une ZFU à compter du 1er janvier 2012, le bénéfice de
# l'exonération des cotisations sociales est subordonnée lors de toute nouvelle embauche à la condition que la moitié
# de salariés embauchés ou employés résident en ZFU ou en zone urbaine sensible.
#
# Le respect de la condition d'embauche locale est apprécié à la date d'effet de la nouvelle embauche dès la deuxième
# embauche.
#
# Précision : les salariés employés sont ceux déjà présents dans l'entreprise à la date de la nouvelle embauche, les
# salariés embauchés sont ceux recrutés depuis la date de création ou d'implantation de l'entreprise en ZFU.
#
# Est considéré comme résident le salarié habitant soit dans la ZFU d'implantation, soit dans l'une des ZUS de l'unité
# urbaine où se trouve la ZFU. Le maire peut, à la demande de l'employeur, fournir des éléments d'informations relatifs
# à la qualité de résident dans la zone afin de déterminer si la proportion exigée est respectée.
#
# Si la proportion n'est pas respectée à la date d'effet de l'embauche, l'employeur dispose d'un délai de 3 mois pour
# régulariser la situation. A défaut, le bénéfice de l'exonération est suspendu du 1er jour du mois suivant
# l'expiration du délai de 3 mois, jusqu'au 1er jour du mois suivant la date où la condition est de nouveau remplie.
#
# Le salarié résident doit être titulaire d'un contrat à durée indéterminée ou d'un contrat à durée déterminée d'au
# moins 12 mois, conclu pour une durée minimale de 16 heures par semaine.
# 5 ans +
# Dans les entreprises de 5 salariés et plus, les cotisations employeur bénéficient d'un abattement sur la base
# imposable pendant 3 ans de :
# - 60 % la première année,
# - 40 % la seconde année,
# - 20 % la troisième année.
#
# Dans les entreprises de moins de 5 salariés, un abattement est appliqué sur 9 ans de la manière suivante :
# - 60 % les 5 premières années,
# - 40 % les 2 années suivantes,
# - 20 % les deux dernières années.
#
# Le cumul de l'ensemble des aides publiques de minimis (allégements fiscaux, sociaux et aides des collectivités
# territoriales) ne peut dépasser le plafond des aides de minimis, fixé à 200 000 euros sur une période glissante de 36
# mois (100 000 euros pour les entreprises de transport routier).
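    # Illustrative reading of the schedule above (amounts invented): a firm of more
    # than 5 employees whose full monthly exoneration would be 1 000 EUR keeps
    # 1 000 EUR for the first five years, then 600 EUR (60 %), 400 EUR (40 %) and
    # 200 EUR (20 %), which is what large_rate_by_year_passed encodes below.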
def function(self, simulation, period):
period = period.this_month
assiette_allegement = simulation.calculate('assiette_allegement', period)
contrat_de_travail_duree = simulation.calculate('contrat_de_travail_duree', period) # 0: CDI, 1:CDD
contrat_de_travail_debut = simulation.calculate('contrat_de_travail_debut', period)
contrat_de_travail_fin = simulation.calculate('contrat_de_travail_fin', period)
effectif_entreprise = simulation.calculate('effectif_entreprise', period)
entreprise_chiffre_affaire = simulation.calculate('entreprise_chiffre_affaire', period)
entreprise_bilan = simulation.calculate('entreprise_bilan', period)
smic_proratise = simulation.calculate('smic_proratise', period)
taux_versement_transport = simulation.calculate('taux_versement_transport', period)
# TODO: move to legislation parameters file
entreprise_eligible = (entreprise_chiffre_affaire <= 1e7) | (entreprise_bilan <= 1e7)
smic_proratise = simulation.calculate('smic_proratise', period)
zone_franche_urbaine = simulation.calculate('zone_franche_urbaine', period)
duree_cdd_eligible = (contrat_de_travail_fin > contrat_de_travail_debut + timedelta64(365, 'D'))
# TODO: move to legislation parameters file
contrat_de_travail_eligible = (contrat_de_travail_debut <= datetime64("2014-12-31")) * (
(contrat_de_travail_duree == 0) + (
(contrat_de_travail_duree == 1) * (duree_cdd_eligible)
)
)
# TODO: move to legislation parameters file
eligible = (
contrat_de_travail_eligible *
(effectif_entreprise <= 50) *
zone_franche_urbaine *
entreprise_eligible
)
bareme_by_name = simulation.legislation_at(period.start).cotsoc.cotisations_employeur['prive_non_cadre']
taux_max = (
bareme_by_name['vieillesse_deplafonnee'].rates[0] +
bareme_by_name['vieillesse_plafonnee'].rates[0] +
bareme_by_name['maladie'].rates[0] +
bareme_by_name['famille'].rates[0] +
bareme_by_name['fnal1'].rates[0] +
bareme_by_name['fnal2'].rates[0] * (effectif_entreprise >= 20) +
taux_versement_transport
)
# TODO: move to legislation parameters file : voir http://www.urssaf.fr/images/ref_lc2009-077.pdf
seuil_max = 2
seuil_min = 1.4
taux_exoneration = compute_taux_exoneration(assiette_allegement, smic_proratise, taux_max, seuil_max, seuil_min)
exoneration_relative_year_passed = exoneration_relative_year(period, contrat_de_travail_debut)
large_rate_by_year_passed = {
0: 1,
1: 1,
2: 1,
3: 1,
4: 1,
5: .60,
6: .40,
7: .20,
} # TODO: move to legislation parameters file
small_rate_by_year_passed = {
0: 1,
1: 1,
2: 1,
3: 1,
4: 1,
5: .60,
6: .60,
7: .60,
8: .60,
9: .60,
10: .40,
11: .40,
12: .20,
13: .20,
} # TODO: move to legislation parameters file
large_taux_exoneration = eligible * 0.0
small_taux_exoneration = eligible * 0.0
        for year_passed, rate in large_rate_by_year_passed.iteritems():
            mask = exoneration_relative_year_passed == year_passed
            if mask.any():
                # apply the year's rate only to the rows of that year
                large_taux_exoneration[mask] = rate * taux_exoneration[mask]
        for year_passed, rate in small_rate_by_year_passed.iteritems():
            mask = exoneration_relative_year_passed == year_passed
            if mask.any():
                small_taux_exoneration[mask] = rate * taux_exoneration[mask]
exoneration_cotisations_zfu = eligible * assiette_allegement * (
small_taux_exoneration * (effectif_entreprise <= 5) +
large_taux_exoneration * (effectif_entreprise > 5)
)
return period, exoneration_cotisations_zfu
# TODO: propager dans le temps
class exoneration_cotisations_employeur_zrd(Variable):
column = FloatCol
entity_class = Individus
label = u"Exonrérations de cotisations employeur pour l'embauche en zone de restructuration de la Défense (ZRD)"
url = "http://www.apce.com/pid11668/exoneration-dans-les-zrd.html?espace=1&tp=1"
def function(self, simulation, period):
period = period.this_month
assiette_allegement = simulation.calculate('assiette_allegement', period)
entreprise_creation = simulation.calculate('entreprise_creation', period)
smic_proratise = simulation.calculate('smic_proratise', period)
zone_restructuration_defense = simulation.calculate('zone_restructuration_defense', period)
eligible = zone_restructuration_defense
taux_max = .281 # TODO: move to legislation parameters file
seuil_max = 2.4
seuil_min = 1.4
taux_exoneration = compute_taux_exoneration(assiette_allegement, smic_proratise, taux_max, seuil_max, seuil_min)
exoneration_relative_year_passed = exoneration_relative_year(period, entreprise_creation)
rate_by_year_passed = {
0: 1,
1: 1,
2: 1,
3: 2 / 3,
4: 1 / 3,
} # TODO: move to legislation parameters file
ratio = eligible * 0.0
for year_passed, rate in rate_by_year_passed.iteritems():
if (exoneration_relative_year_passed == year_passed).any():
ratio[exoneration_relative_year_passed == year_passed] = rate
exoneration_cotisations_zrd = ratio * taux_exoneration * assiette_allegement * eligible
return period, exoneration_cotisations_zrd
class exoneration_cotisations_employeur_zrr(Variable):
column = FloatCol
entity_class = Individus
label = u"Exonrérations de cotisations employeur pour l'embauche en zone de revitalisation rurale (ZRR)"
url = "http://www.apce.com/pid538/embauches-en-zru-et-zrr.html?espace=1&tp=1"
# Les entreprises et groupements d'employeurs exerçant une activité industrielle, commerciale, artisanale, agricole
# ou libérale et cotisant au régime d'assurance chômage.
# Les entreprises concernées, y compris chacune de celles appartenant à un groupement d'employeurs, doivent avoir
# au moins un établissement situé en zone de revitalisation rurale.
#
# A noter : les associations à but non lucratif sont exclues du dispositif. Par contre, quelle que soit leur forme
# juridique, les entreprises d'insertion ou d'intérim d'insertion peuvent en bénéficier. Les régies de quartier
# peuvent en bénéficier lorsque leur activité est susceptible d'entraîner l'assujettissement à la TVA à l'impôt sur
# les sociétés ainsi qu'à la contribution économique territoriale qu'elles en soient effectivement redevables
# ou non.
#
# L'employeur ne doit avoir procédé à aucun licenciement économique durant les 12 mois précédant l'embauche.
def function(self, simulation, period):
period = period.this_month
assiette_allegement = simulation.calculate('assiette_allegement', period)
contrat_de_travail_duree = simulation.calculate('contrat_de_travail_duree', period) # 0: CDI, 1:CDD
contrat_de_travail_debut = simulation.calculate('contrat_de_travail_debut', period)
contrat_de_travail_fin = simulation.calculate('contrat_de_travail_fin', period)
effectif_entreprise = simulation.calculate('effectif_entreprise', period)
smic_proratise = simulation.calculate('smic_proratise', period)
zone_revitalisation_rurale = simulation.calculate('zone_revitalisation_rurale', period)
duree_cdd_eligible = contrat_de_travail_fin > contrat_de_travail_debut + timedelta64(365, 'D')
# TODO: move to legislation parameters file
contrat_de_travail_eligible = (
contrat_de_travail_duree == 0) + (
(contrat_de_travail_duree == 1) * (duree_cdd_eligible)
)
duree_validite = (
datetime64(period.start) + timedelta64(1, 'D') - contrat_de_travail_debut).astype('timedelta64[Y]') < 1
eligible = (
contrat_de_travail_eligible *
(effectif_entreprise <= 50) *
zone_revitalisation_rurale *
duree_validite
)
taux_max = .281 if period.start.year < 2015 else .2655 # TODO: move to legislation parameters file
seuil_max = 2.4
seuil_min = 1.5
taux_exoneration = compute_taux_exoneration(assiette_allegement, smic_proratise, taux_max, seuil_max, seuil_min)
exoneration_cotisations_zrr = taux_exoneration * assiette_allegement * eligible
return period, exoneration_cotisations_zrr
# Aides à la création
class exoneration_is_creation_zrr(Variable):
column = FloatCol
entity_class = Individus
label = u"Exonrérations fiscales pour création d'une entreprise en zone de revitalisation rurale (ZRR)"
url = 'http://www.apce.com/pid11690/exonerations-d-impots-zrr.html?espace=1&tp=1'
def function(self, simulation, period):
period = period.this_year
effectif_entreprise = simulation.calculate('effectif_entreprise', period)
entreprise_benefice = simulation.calculate('entreprise_benefice', period)
# TODO: MODIFIER avec création d'entreprise
contrat_de_travail_duree = simulation.calculate('contrat_de_travail_duree', period) # 0: CDI, 1:CDD
contrat_de_travail_debut = simulation.calculate('contrat_de_travail_debut', period)
contrat_de_travail_fin = simulation.calculate('contrat_de_travail_fin', period)
duree_eligible = contrat_de_travail_fin > contrat_de_travail_debut + timedelta64(365, 'D')
# TODO: move to legislation parameters file
contrat_de_travail_eligible = (
contrat_de_travail_duree == 0) + (
(contrat_de_travail_duree == 1) * (duree_eligible)
)
zone_revitalisation_rurale = simulation.calculate('zone_revitalisation_rurale', period)
eligible = (
contrat_de_travail_eligible *
(effectif_entreprise <= 50) *
zone_revitalisation_rurale
)
exoneration_relative_year_passed = exoneration_relative_year(period, contrat_de_travail_debut)
rate_by_year_passed = {
0: 1,
1: 1,
2: 1,
3: 1,
4: 1,
5: .75,
6: .50,
7: .25,
} # TODO: move to legislation parameters file
        taux_exoneration = eligible * 0.0
        for year_passed, rate in rate_by_year_passed.iteritems():
            taux_exoneration[exoneration_relative_year_passed == year_passed] = rate
        return period, taux_exoneration * entreprise_benefice
# TODO: apply to every year
class jeune_entreprise_innovante(Variable):
column = BoolCol
entity_class = Individus
label = u"L'entreprise est une jeune entreprise innovante"
def function(self, simulation, period):
        # Any company existing on 1 January 2004 or created between 1 January 2004 and 31 December 2016,
        # provided it meets the following conditions:
        #
        # be less than 8 years old at the time of the request
        #
        # be genuinely new, i.e. not created through a merger, a restructuring, an extension of a
        # pre-existing activity or a takeover
        #
        # employ fewer than 250 people during the financial year for which it requests this status
        #
        # have a turnover below €50M and a balance-sheet total below €43M
        #
        # be independent, i.e. at least 50% of its capital must be held by:
        #
        # - natural persons
        #
        # - one or more other JEIs whose capital is itself at least 50% held by natural persons
        #
        # - associations or foundations recognised as being of public utility with a scientific purpose
        #
        # - research and teaching institutions and their subsidiaries
        #
        # - investment structures, provided there is no relationship of dependence, such as:
        #   - innovation mutual funds (FCPI)
        #   - venture-capital companies
        #   - local investment funds (FIP)
        #   - regional development companies (SDR)
        #   - innovation finance companies (SFI)
        #   - single-shareholder venture-investment companies (SUIR).
        #
        # incur R&D expenses representing at least 15% of the tax-deductible expenses for the same
        # financial year.
effectif_entreprise = simulation.calculate('effectif_entreprise', period)
entreprise_bilan = simulation.calculate('entreprise_bilan', period)
entreprise_chiffre_affaire = simulation.calculate('entreprise_chiffre_affaire', period)
entreprise_creation = simulation.calculate('entreprise_creation', period)
# entreprise_depenses_rd = simulation.calculate('entreprise_depenses_rd', period)
jei_date_demande = simulation.calculate('jei_date_demande', period)
# TODO: move to legislation parameters file
# entreprise_depenses_rd > .15 TODO
independance = True
jeune_entreprise_innovante = (
independance *
(effectif_entreprise < 250) *
(entreprise_creation <= datetime64("2016-12-31")) *
((jei_date_demande + timedelta64(1, 'D') - entreprise_creation).astype('timedelta64[Y]') < 8) *
(entreprise_chiffre_affaire < 50e6) *
(entreprise_bilan < 43e6)
)
return period, jeune_entreprise_innovante
class bassin_emploi_redynamiser(Variable):
column = BoolCol
entity_class = Individus
label = u"L'entreprise est située danns un bassin d'emploi à redynamiser (BER)"
    # The list of employment areas to be revitalised was set by decree n°2007-228 of 20 February 2007.
    # Currently, two regions are concerned: Champagne-Ardenne (Vallée de la Meuse employment area)
    # and Midi-Pyrénées (Lavelanet employment area).
def function(self, simulation, period):
effectif_entreprise = simulation.calculate('effectif_entreprise', period)
return period, (effectif_entreprise >= 1) * False
class zone_restructuration_defense(Variable):
column = BoolCol
entity_class = Individus
label = u"L'entreprise est située dans une zone de restructuration de la Défense (ZRD)"
def function(self, simulation, period):
effectif_entreprise = simulation.calculate('effectif_entreprise', period)
return period, (effectif_entreprise >= 1) * False
class zone_franche_urbaine(Variable):
column = BoolCol
entity_class = Individus
label = u"L'entreprise est située danns une zone franche urbaine (ZFU)"
def function(self, simulation, period):
effectif_entreprise = simulation.calculate('effectif_entreprise', period)
return period, (effectif_entreprise >= 1) * False
class zone_revitalisation_rurale(Variable):
column = BoolCol
entity_class = Individus
label = u"L'entreprise est située dans une zone de revitalisation rurale (ZRR)"
def function(self, simulation, period):
effectif_entreprise = simulation.calculate('effectif_entreprise', period)
return period, (effectif_entreprise >= 1) * False
# Helpers
def compute_taux_exoneration(assiette_allegement, smic_proratise, taux_max, seuil_max, seuil_min = 1):
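    # Degressive relief rate: equals taux_max for wages up to seuil_min x SMIC and falls to 0 at seuil_max x SMIC.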
ratio_smic_salaire = smic_proratise / (assiette_allegement + 1e-16)
    # rounding rule: 4 decimal places, to the nearest ten-thousandth (# TODO: carried over from the Fillon relief, unchecked)
return round_(
taux_max * min_(1, max_(seuil_max * seuil_min * ratio_smic_salaire - seuil_min, 0) / (seuil_max - seuil_min)),
4,
)
def exoneration_relative_year(period, other_date):
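    # Number of whole years elapsed between other_date and the day after the period start.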
return (datetime64(period.start) + timedelta64(1, 'D') - other_date).astype('timedelta64[Y]')
| agpl-3.0 | 2,386,070,103,312,394,000 | 47.772 | 120 | 0.672517 | false |
Niddel/magnet-api2-sdk-python | magnetsdk2/cef.py | 1 | 4911 | # -*- coding: utf-8 -*-
"""
This module implements writing CEF format events.
"""
from math import ceil, trunc
import six
from magnetsdk2.time import seconds_from_UTC_epoch
def escape_header_entry(x):
"""
Escapes backslashes and pipes from a header entry.
:param x: the string value to escape
:return: escaped and trimmed UTF-8 encoded str / bytes
"""
if not isinstance(x, six.string_types):
x = x.__str__()
return x.replace('\\', '\\\\').replace('|', '\\|').strip()
def header(device_vendor, device_product, device_version, signature_id, name, severity):
"""
Builds a CEF version 0 header with the given fields
:return: escaped and trimmed UTF-8 encoded str / bytes
"""
if isinstance(severity, float):
severity = trunc(severity)
if isinstance(severity, six.integer_types):
if severity < 0 or severity > 10:
raise ValueError('severity must be between 0 and 10')
severity = '{0:d}'.format(severity)
return '|'.join(map(escape_header_entry,
['CEF:0', device_vendor, device_product, device_version, signature_id, name,
severity, ''])).strip()
def escape_extension_value(x):
"""
    Escapes backslashes, equals signs and line breaks from an extension entry value.
:param x: the string value to escape
:return: escaped and trimmed UTF-8 encoded str / bytes
"""
if not isinstance(x, six.string_types):
x = x.__str__()
return x.replace('\\', '\\\\').replace('=', '\\=').replace('\n', '\\n').replace('\r',
'\\r').strip()
def extension(fields):
"""
Builds a CEF version 0 extension with the given fields. Fields will be sorted by name.
:param fields: dict containing fields to include
:return: escaped and trimmed UTF-8 encoded str / bytes
"""
fields = sorted([(k, v) for k, v in six.iteritems(fields) if v], key=lambda x: x[0])
return ' '.join([e[0].strip() + '=' + escape_extension_value(e[1])
for e in fields]).strip()
def timestamp(ts):
"""
Converts an ISO date and time in UTC into milliseconds from epoch as expected by CEF format.
:param ts: string containing the date and time in ISO 8601 format
:return: number of milliseconds since epoch
"""
if not ts:
return None
if not ts.endswith('Z'):
ts = ts + 'Z'
return '{0:d}'.format(trunc(seconds_from_UTC_epoch(ts) * 1000))
def convert_alert_cef(obj, alert, organization):
"""
Converts a Niddel Magnet v2 API alert into an approximate CEF version 0 representation.
:param obj: file-like object in binary mode to write to
:param alert: dict containing a Niddel Magnet v2 API
:return: an str / bytes object containing a CEF event
"""
obj.write(header(device_vendor='Niddel', device_product='Magnet', device_version='1.0',
signature_id='infected_outbound',
name='Potentially Infected or Compromised Endpoint',
severity=max(ceil(alert['confidence'] / 10), 0)).encode('UTF-8'))
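    # CEF extension fields: csN/csNLabel pairs carry custom strings, cfp1 a custom floating point
    # value, and deviceCustomDateN custom timestamps.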
ext = {
'cs1': organization,
'cs1Label': 'organizationId',
'cs2': alert['batchDate'],
'cs2Label': 'batchDate',
'start': timestamp(alert['logDate'] + 'T' + alert['aggFirst']),
'end': timestamp(alert['logDate'] + 'T' + alert['aggLast']),
'externalId': alert['id'],
'cfp1': alert['confidence'],
'cfp1Label': 'confidence',
'cnt': alert['aggCount'],
'shost': alert.get('netSrcIpRdomain', None),
'src': alert.get('netSrcIp', None),
'dst': alert.get('netDstIp', None),
'dhost': alert.get('netDstDomain', None),
'dpt': alert.get('netDstPort', None),
'proto': alert.get('netL4proto', None),
'app': alert.get('netL7proto', alert.get('netApp', None)),
'suid': alert.get('netSrcUser', None),
'deviceCustomDate1': timestamp(alert.get('createdAt', None)),
'deviceCustomDate1Label': 'createdAt',
'deviceCustomDate2': timestamp(alert.get('updatedAt', None)),
'deviceCustomDate2Label': 'updatedAt',
'deviceDirection': 1,
'dtz': 'GMT'
}
if 'netBlocked' in alert:
if alert['netBlocked']:
ext['act'] = 'allow'
else:
ext['act'] = 'deny'
if 'tags' in alert:
ext['cs3'] = ','.join(sorted(alert['tags']))
ext['cs3Label'] = 'tags'
if 'netDeviceTypes' in alert:
ext['cs4'] = ','.join(sorted(alert['netDeviceTypes']))
ext['cs4Label'] = 'netDeviceTypes'
if 'netSrcProcessId' in alert:
ext['cs5'] = alert['netSrcProcessId']
ext['cs5Label'] = 'netSrcProcessId'
    # write the extension after the header
obj.write(extension(ext).encode('UTF-8'))
| apache-2.0 | -9,034,116,061,246,250,000 | 35.377778 | 100 | 0.589086 | false |
pombredanne/metamorphosys-desktop | metamorphosys/META/3rdParty/ctemplate-1.0/src/htmlparser/generate_fsm.py | 1 | 10998 | #!/usr/bin/env python
#
# Copyright (c) 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ---
#
# Generate a C include file from a finite state machine definition.
#
# Right now the form is the one expected by htmlparser.c so this file is pretty
# tightly coupled with htmlparser.c.
#
__author__ = '[email protected] (Filipe Almeida)'
import sys
from fsm_config import FSMConfig
class FSMGenerateAbstract(object):
def __init__(self, config):
self._config = config
def Generate(self):
"""Returns the generated FSM description for the specified language.
Raises a TypeError, because abstract methods can not be called.
Raises:
TypeError
"""
    raise TypeError('Abstract method %s.%s called' % (self.__class__.__name__,
                                                      'Generate'))
class FSMGenerateC(FSMGenerateAbstract):
"""Generate the C definition from a statemachien configuration object."""
TABSTOP_ = 2
def _Prefix(self):
"""Return a c declaration prefix."""
return self._config.name.lower() + '_'
def _StateInternalC(self, st):
"""Return the internal name of the state."""
return '%sSTATE_INT_%s' % (self._Prefix().upper(), st.upper())
def _StateExternalC(self, st):
"""Return the external name of the state."""
return '%sSTATE_%s' % (self._Prefix().upper(), st.upper())
def _MakeTuple(self, data):
"""Converts data to a string representation of a C tuple."""
return '{ %s }' % ', '.join(data)
def _CreateHeader(self):
"""Print the include file header."""
out = []
if self._config.comment:
out.append('/* ' + self._config.comment)
else:
out.append('/* State machine definition for ' + self._config.name)
out.append(' * Auto generated by generate_fsm.py. Please do not edit.')
out.append(' */')
return '\n'.join(out)
def _ListToIndentedString(self, list):
indented_list = [' ' + e for e in list]
return ',\n'.join(indented_list)
def _CreateEnum(self, name, data):
"""Print a c enum definition."""
return 'enum %s {\n%s\n};\n' % (name,
self._ListToIndentedString(data))
def _CreateStructList(self, name, type, data):
"""Print a c flat list.
Generic function to print list in c in the form of a struct.
Args:
name: name of the structure.
type: type of the struct.
data: contents of the struct as a list of elements
Returns:
String with the generated list.
"""
return "static const %s %s[] = {\n%s\n};\n" % (
type,
name,
self._ListToIndentedString(data))
def _CreateStatesEnum(self):
"""Print the internal states enum.
Prints an enum containing all the valid states.
Returns:
String containing a C enumeration of the states.
"""
list = [] # output list
for state in self._config.states:
list.append(self._StateInternalC(state))
return self._CreateEnum(self._Prefix() + 'state_internal_enum', list)
def _CreateStatesExternal(self):
"""Print a struct with a mapping from internal to external states."""
list = [] # output list
for state_name in self._config.states:
list.append(self._StateExternalC(
self._config.states[state_name].external_name))
return self._CreateStructList(self._Prefix() + 'states_external',
'int',
list)
def _CreateStatesInternalNames(self):
"""Return a struct mapping internal states to a strings."""
out = [] # output list
for state_name in self._config.states:
out.append('"' + state_name + '"')
return self._CreateStructList(self._Prefix() + 'states_internal_names',
'char *',
out)
def _CreateNumStates(self):
"""Print a Macro defining the number of states."""
return "#define %s_NUM_STATES %s" % (self._config.name.upper(),
str(len(self._config.states) + 1))
def _ExpandBracketExpression(self, expression):
"""Expand ranges in a regexp bracket expression.
Returns a string with the ranges in a bracket expression expanded.
The bracket expression is similar to grep(1) or regular expression bracket
expressions but it does not support the negation (^) modifier or named
character classes like [:alpha:] or [:alnum:].
The especial character class [:default:] will expand to all elements in the
ascii range.
For example, the expression 'a-c13A-D' will expand to 'abc13ABCD'.
Args:
expression: A regexp bracket expression. Ie: 'A-Z0-9'.
Returns:
A string with the ranges in the bracket expression expanded.
"""
def ExpandRange(start, end):
"""Return a sequence of characters between start and end.
Args:
start: first character of the sequence.
end: last character of the sequence.
Returns:
string containing the sequence of characters between start and end.
"""
return [chr(c) for c in range(ord(start), ord(end) + 1)]
def ListNext(input_list):
"""Pop the first element of a list.
Args:
input_list: python list object.
Returns:
First element of the list or None if the list is empty.
"""
if input_list:
return input_list.pop(0)
else:
return None
out = [] # List containing the output
# Special case for the character class [:default:]
if expression == '[:default:]':
out = [chr(c) for c in range(0, 255)]
return ''.join(out)
    chars = [c for c in expression]  # list of characters in the expression.
current = ListNext(chars)
while current:
next = ListNext(chars)
if next == '-':
next = ListNext(chars)
if next:
out.extend(ExpandRange(current, next))
else:
out.append(current)
out.append('-')
current = ListNext(chars)
else:
out.append(current)
current = next
return ''.join(out)
def _CreateTransitionTable(self):
"""Print the state transition list.
Returns a set of C structures that define the transition table for the state
machine. This structure is a list of lists of ints (int **). The outer list
indexes the source state and the inner list contains the destination state
for each of the possible input characters:
const int * const* transitions[source][input] == destination.
The conditions are mapped from the conditions variable.
Returns:
String containing the generated transition table in a C struct.
"""
out = [] # output list
default_state = 'STATEMACHINE_ERROR'
state_table = {}
for state in self._config.states:
state_table[state] = [default_state for col in xrange(255)]
# We process the transition in reverse order while updating the table.
for i_transition in range(len(self._config.transitions) - 1, -1, -1):
transition = self._config.transitions[i_transition]
(condition_name, src, dst) = (transition.condition,
transition.source,
transition.destination)
condition = self._config.conditions[condition_name]
char_list = self._ExpandBracketExpression(condition)
for c in char_list:
state_table[src][ord(c)] = self._StateInternalC(dst)
# Create the inner lists which map input characters to destination states.
for state in self._config.states:
transition_row = []
for c in xrange(0, 255):
transition_row.append(' /* %06s */ %s' % (repr(chr(c)),
state_table[state][c]))
out.append(self._CreateStructList('%stransition_row_%s' %
(self._Prefix(),
state),
'int',
transition_row))
out.append('\n')
# Create the outer list, which map source states to input characters.
out.append('static const %s %s[] = {\n' % ('int *', self._Prefix() +
'state_transitions'))
row_list = [' %stransition_row_%s' %
(self._Prefix(), row) for row in self._config.states]
out.append(',\n'.join(row_list))
out.append('\n};\n')
return ''.join(out)
def Generate(self):
"""Returns the generated the C include statements for the statemachine."""
print '\n'.join((self._CreateHeader(),
self._CreateNumStates(),
self._CreateStatesEnum(),
self._CreateStatesExternal(),
self._CreateStatesInternalNames(),
self._CreateTransitionTable()))
def main():
if len(sys.argv) != 2:
print "usage: generate_fsm.py config_file"
sys.exit(1)
config = FSMConfig()
config.Load(sys.argv[1])
gen = FSMGenerateC(config)
gen.Generate()
if __name__ == "__main__":
main()
| mit | -816,260,982,260,084,900 | 31.327273 | 80 | 0.600382 | false |
cameronbwhite/PyOLP | PyOLP/paginated.py | 1 | 3928 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Copyright (C) 2013, Cameron White
#
# PyGithub is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>.
class PaginatedList():
def __init__(self, contentClass, requester, uri, parameters=None):
self.__requester = requester
self.__contentClass = contentClass
self.__uri = uri
self.__parameters = parameters
self.__getFirstPage()
def _applyContentClass(self, element):
return self.__contentClass(
self.__requester, self.__headers, element)
def _isBiggerThan(self, index):
return len(self.__elements) > index or self.__couldGrow()
def __couldGrow(self):
if self.__next is None:
return False
else:
return True
def __fetchToIndex(self, index):
while len(self.__elements) <= index and self.__couldGrow():
self.__grow()
def __getFirstPage(self):
headers, data = self.__requester.requestJsonAndCheck(
self.__uri,
self.__parameters
)
self.__elements = self.__parse(headers, data)
def __getitem__(self, index):
assert isinstance(index, (int, slice))
if isinstance(index, (int, long)):
self.__fetchToIndex(index)
return self.__elements[index]
else:
return self._Slice(self, index)
def __getNextPage(self):
headers, data = self.__requester.requestJsonAndCheck(
self.__next
)
return self.__parse(headers, data)
def __grow(self):
newElements = self.__getNextPage()
self.__elements += newElements
return newElements
    def __iter__(self):
        for element in self.__elements:
            yield self._applyContentClass(element)
        while self.__couldGrow():
            newElements = self.__grow()
            for element in newElements:
                yield self._applyContentClass(element)
def __parse(self, headers, data):
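        # Cache the response headers and pagination metadata, then return only this page's objects.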
self.__headers = headers
meta = data["meta"]
self.__limit = meta["limit"]
self.__next = meta["next"]
self.__offset = meta["offset"]
self.__previous = meta["previous"]
self.__total_count = meta["total_count"]
return data["objects"]
class _Slice:
def __init__(self, theList, theSlice):
self.__list = theList
self.__start = theSlice.start or 0
self.__stop = theSlice.stop
self.__step = theSlice.step or 1
def __iter__(self):
index = self.__start
while not self.__finished(index):
if self.__list._isBiggerThan(index):
yield self.__list._applyContentClass(self.__list[index])
index += self.__step
else:
return
def __finished(self, index):
return self.__stop is not None and index >= self.__stop
| gpl-3.0 | 1,788,661,924,577,692,200 | 33.156522 | 79 | 0.532332 | false |
sea-kg/inventory-files | contrib/ppa/build_source_pkg_for_ppa.py | 1 | 6043 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import fileinput
import re
import subprocess
import os
import sys
import shutil
from pprint import pprint
import datetime
print ("Welcome to preapre ppa package...")
dists = []
# https://wiki.ubuntu.com/Releases
dists.append({
"name": "Ubuntu 16.04.7 LTS (xenial)",
"dist_name": "xenial",
"ppa_name_suffix": "ppa-ubuntu-16-04-xenial-1",
"end": "April 2021",
"version": "16.04.7 LTS"
})
dists.append({
"name": "Ubuntu 18.04.5 LTS (bionic)",
"dist_name": "bionic",
"ppa_name_suffix": "ppa-ubuntu-18-04-bionic-2",
"end": "April 2023",
"version": "18.04.5 LTS"
})
dists.append({
"name": "Ubuntu 20.04.2 LTS (focal)",
"dist_name": "focal",
"ppa_name_suffix": "ppa-ubuntu-20-04-focal-2",
"end": "April 2025",
"version": "20.04.2 LTS"
})
dists.append({
"name": "Ubuntu 20.10 (groovy)",
"dist_name": "groovy",
"ppa_name_suffix": "ppa-ubuntu-20-10-groovy-1",
"end": "July 2021",
"version": "20.10"
})
print("Please choose dist name:")
i = 0
for d in dists:
print(' ' + str(i) + '. ' + d['dist_name'] + ' (' + d['version'] + '), date end: ' + d['end'])
i = i + 1
dist_num_ = input("Enter number of dist: ")
dist_num_ = int(dist_num_)
if dist_num_ >= len(dists):
sys.exit("Wrong dist number")
dist_name_ = dists[dist_num_]['dist_name']
ppa_name_ = dists[dist_num_]['ppa_name_suffix']
print("Dist Name: " + dist_name_)
#############################################
def clear_all():
print( " -> Clear all")
if os.path.exists('./inventory-files'):
shutil.rmtree('./inventory-files')
print( " -> DONE")
print( " -> Cleanup previous ppa packages")
onlyfiles = [f for f in os.listdir('./') if os.path.isfile(os.path.join('./', f))]
for f in onlyfiles:
m = re.search(r'^inventory-files_(\d+\.\d+\.\d+)-ppa-.*(\.orig\.tar\.gz|source\.changes|_source\.build|_source.ppa.upload|\.tar\.gz|_source\.buildinfo|\.dsc)$', f)
if m:
print('Remove file ' + f)
os.remove(f)
clear_all()
print( " -> Prepare sources directory ")
os.mkdir('./inventory-files')
def copytree(src, dst, symlinks=False, ignore=None):
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
shutil.copytree('../../src', './inventory-files/src', symlinks=False, ignore=None)
shutil.copytree('../../res', './inventory-files/res', symlinks=False, ignore=None)
shutil.copy2('../../inventory-files.pro', './inventory-files/inventory-files.pro')
shutil.copy2('../../inventory-files.qrc', './inventory-files/inventory-files.qrc')
shutil.copy2('../../VERSION', './inventory-files/VERSION')
shutil.copy2('../../LICENSE', './inventory-files/LICENSE')
shutil.copytree('./debian', './inventory-files/debian', symlinks=False, ignore=None)
shutil.copytree('./install-files', './inventory-files/install-files', symlinks=False, ignore=None)
print( " -> DONE ")
#############################################
print( " -> Read version of package ")
f = open("../../VERSION",'r')
filedata = f.read()
f.close()
print(filedata)
m = re.search('(\\d+\\.\\d+\\.\\d+)', filedata)
if m:
current_version = m.group(1)
print ("\n *** Current version: " + current_version + "\n")
# parse CHANGELOG.md
changelog_list = []
version_logs = {'version': '', 'dt': '', 'logs': []}
lines = [line.rstrip('\n') for line in open('../../CHANGELOG.md')]
for li in lines:
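    # Version headings look like: "## [v1.2.3] - 2021-03-01 (comment)"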
m = re.search(r'[ ]*##[ ]+\[v(\d+\.\d+\.\d+)\][ ]*-[ ]*(\d+)-(\d+)-(\d+)[ ]*\((.*)\).*', li)
if m:
if version_logs['version'] != '':
changelog_list.append(version_logs)
version_logs = {'version': '', 'dt': '', 'logs': []}
ver = m.group(1)
year = int(m.group(2))
month = int(m.group(3))
day = int(m.group(4))
_dt = datetime.date(year, month, day)
# must be format Mon, 22 Mar 2010 00:37:31 +0100
dt = _dt.strftime("%a, %d %b %Y %H:%M:%S +0700")
version_logs['version'] = ver
version_logs['dt'] = dt
if version_logs['version'] == '':
continue
m = re.search('[ ]*-[ ]*(.*)', li)
if m:
line_log = m.group(1)
version_logs['logs'].append(line_log)
if version_logs['version'] != '':
changelog_list.append(version_logs)
version_logs = {'version': '', 'dt': '', 'logs': []}
print(version_logs)
#############################################
print( " -> Prepare changelog ")
changelog="./inventory-files/debian/changelog"
f = open(changelog,'w')
li_count = 0
for li in changelog_list:
if li_count != 0:
f.write("\n")
f.write("\n")
li_count = li_count + 1
f.write("inventory-files (" + li['version'] + "-" + ppa_name_ + ") " + dist_name_ + "; urgency=low\n\n")
for li_log in li['logs']:
li_log = li_log.strip()
if li_log != '':
f.write(" * " + li_log + "\n")
f.write("\n")
#if li['dt'] == '?':
# li['dt'] = subprocess.Popen(['date', '-R'], stdout=subprocess.PIPE).communicate()[0]
f.write(" -- Evgenii Sopov <[email protected]> " + li['dt']) # 2 space!!!
f.write("\n")
f.close()
print( " -> DONE ")
# TODO
# subprocess.call("./clean_sources_ppa.sh")
#############################################
print( " -> Prepare tar.gz source package ")
os.system("cd ./ && tar -acf inventory-files_" + current_version + "-" + ppa_name_ + ".orig.tar.gz inventory-files")
os.system("cd ./inventory-files && debuild -S -sa")
print( " -> DONE ")
dput_filename = "inventory-files_" + current_version + "-" + ppa_name_ + "_source.changes"
os.system("debsign -k 3AA3105C5766233DD2F243A3A742BE2E628592AC " + dput_filename)
sys.stdout.write("Are you want try upload source package to ppa.launchpad? [y/n]: ")
ask_upload_ = input().lower()
if ask_upload_ == "y":
os.system("dput ppa:sea5kg/inventory-files " + dput_filename)
| mit | 3,938,819,940,711,231,500 | 28.478049 | 171 | 0.557836 | false |
Kaushikpatnaik/LSTM-Encoder-for-Driver-Telematics | dataProcess.py | 1 | 7823 | import numpy as np
import os
import random
from collections import defaultdict, Counter
from sklearn.utils import resample, shuffle
import math
def createFeatures(dataWin1, dataWin2, dataWin3):
    # given three raw data windows compute velocity, acceleration
    # and change in direction
vecData = np.array(np.subtract(dataWin2, dataWin1))
vecData2 = np.array(np.subtract(dataWin3, dataWin2))
accData = np.subtract(vecData2, vecData)
dirData = np.arctan(np.divide(dataWin2[1],dataWin2[0]))
minVecX, minVecY = np.amin(vecData, axis=0)
maxVecX, maxVecY = np.amax(vecData, axis=0)
avgVecX, avgVecY = np.average(vecData, axis=0)
minAccX, minAccY = np.amin(accData, axis=0)
maxAccX, maxAccY = np.amax(accData, axis=0)
avgAccX, avgAccY = np.average(accData, axis=0)
minDir = np.amin(dirData, axis=0)
maxDir = np.amax(dirData, axis=0)
avgDir = np.average(dirData, axis=0)
featVector = [minVecX, minVecY, maxVecX, maxVecY, avgVecX, avgVecY, minDir, maxDir, avgDir, minAccX, minAccY, maxAccX, maxAccY, avgAccX, avgAccY]
return featVector
def getData(allpath):
    # given a directory path, returns seqID (a list of sequence IDs)
    # and dataFile (a numpy array containing the features)
dataFile = []
seqID = []
filepath = []
for dirs, subdir, files in os.walk(allpath):
for ifile in files:
filepath.append(dirs + "/" + ifile)
for path in filepath:
s = path.split("/")
data = []
with open(path,"r") as filename:
count = 0
countSeq = 1
temp_collec = []
for line in filename:
a,b = line.split(",")
data.append([a,b[0:-1]])
i = 2
#round off the trip length to the nearest 200
rng = int(np.floor((len(data)-6)/200)*200)
while i<rng:
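                # three overlapping 4-sample windows, each shifted by one sample,
                # used for the velocity, acceleration and heading features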
dataWin1 = np.array(data[i-1:i+3], dtype=float)
dataWin2 = np.array(data[i:i+4], dtype=float)
dataWin3 = np.array(data[i+1:i+5], dtype=float)
temp = createFeatures(dataWin1, dataWin2, dataWin3)
#convert all "nan's" and zeros to small values
for k in range(len(temp)):
if math.isnan(temp[k]):
temp[k] = 0.00001
temp_collec.append(temp[k])
count += 1
if count == 50:
#print len(temp_collec)
dataFile.append(temp_collec)
temp = s[3].split(".")
seqID.append(s[2]+"-"+temp[0]+"-"+str(countSeq))
temp_collec = []
countSeq += 1
count = 0
i += 4
dataFile = np.array(dataFile)
seqID = np.array(seqID)
returnVal = [seqID, dataFile]
return returnVal
if __name__ == "__main__":
ROOTDIR = "./"
subdirpath = []
# read through the directory to obtain subdirectory for each driver
for dirs, subdirs, files in os.walk(ROOTDIR+"data"):
for subdir in subdirs:
subdirpath.append(dirs+"/"+subdir)
    # for each driver, we also collect data from randomly chosen other drivers
    # as false trips
driver_collec = defaultdict(list)
for subdir in subdirpath:
s = subdir.split('/')
driver_collec[s[2]].append(subdir)
for j in range(1):
#draw a random choice
temp = random.choice(subdirpath)
if temp != subdir:
driver_collec[s[2]].append(temp)
# for each key of the dictionary we generate a csv file
for key in driver_collec.keys():
filepath = []
values = driver_collec[key]
print "Creating file for driver: " + str(key)
# get data for the driver
[dSeqID, dData] = getData(values[0])
# get data for other drivers
[oSeqID, oData] = getData(values[1])
'''
k = 2
while k < len(values[2:]):
[temp1, temp2] = getData(values[k])
#print temp1.shape, temp2.shape
#print oSeqID.shape, oData.shape
oSeqID = np.hstack((oSeqID, temp1))
oData = np.vstack((oData, temp2))
k += 1
'''
print oData.shape, dData.shape
print "Resampling Data"
if oData.shape[0] > dData.shape[0]:
row = dData.shape[0]
trow = oData.shape[0]
# resample data with replacement
while row < (trow-row):
temp1, temp2 = resample(dData, dSeqID, n_samples = row, random_state = 0)
#print temp1.shape, temp2.shape
#print dSeqID.shape, dData.shape
dSeqID = np.hstack((dSeqID, temp2))
dData = np.vstack((dData, temp1))
row += row
diff = trow - row
temp1, temp2 = resample(dData, dSeqID, n_samples = diff, random_state = 0)
dSeqID = np.hstack((dSeqID, temp2))
dData = np.vstack((dData, temp1))
else:
row = oData.shape[0]
trow = dData.shape[0]
# resample data with replacement
while row < (trow-row):
temp1, temp2 = resample(oData, oSeqID, n_samples = row, random_state = 0)
#print temp1.shape, temp2.shape
#print dSeqID.shape, dData.shape
oSeqID = np.hstack((oSeqID, temp2))
oData = np.vstack((oData, temp1))
row += row
diff = trow - row
temp1, temp2 = resample(oData, oSeqID, n_samples = diff, random_state = 0)
oSeqID = np.hstack((oSeqID, temp2))
oData = np.vstack((oData, temp1))
print oData.shape, dData.shape
print dSeqID.shape, oSeqID.shape
# append data
seqID = np.hstack((dSeqID, oSeqID))
data = np.vstack((dData, oData))
print "Shuffling Data"
# shuffle
seqID, data = shuffle(seqID, data, random_state = 0)
row, col = data.shape
print "Created Dataset in desired format"
# write to file
with open(ROOTDIR+"proc_data/datafile_"+str(key)+".csv","w") as filename:
for i in range(row):
writedata = data[i]
newwritedata = np.reshape(writedata, (50,15))
for j in range(50):
for k in range(14):
filename.write(str(newwritedata[j][k]))
filename.write(",")
filename.write(str(newwritedata[j][14]))
filename.write("\n")
# since class names are not unique, create a dictionary of names and save it also
with open(ROOTDIR+"proc_data/classfile_"+str(key)+".csv","w") as filename:
for i in range(row):
temp = seqID[i].split("-")
#print temp[0], str(key), temp[0] == str(key)
for k in range(50):
writedata = temp[0]
if writedata == str(key):
filename.write(str(1))
else:
filename.write(str(2))
filename.write("\n")
# write out the mapping
with open(ROOTDIR+"proc_data/classmap_"+str(key)+".csv","w") as filename:
for i in range(row):
writedata = seqID[i]
filename.write(writedata)
filename.write("\n")
| mit | -992,711,337,174,148,200 | 34.721461 | 149 | 0.51208 | false |
zorna/zorna | zorna/site/templatetags/site_tags.py | 1 | 1112 | from django.template import TemplateSyntaxError
from django import template
register = template.Library()
from zorna.site.models import SiteOptions
class check_if_has_access_to_option_node(template.Node):
def __init__(self, key, var_name):
self.var_name = var_name
if not (key[0] == key[-1] and key[0] in ('"', "'")):
self.key = key
else:
self.key = key[1:-1]
def render(self, context):
request = context['request']
try:
context[self.var_name] = SiteOptions.objects.is_access_valid(
request.user, self.key)
except:
pass
return ''
@register.tag(name="check_if_has_access_to")
def check_if_has_access_to_option(parser, token):
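    """Usage: {% check_if_has_access_to "option_key" as var_name %}"""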
bits = token.split_contents()
if 4 != len(bits):
raise TemplateSyntaxError('%r expects 4 arguments' % bits[0])
if bits[-2] != 'as':
raise TemplateSyntaxError(
'%r expects "as" as the second argument' % bits[0])
key = bits[1]
varname = bits[-1]
return check_if_has_access_to_option_node(key, varname)
| bsd-3-clause | -8,295,376,805,729,403,000 | 29.054054 | 73 | 0.598921 | false |
gthank/patois | setup.py | 1 | 1536 | from __future__ import (print_function, absolute_import,
unicode_literals, division)
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import patois
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
with open('README.rst', 'r') as readme:
patois_long_description = readme.read()
with open('LICENSE', 'r') as license:
patois_license = license.read()
patois_classifiers = (
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: BSD',
'Operating System :: POSIX :: Linux',
    'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities',
)
setup(
name='patois',
version=patois.__version__,
description='Python VM compatibility library',
long_description=patois_long_description,
author='Hank Gay',
author_email='[email protected]',
url="https://pypi.python.org/pypi/patois/",
py_modules=['patois',],
license=patois_license,
zip_safe=False,
classifiers=patois_classifiers,
)
| mit | 7,708,837,189,164,913,000 | 25.482759 | 56 | 0.64974 | false |
NS2LPS/pyslave | pyslave/magic.py | 1 | 20094 | """This module defines magic IPython functions to run pyslave from the IPython shell."""
import time, os, logging, inspect, logging.handlers, sys, io
from collections import OrderedDict
import configparser
import traceback
import sys
from matplotlib.pyplot import figure
from IPython.core.magic import register_line_magic, needs_local_scope
from pyslave import instruments, __slave__
from pyslave.slave import SlaveWindow
__slave_window__ = __slave__['window']
# Logger
logger = logging.getLogger('pyslave.magic')
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.NullHandler())
# List of resources that are handled
__resources__ = ['VISA', 'NIDAQ', 'COM', 'Other']
# Keep trace of all instruments
__instruments__ = OrderedDict()
# Keep track of opened COM ports and VISA devices
__opened_COM__ = []
__opened_VISA__ = []
__opened_NIDAQ__ = []
# Argument parsing functions
def __arg_split__(line):
"""Split line on whitespace but do not split string parameters."""
res = ['']
line = str(line)
s = line.replace("\"\"\"", chr(240))
single_quote = False
double_quote = False
triple_quote = False
    for c in s:
        if single_quote or double_quote or triple_quote:
            res[-1] += c
            single_quote ^= c == chr(39)
            double_quote ^= c == chr(34)
            triple_quote ^= c == chr(240)
        else:
            if c == ' ':
                res.append('')
            else:
                res[-1] += c
                single_quote = c == chr(39)
                double_quote = c == chr(34)
                triple_quote = c == chr(240)
    return [x.replace(chr(240), "\"\"\"" ) for x in res if x]
########################################################
# Instruments loading and listing magic
########################################################
def __read_config_instruments__():
__config__ = configparser.ConfigParser()
__config__.read(os.path.join(os.path.dirname(__file__), 'pyslave.ini'))
config_instruments = dict()
for resource in __resources__:
if __config__.has_section(resource):
section = __config__[resource]
for k,v in section.items():
if not k.startswith('__'):
vsplit = v.split(' ')
if len(vsplit)==1:
config_instruments[k] = {'resource':resource,'address':vsplit[0],'driver':None}
elif len(vsplit)==2:
config_instruments[k] = {'resource':resource,'address':vsplit[0],'driver':vsplit[1]}
else:
print('Badly formatted line in pyslave.ini:')
print('{0} = {1}'.format(k,v))
return config_instruments
# Not used for the moment
def __read_config_special__(section):
__config__ = configparser.ConfigParser()
__config__.read(os.path.join(os.path.dirname(__file__), 'pyslave.ini'))
config_special = {}
if __config__.has_section(section):
section = __config__[section]
for k,v in section.items():
if k.startswith('__'):
config_special[k] = v
return config_special
def __open__(resource, address, name, driver, local_ns, verbose=False):
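    # Open the instrument for the given resource type and address, register it in the
    # caller's namespace and in __instruments__, then return it.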
if resource=='VISA':
info = instruments.__visa_rm__.resource_info(address)
res_name = info.resource_name
if res_name in __opened_VISA__:
print('{0} is already opened'.format(address))
return None
inst = instruments.openVISA(address, driver, verbose)
name = __get_name__(inst,verbose) if name is None else name
__opened_VISA__.append(res_name)
elif resource=='NIDAQ':
if address in __opened_NIDAQ__:
            print('{0} is already opened'.format(address))
            return None
inst = instruments.openNIDAQ(address, driver, verbose)
name = __get_name__(inst,verbose) if name is None else name
__opened_NIDAQ__.append(address)
elif resource=='COM':
if address in __opened_COM__:
print('{0} is already opened'.format(address))
return None
inst = instruments.openCOM(address, driver, verbose)
name = __get_name__(inst,verbose) if name is None else name
__opened_COM__.append(address)
elif resource=='Other':
inst = instruments.openOther(address, driver, verbose)
name = __get_name__(inst,verbose) if name is None else name
local_ns[name] = inst
__instruments__[name] = inst
logger.info('Opening {0} {1} as {2} with {3} ({4})'.format(resource, address, name, inst.__driver_name__, inst.__driver_module__))
print('{0:10s} : {1} {2}'.format(name, inst.__inst_id__, inst.__address__))
return inst
def __get_name__(inst, verbose=False):
prefix = inst.__inst_type__
prev = [ int(k[len(prefix):]) for k in __instruments__.keys() if k.startswith(prefix) ]
i = 1
while i in prev:
i += 1
name = prefix + str(i)
if verbose:
inp = input('Instrument name [{0}] : '.format(name))
inp = inp.strip()
name = inp or name
return name
@register_line_magic
@needs_local_scope
def openinstr(line, local_ns):
"""Opens an instrument through a name or address.
The function first looks into the pyslave.ini file. If an
entry is found corresponding to the given name, the corresponding
instrument is opened.
If no matches is found in pyslva.ini:
- if the given name contains COM, the function opens the coresponding COM port
- otherwise, the function assumes the passed name is a VISA alias or address and
tries to open it
A driver can be passed as a second argument, it will override the driver
specified in the pyslave.ini file.
Examples :
# Open by name
openinstr dmm1
# Open by address or alias
openinstr TCPIP::192.168.0.81::INSTR
openinstr ZND
openinstr GPIB0::22::INSTR
openinstr GPIB0::22::INSTR yokogawa.yogogawa7651.yokogawa7651
"""
args = __arg_split__(line)
instr_name = args[0]
driver = args[1] if len(args)>1 else None
# Look up in the pyslave.ini file
config_instruments = __read_config_instruments__()
if instr_name in config_instruments:
name = instr_name
if name in __instruments__ :
print('{0} already exists. Close it before opening it again.'.format(name))
return
resource = config_instruments[instr_name]['resource']
address = config_instruments[instr_name]['address']
if driver is None:
driver = config_instruments[instr_name].get('driver',None)
__open__(resource, address, instr_name, driver, local_ns, True)
elif 'COM' in instr_name:
__open__('COM', instr_name, None, driver, local_ns, True)
else:
__open__('VISA', instr_name, None, driver, local_ns, True)
@register_line_magic
@needs_local_scope
def closeinstr(line, local_ns):
"""Close the specified instrument."""
name = line.strip()
if not name:
return
logger.info('Closing {0}.'.format(name))
if name not in __instruments__:
print('Unknown instrument {0}.'.format(name))
return
inst = __instruments__[name]
list_resources = {'VISA':__opened_VISA__,'NIDAQ':__opened_NIDAQ__,'COM':__opened_COM__}
l = list_resources.get(inst.__resource__,None)
if l:
l.remove(inst.__address__)
try:
inst.close()
except:
pass
if name in local_ns:
del local_ns[name]
del __instruments__[name]
@register_line_magic
@needs_local_scope
def closeall(line, local_ns):
"""Close all instruments."""
while __instruments__:
name,inst = __instruments__.popitem()
logger.info('Closing {0}.'.format(name))
list_resources = {'VISA':__opened_VISA__,'NIDAQ':__opened_NIDAQ__,'COM':__opened_COM__}
l = list_resources.get(inst.__resource__,None)
if l:
l.remove(inst.__address__)
try:
inst.close()
except:
pass
if name in local_ns:
del local_ns[name]
del inst
@register_line_magic
@needs_local_scope
def openall(line, local_ns):
"""Load all instruments listed in the pyslave.ini file."""
config = __read_config_instruments__()
err = ''
for k,v in config.items():
if k in __instruments__:
print('{0} is already loaded.'.format(k))
else:
try:
__open__(v['resource'],v['address'],k,v['driver'],local_ns)
except:
err = err + '{0} cannot be loaded\n'.format(k)
print(err)
@register_line_magic
@needs_local_scope
def openGPIB(line, local_ns):
"""Load all GPIB instruments."""
for address in instruments.__visa_rm__.list_resources('GPIB?*::INSTR'):
try:
__open__('VISA',address,None,None,local_ns)
except:
traceback.print_exc(limit=1,file=sys.stdout)
print('Error while opening {0}.'.format(address))
@register_line_magic
def listall(line):
"""List all loaded instruments."""
for k,v in __instruments__.items():
print('{0:10s} : {1} {2}'.format(k, v.__inst_id__, v.__address__))
@register_line_magic
def listVISA(line):
"""List all available VISA instruments."""
instruments.__visa_rm__.__update__()
for address in instruments.__visa_rm__.__list_resources_cached__:
print(address)
@register_line_magic
def resetVISA(line):
"""Reset VISA connection.
Close instruments before running this function"""
instruments.resetVISA()
del listall, openall, openinstr, openGPIB, closeinstr, closeall, listVISA, resetVISA
########################################################
# Scripts launching, pausing, resuming, aborting magic
########################################################
class SlaveError(Exception):
pass
def __replace__(line, add_pause):
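    # Translate the #-directives (#draw, #pause, #abort, #looptime, #disp, #live) into calls on the slave thread.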
line = line.expandtabs(4)
line = line.replace('#draw','thread.draw()')
line = line.replace('#pause','thread.pause()')
if '#abort' in line:
if add_pause and line.strip().startswith('#abort'):
line = line.replace('#abort','thread.pause()') + '\n ' + line.replace('#abort','if thread.stopflag : break')
else:
line = line.replace('#abort','if thread.stopflag : break')
line = line.replace('#looptime(','thread.looptime(')
line = line.replace('#looptime','thread.looptime()')
line = line.replace('#disp', 'thread.display')
line = line.replace('#live(','thread.live(')
return line
def __convert__(filename):
"""Convert a python script so that it can be called by slave.
The converted file is named by appending '_converted' to the filename."""
with open(filename,'r') as f:
script = f.read()
if '#main' not in script:
raise SlaveError('Could not find #main section in {0}'.format(filename))
header, main = [s.strip() for s in script.split('#main',maxsplit=1)]
with io.StringIO() as f:
print('# Auto generated script file',file=f)
print('',file=f)
# Put back header
print(header, file=f)
print('', file=f)
# Create script function
print('# Main script function', file=f)
print('def __slave_script__(thread):', file=f)
add_pause = '#pause' not in main
for l in main.split('\n'):
print(" ",__replace__(l, add_pause), file=f)
output = f.getvalue()
return output
def __findline__(target, filename):
target = target.strip()
i = 0
with open(filename,'r') as f:
for line in f:
i += 1
if line.strip().startswith(target):
msg = ["""File "{0}", line {1}\n""".format(filename, i), line]
break
else:
msg = None
return msg
def __start_slave__(script, filename, local_ns):
"""Start Slave thread with the passed code"""
global __slave_window__
try:
code = compile(script, "Converted " + filename, 'exec')
except:
print('Error while compiling {0}:'.format(filename))
exc_type, exc_value, exc_traceback = sys.exc_info()
msg = traceback.format_exception_only(exc_type, exc_value)
if exc_type is SyntaxError:
res = __findline__(msg[1], filename)
if res is not None:
msg[0] = res[0]
else:
msg = msg[1:]
for s in msg:
print(s, end='')
return
glob = globals()
for k,v in local_ns.items():
if not k.startswith('_'):
glob[k]=v
locals = dict()
try:
exec(code, glob, locals)
except:
print('Error while executing {0}:'.format(filename))
exc_type, exc_value, exc_traceback = sys.exc_info()
for f,l in traceback.walk_tb(exc_traceback):
line = l-1
try:
res = __findline__(script.splitlines()[line], filename)
except:
res = None
if res is not None:
for s in res:
print(s, end='')
for s in traceback.format_exception_only(exc_type, exc_value):
print(s, end='')
return
local_ns.update(locals)
glob.update(locals)
if __slave_window__ is None:
__slave_window__ = SlaveWindow()
__slave__['window'] = __slave_window__
__slave_window__.show()
__slave_window__.thread_start(__slave_script__, script.splitlines())
logger.info('Starting script {0}:\n{1}'.format(filename, script))
@register_line_magic
@needs_local_scope
def call(filename, local_ns):
"""Convert and launch a script in slave."""
if not filename.endswith('.py'):
filename = filename + '.py'
try:
script = __convert__(filename)
except :
#traceback.print_exc(file=sys.stdout)
exc_type, exc_value, exc_traceback = sys.exc_info()
print('Error while converting {0}:'.format(filename))
for s in traceback.format_exception_only(exc_type, exc_value):
print(s)
return
__start_slave__(script, filename, local_ns)
@register_line_magic
def convert(filename):
"""Convert a script and show the result in the console."""
if not filename.endswith('.py'):
filename = filename + '.py'
out = __convert__(filename)
print(out)
@register_line_magic
@needs_local_scope
def monitor(line, local_ns):
"""Monitor the output of an instrument and plot it.
The first argument is the function to monitor.
The second optional argument is the time period of the monitoring.
The default value is 1s.
The results are stored in monitor_out.
Examples:
%monitor dmm1
%monitor dmm1 0.2
%monitor instr1.read()
"""
args = __arg_split__(line)
script = """
import time
from pydata import xy
fig = figure()
monitor_out = xy(x=empty(0), y=empty(0))
t0 = time.time()
def __slave_script__(thread):
while True:
val = {0}
thread.display('Monitored value '+str(val))
monitor_out.append(time.time()-t0, val)
monitor_out.plot(fig)
thread.draw()
time.sleep({1})
thread.pause()
if thread.stopflag : break""".format(args[0] if '(' in args[0] else args[0] + '()',
args[1] if len(args)>1 else 1)
__start_slave__(script, 'monitor', local_ns)
print("Results are stored in monitor_out.")
measure_parameters = OrderedDict([
('iterable' , ''),
('set_function' , 'dcpwr1(x)'),
('set_sleep' , '0'),
('read_function' , 'dmm1()'),
('read_sleep' , '0'),
('plot','y'),
('filename','iv.txt'),
])
text_input = OrderedDict([
('iterable' , 'Parameter values to iterate over'),
('set_function' , 'Set parameter (parameter variable is x)'),
('set_sleep' , 'Sleep (in s)'),
('read_function' , 'Read value'),
('read_sleep' , 'Sleep (in s)'),
('plot' , 'Plot (y/n)'),
('filename' , 'Save to (space for not saving)'),
])
@register_line_magic
@needs_local_scope
def measure(line, local_ns):
"""Measure the output of an instrument and plot it while scanning a parameter.
Results are stored in measure_out."""
if line :
args = __arg_split__(line)
args = dict([ (args[i],args[i+1]) for i in range(0 ,len(args),2)])
measure_parameters.update(args)
else :
print("Press enter to keep previous value. Abort with many q's (qqqq...).")
for k,v in text_input.items():
inp = input('{0} [{1}] : '.format(v, measure_parameters[k]))
if inp.endswith('qqqq') : return
if inp : measure_parameters[k] = inp.strip()
if '(' not in measure_parameters['read_function'] : measure_parameters['read_function']+= '()'
if '(' not in measure_parameters['set_function'] and '=' not in measure_parameters['set_function'] :
measure_parameters['set_function']+= '(x)'
script = """
import time
from pydata import xy
if '{plot}'=='y': fig = figure()
measure_out = xy(x=array({iterable}), y=ones_like(array({iterable}))*nan)
def __slave_script__(thread):
for i,x in enumerate(measure_out.x):
{set_function}
time.sleep({set_sleep})
y = {read_function}
thread.display('Step ' + str(i+1) + '/' + str(len(measure_out.x)))
thread.looptime()
measure_out.y[i] = y
if '{plot}'=='y':
measure_out.plot(fig)
thread.draw()
time.sleep({read_sleep})
thread.pause()
if thread.stopflag : break
if "{filename}" :
measure_out.save("{filename}")
""".format(**measure_parameters)
__start_slave__(script, 'measure', local_ns)
if not line:
print('To quickly start the same measurement, copy paste the line below : ')
print('measure {0}'.format(' '.join(["{0}='{1}'".format(k,v) for k,v in measure_parameters.items()])))
print("Results are stored in measure_out.")
@register_line_magic
def pause(line):
"""Pause the running script."""
if __slave_window__ is None : return
__slave_window__.on_pushButton_Pause_clicked(echo=True)
@register_line_magic
def resume(line):
"""Resume the paused script."""
if __slave_window__ is None : return
__slave_window__.on_pushButton_Resume_clicked(echo=True)
@register_line_magic
def abort(line):
"""Abort the running script."""
if __slave_window__ is None : return
__slave_window__.on_pushButton_Abort_clicked(echo=True)
@register_line_magic
def kill(line):
"""Kill the running script."""
if __slave_window__ is None : return
__slave_window__.on_pushButton_Kill_clicked(echo=True)
@register_line_magic
def window(line):
"""Show the slave window."""
global __slave_window__
if __slave_window__ is None :
__slave_window__ = SlaveWindow()
__slave__['window'] = __slave_window__
__slave_window__.show()
@register_line_magic
@needs_local_scope
def capture(line, local_ns):
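    """Capture the output of an instrument, plot it and optionally save it.

    The first argument is the function to call, the optional second argument
    is a filename to save to; extra key=value arguments are passed to the
    save method. Results are stored in capture_out and the figure in capture_fig."""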
args = __arg_split__(line)
# First argument
func = args[0] if '(' in args[0] else args[0].strip() + '()'
# Second argument
filename = str(args[1]) if len(args)>1 else None
# Optional extra arguments
param = eval('dict({0})'.format(','.join(args[2:])))
# Fetch data
data = eval(func, globals(), local_ns)
# Plot data
exec("capture_fig = figure()", globals(), local_ns)
data.plot(local_ns['capture_fig'])
exec("capture_fig.show()", globals(), local_ns)
local_ns['capture_out'] = data
# Save data to file
if filename :
msg = data.save(filename, **param)
else:
print("Data are stored in capture_out. Figure is capture_fig.")
del call, convert, window, pause, resume, abort, kill, monitor, measure, capture
| mit | -9,023,431,940,506,280,000 | 34.129371 | 134 | 0.57659 | false |
EricssonResearch/calvin-base | calvin/runtime/south/transports/lib/twisted/base_transport.py | 1 | 4660 | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.utilities.calvin_callback import CalvinCBClass
from calvin.utilities import calvinlogger
_log = calvinlogger.get_logger(__name__)
class CalvinServerBase(CalvinCBClass):
"""
    BaseServerClass for implementing calvinip servers in different frameworks
Callbacks in the API
self._callback_execute('server_started', port)
Called when the server have started listening on the port
port is an integer port number of the lisening server
self._callback_execute('server_stopped')
Called when the server have stopped listening on the port
self._callback_execute('client_connected', uri, proto)
Called when a client is connected
            uri is the URI of the connected client, for example "calvinip://127.0.0.1:78445"
            proto is the protocol to be passed to the CalvinTransportBase, can be None
"""
def __init__(self, iface='', port=0, callbacks=None, *args, **kwargs):
"""
iface The interface to listen on defaults to all
port The port to listen on defaults to system generated
This port should be returned in the server_started callback
callbacks The callbacks subscribed on this class
"""
super(CalvinServerBase, self).__init__(callbacks, callback_valid_names=['server_started',
'server_stopped',
'client_connected'])
def start(self):
"""
Called when the server transport is started
"""
raise NotImplementedError()
def stop(self):
"""
Called when the server transport is stopped
"""
raise NotImplementedError()
def is_listening(self):
"""
        Returns True if the server is listening
"""
raise NotImplementedError()
class CalvinTransportBase(CalvinCBClass):
"""
    BaseTransport for implementing calvinip transports in different frameworks
self._callback_execute('connected')
Called when the client is connected
self._callback_execute('disconnected', reason)
Called when the client disconnects
            reason is a string describing the reason for disconnecting
(normal, error, ..)
self._callback_execute('connection_failed', reason)
Called when the connection fails
            reason is a string describing the reason for the failure
(normal, error, ..)
self._callback_execute('data', data)
Called when we have raw data in the transport.
Always an entire package
"""
def __init__(self, host, port, callbacks=None, proto=None, *args, **kwargs):
"""
host The host address of the client
port The port to connect to, 0 means system allocated
        callbacks The set of callbacks that the client subscribes to
        proto Can be passed in here if it is a connecting client created by a server instance
"""
self._rtt = None
super(CalvinTransportBase, self).__init__(callbacks, callback_valid_names=['connected', 'disconnected', 'connection_failed', 'data'])
def is_connected(self):
"""
returns True if the transport is connected
"""
raise NotImplementedError()
def disconnect(self):
"""
Used for disconnecting the client
"""
raise NotImplementedError()
def send(self, data):
"""
Used for sending data to the client
data Is raw data one package
"""
raise NotImplementedError()
def join(self):
"""
Called when the client should connect
"""
raise NotImplementedError()
def get_rtt(self):
return self._rtt
| apache-2.0 | 6,143,106,125,151,566,000 | 35.692913 | 141 | 0.600858 | false |
ToonTownInfiniteRepo/ToontownInfinite | toontown/building/DistributedBuildingMgrAI.py | 1 | 9120 | from direct.directnotify.DirectNotifyGlobal import *
from otp.ai.AIBaseGlobal import *
from toontown.building import DistributedBuildingAI
from toontown.building import GagshopBuildingAI
from toontown.building import HQBuildingAI
from toontown.building import KartShopBuildingAI
from toontown.building import PetshopBuildingAI
from toontown.hood import ZoneUtil
import time
# from toontown.building import DistributedAnimBuildingAI
class DistributedBuildingMgrAI:
notify = directNotify.newCategory('DistributedBuildingMgrAI')
def __init__(self, air, branchId, dnaStore, trophyMgr):
self.air = air
self.branchId = branchId
self.canonicalBranchId = ZoneUtil.getCanonicalZoneId(self.branchId)
self.dnaStore = dnaStore
self.trophyMgr = trophyMgr
self.__buildings = {}
self.findAllLandmarkBuildings()
def cleanup(self):
for building in self.__buildings.values():
building.cleanup()
self.__buildings = {}
def isValidBlockNumber(self, blockNumber):
return blockNumber in self.__buildings
def isSuitBlock(self, blockNumber):
if not self.isValidBlockNumber(blockNumber):
return False
return self.__buildings[blockNumber].isSuitBlock()
def getSuitBlocks(self):
blocks = []
for blockNumber, building in self.__buildings.items():
if building.isSuitBlock():
blocks.append(blockNumber)
return blocks
def getEstablishedSuitBlocks(self):
blocks = []
for blockNumber, building in self.__buildings.items():
if building.isEstablishedSuitBlock():
blocks.append(blockNumber)
return blocks
def getToonBlocks(self):
blocks = []
for blockNumber, building in self.__buildings.items():
if isinstance(building, HQBuildingAI.HQBuildingAI):
continue
if isinstance(building, GagshopBuildingAI.GagshopBuildingAI):
continue
if isinstance(building, PetshopBuildingAI.PetshopBuildingAI):
continue
if isinstance(building, KartShopBuildingAI.KartShopBuildingAI):
continue
if not building.isSuitBlock():
blocks.append(blockNumber)
return blocks
def getBuildings(self):
return self.__buildings.values()
def getFrontDoorPoint(self, blockNumber):
if self.isValidBlockNumber(blockNumber):
return self.__buildings[blockNumber].getFrontDoorPoint()
def getBuildingTrack(self, blockNumber):
if self.isValidBlockNumber(blockNumber):
return self.__buildings[blockNumber].track
def getBuilding(self, blockNumber):
if self.isValidBlockNumber(blockNumber):
return self.__buildings[blockNumber]
def setFrontDoorPoint(self, blockNumber, point):
if self.isValidBlockNumber(blockNumber):
return self.__buildings[blockNumber].setFrontDoorPoint(point)
def getDNABlockLists(self):
blocks = []
hqBlocks = []
gagshopBlocks = []
petshopBlocks = []
kartshopBlocks = []
animBldgBlocks = []
for i in xrange(self.dnaStore.getNumBlockNumbers()):
blockNumber = self.dnaStore.getBlockNumberAt(i)
buildingType = self.dnaStore.getBlockBuildingType(blockNumber)
if buildingType == 'hq':
hqBlocks.append(blockNumber)
elif buildingType == 'gagshop':
gagshopBlocks.append(blockNumber)
elif buildingType == 'petshop':
petshopBlocks.append(blockNumber)
elif buildingType == 'kartshop':
kartshopBlocks.append(blockNumber)
elif buildingType == 'animbldg':
animBldgBlocks.append(blockNumber)
else:
blocks.append(blockNumber)
return (blocks, hqBlocks, gagshopBlocks, petshopBlocks, kartshopBlocks,
animBldgBlocks)
def findAllLandmarkBuildings(self):
backups = simbase.backups.load('blockinfo', (self.air.districtId, self.branchId), default={})
(blocks, hqBlocks, gagshopBlocks, petshopBlocks, kartshopBlocks,
animBldgBlocks) = self.getDNABlockLists()
for blockNumber in blocks:
self.newBuilding(blockNumber, backup=backups.get(blockNumber, None))
for blockNumber in animBldgBlocks:
self.newAnimBuilding(blockNumber, backup=backups.get(blockNumber, None))
for blockNumber in hqBlocks:
self.newHQBuilding(blockNumber)
for blockNumber in gagshopBlocks:
self.newGagshopBuilding(blockNumber)
for block in petshopBlocks:
self.newPetshopBuilding(block)
for block in kartshopBlocks:
self.newKartShopBuilding(block)
def newBuilding(self, blockNumber, backup=None):
building = DistributedBuildingAI.DistributedBuildingAI(
self.air, blockNumber, self.branchId, self.trophyMgr)
building.generateWithRequired(self.branchId)
if backup is not None:
state = backup.get('state', 'toon')
if ((state == 'suit') and simbase.air.wantCogbuildings) or (
(state == 'cogdo') and simbase.air.wantCogdominiums):
building.track = backup.get('track', 'c')
building.difficulty = backup.get('difficulty', 1)
building.numFloors = backup.get('numFloors', 1)
building.updateSavedBy(backup.get('savedBy'))
building.becameSuitTime = backup.get('becameSuitTime', time.mktime(time.gmtime()))
if (state == 'suit') and simbase.air.wantCogbuildings:
building.setState('suit')
elif (state == 'cogdo') and simbase.air.wantCogdominiums:
building.setState('cogdo')
else:
building.setState('toon')
else:
building.setState('toon')
else:
building.setState('toon')
self.__buildings[blockNumber] = building
return building
def newAnimBuilding(self, blockNumber, backup=None):
return self.newBuilding(blockNumber, backup=backup)
def newHQBuilding(self, blockNumber):
dnaStore = self.air.dnaStoreMap[self.canonicalBranchId]
exteriorZoneId = dnaStore.getZoneFromBlockNumber(blockNumber)
exteriorZoneId = ZoneUtil.getTrueZoneId(exteriorZoneId, self.branchId)
interiorZoneId = (self.branchId - (self.branchId%100)) + 500 + blockNumber
building = HQBuildingAI.HQBuildingAI(
self.air, exteriorZoneId, interiorZoneId, blockNumber)
self.__buildings[blockNumber] = building
return building
def newGagshopBuilding(self, blockNumber):
dnaStore = self.air.dnaStoreMap[self.canonicalBranchId]
exteriorZoneId = dnaStore.getZoneFromBlockNumber(blockNumber)
exteriorZoneId = ZoneUtil.getTrueZoneId(exteriorZoneId, self.branchId)
interiorZoneId = (self.branchId - (self.branchId%100)) + 500 + blockNumber
building = GagshopBuildingAI.GagshopBuildingAI(
self.air, exteriorZoneId, interiorZoneId, blockNumber)
self.__buildings[blockNumber] = building
return building
def newPetshopBuilding(self, blockNumber):
dnaStore = self.air.dnaStoreMap[self.canonicalBranchId]
exteriorZoneId = dnaStore.getZoneFromBlockNumber(blockNumber)
exteriorZoneId = ZoneUtil.getTrueZoneId(exteriorZoneId, self.branchId)
interiorZoneId = (self.branchId - (self.branchId%100)) + 500 + blockNumber
building = PetshopBuildingAI.PetshopBuildingAI(
self.air, exteriorZoneId, interiorZoneId, blockNumber)
self.__buildings[blockNumber] = building
return building
def newKartShopBuilding(self, blockNumber):
dnaStore = self.air.dnaStoreMap[self.canonicalBranchId]
exteriorZoneId = dnaStore.getZoneFromBlockNumber(blockNumber)
exteriorZoneId = ZoneUtil.getTrueZoneId(exteriorZoneId, self.branchId)
interiorZoneId = (self.branchId - (self.branchId%100)) + 500 + blockNumber
building = KartShopBuildingAI.KartShopBuildingAI(
self.air, exteriorZoneId, interiorZoneId, blockNumber)
self.__buildings[blockNumber] = building
return building
def save(self):
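        # Persist, for every suit block, the minimal state needed to restore it
        # at the next start-up: FSM state, track, difficulty, floor count,
        # savedBy list and takeover time.  newBuilding() reads this dict back
        # through its 'backup' argument.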
buildings = {}
for blockNumber in self.getSuitBlocks():
building = self.getBuilding(blockNumber)
backup = {
'state': building.fsm.getCurrentState().getName(),
'block': building.block,
'track': building.track,
'difficulty': building.difficulty,
'numFloors': building.numFloors,
'savedBy': building.savedBy,
'becameSuitTime': building.becameSuitTime
}
buildings[blockNumber] = backup
simbase.backups.save('blockinfo', (self.air.districtId, self.branchId), buildings)
| mit | 4,311,047,661,063,541,000 | 42.428571 | 101 | 0.651645 | false |
fernandolobato/balarco | works/serializers.py | 1 | 4427 | from rest_framework import serializers
from . import models
from clients import serializers as client_serializers
from users import serializers as user_serializers
class WorkTypeSerializer(serializers.ModelSerializer):
name = serializers.CharField(read_only=True)
class Meta:
model = models.WorkType
fields = ('id', 'work_type_id', 'name',)
class ArtTypeSerializer(serializers.ModelSerializer):
class Meta:
model = models.ArtType
fields = ('id', 'work_type', 'name',)
class ArtIgualaSerializer(serializers.ModelSerializer):
art_type_name = serializers.CharField(source='art_type.name', read_only=True)
class Meta:
model = models.ArtIguala
fields = ('id', 'iguala', 'art_type', 'quantity', 'art_type_name')
class IgualaSerializer(serializers.ModelSerializer):
client_complete = client_serializers.ClientSerializer(source='client', read_only=True)
art_iguala = ArtIgualaSerializer(many=True, read_only=True)
class Meta:
model = models.Iguala
fields = ('id', 'client', 'client_complete', 'name', 'start_date', 'end_date',
'art_iguala',)
class StatusSerializer(serializers.ModelSerializer):
name = serializers.CharField(source='__str__', read_only=True)
class Meta:
model = models.Status
fields = ('id', 'status_id', 'name',)
class ArtWorkSerializer(serializers.ModelSerializer):
art_type_complete = ArtTypeSerializer(source='art_type', read_only=True)
class Meta:
model = models.ArtWork
fields = ('id', 'work', 'art_type', 'quantity', 'art_type_complete')
class FileSerializer(serializers.ModelSerializer):
class Meta:
model = models.File
fields = ('id', 'work', 'upload',)
class WorkDesignerSerializer(serializers.ModelSerializer):
start_date = serializers.DateTimeField(read_only=True)
end_date = serializers.DateTimeField(read_only=True)
designer_name = serializers.CharField(source='designer.first_name', read_only=True)
designer_last_name = serializers.CharField(source='designer.last_name', read_only=True)
class Meta:
model = models.WorkDesigner
fields = ('id', 'designer', 'designer_name', 'designer_last_name', 'work',
'start_date', 'end_date', 'active_work',)
class StatusChangeSerializer(serializers.ModelSerializer):
date = serializers.DateTimeField(read_only=True)
class Meta:
model = models.StatusChange
fields = ('id', 'work', 'status', 'user', 'date',)
class WorkSerializer(serializers.ModelSerializer):
creation_date = serializers.DateField(read_only=True)
executive_complete = user_serializers.UserSerializer(source='executive', read_only=True)
contact_complete = client_serializers.ContactSerializer(source='contact', read_only=True)
current_status_complete = StatusSerializer(source='current_status', read_only=True)
work_type_complete = WorkTypeSerializer(source='work_type', read_only=True)
iguala_complete = IgualaSerializer(source='iguala', read_only=True)
art_works = ArtWorkSerializer(many=True, read_only=True)
files = FileSerializer(many=True, read_only=True)
work_designers = WorkDesignerSerializer(many=True, read_only=True)
status_changes = StatusChangeSerializer(many=True, read_only=True)
class Meta:
model = models.Work
fields = ('id',
'executive',
'executive_complete',
'contact',
'contact_complete',
'current_status',
'current_status_complete',
'work_type',
'work_type_complete',
'iguala',
'iguala_complete',
'creation_date',
'name',
'expected_delivery_date',
'brief',
'final_link',
'art_works',
'files',
'work_designers',
'status_changes'
)
class NotificationSerializer(serializers.ModelSerializer):
# work_complete = WorkSerializer(source='work', read_only=True)
# user_complete = user_serializers.UserSerializer(source='user', read_only=True)
class Meta:
model = models.Notification
fields = ('id', 'work', 'user', 'date', 'text', 'seen')
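# Usage sketch (illustrative only, not part of the original module; `work_obj`
# is a placeholder for an existing models.Work instance):
#
#     serializer = WorkSerializer(work_obj)
#     serializer.data   # nested dict including executive_complete,
#                       # iguala_complete, art_works, files, work_designers...
#
# The *_complete fields are read-only expansions of the writable id fields, so
# clients can write plain ids while reads return the nested representations.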
| mit | -4,358,667,984,826,051,600 | 31.313869 | 93 | 0.635871 | false |
MarsZone/DreamLand | muddery/utils/readers.py | 1 | 3270 | """
This module parse data files to lines.
"""
from __future__ import print_function
import csv
import codecs
try:
import xlrd
except ImportError:
xlrd = None
class DataReader(object):
"""
Game data file reader.
"""
types = None
def __init__(self, filename = None):
"""
Args:
filename: (String) data file's name.
Returns:
None
"""
self.filename = filename
def __iter__(self):
return self
def next(self):
return self.readln()
def readln(self):
"""
Read data line.
Returns:
list: data line
"""
# No data.
raise StopIteration
class CSVReader(DataReader):
"""
CSV file's reader.
"""
types = ("csv",)
def __init__(self, filename=None):
"""
Args:
filename: (String) data file's name.
Returns:
None
"""
super(CSVReader, self).__init__(filename)
self.reader = None
if filename:
csvfile = open(filename, 'r')
# test BOM
head = csvfile.read(len(codecs.BOM_UTF8))
if head != codecs.BOM_UTF8:
# read from beginning
csvfile.seek(0)
self.reader = csv.reader(csvfile)
def readln(self):
"""
Read data line.
Returns:
list: data line
"""
if not self.reader:
raise StopIteration
# Read line.
return self.reader.next()
class XLSReader(DataReader):
"""
XLS/XLSX file's reader.
"""
types = ("xls", "xlsx")
def __init__(self, filename=None):
"""
Args:
filename: (String) data file's name.
Returns:
None
"""
super(XLSReader, self).__init__(filename)
if not xlrd:
print('**********************************************************')
print('You need to install "xlrd" first to import xls/xlsx files!')
print('You can use "pip install xlrd" to install it! ')
print('**********************************************************')
return
# load file
self.sheet = None
self.row_pos = 0
if filename:
book = xlrd.open_workbook(filename)
self.sheet = book.sheet_by_index(0)
def readln(self):
"""
Read data line.
Returns:
list: data line
"""
if not self.sheet:
raise StopIteration
if self.row_pos >= self.sheet.nrows:
raise StopIteration
# Read line.
pos = self.row_pos
self.row_pos += 1
return self.sheet.row_values(pos)
all_readers = [CSVReader, XLSReader]
def get_readers():
"""
    Get all available readers.
    Returns:
        list: available reader classes
"""
return all_readers
reader_dict = {type: reader for reader in all_readers for type in reader.types}
def get_reader(reader_type):
"""
Get a reader by reader's type.
Args:
type: (String) reader's type.
Returns:
reader
"""
return reader_dict.get(reader_type, None) | bsd-3-clause | 2,917,408,387,520,421,400 | 19.31677 | 79 | 0.486544 | false |
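# Usage sketch (illustrative only; "cards.csv" is a placeholder file name):
#
#     reader_class = get_reader("csv")  # or "xls" / "xlsx"
#     if reader_class:
#         for row in reader_class("cards.csv"):  # readers are iterable, line by line
#             print(row)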
ZeitOnline/zeit.edit | src/zeit/edit/browser/tests/test_form.py | 1 | 8030 | from mock import Mock
import zeit.cms.content.interfaces
import zeit.cms.testing
import zeit.edit.browser.form
import zeit.edit.browser.view
import zeit.edit.testing
import zope.formlib.form
import zope.interface
import zope.publisher.browser
import zope.schema
class IExample(zope.interface.Interface):
foo = zope.schema.TextLine(title=u'foo')
class WidgetCSSMixin(zeit.cms.testing.ZeitCmsTestCase):
# XXX This test should be moved to zeit.cms.browser, but it seems nearly
# impossible to instantiate an EditForm, so we punt on this for now;
# InlineForms are friendlier (since they don't pull in the
# main_template.pt)
def render_form(self, form_class):
ANY_CONTEXT = Mock()
zope.interface.alsoProvides(ANY_CONTEXT, IExample)
request = zope.publisher.browser.TestRequest()
form = form_class(ANY_CONTEXT, request)
return form()
def test_css_class_on_widget_is_rendered_to_html(self):
class ExampleForm(zeit.edit.browser.form.InlineForm):
form_fields = zope.formlib.form.FormFields(IExample)
legend = 'Legend'
def setUpWidgets(self):
super(ExampleForm, self).setUpWidgets()
self.widgets['foo'].vivi_css_class = 'barbaz qux'
self.assertEllipsis("""\
...<div class="field fieldname-foo required fieldtype-text barbaz qux">
...<div class="label">...""", self.render_form(ExampleForm))
def test_widget_without_css_class_does_not_break(self):
class ExampleForm(zeit.edit.browser.form.InlineForm):
form_fields = zope.formlib.form.FormFields(IExample)
legend = 'Legend'
self.assertEllipsis("""\
...<div class="field fieldname-foo required fieldtype-text">
...<div class="label">...""", self.render_form(ExampleForm))
class FoldableFormGroup(zeit.edit.testing.FunctionalTestCase):
def render(self, in_workingcopy,
folded_workingcopy=False, folded_repository=False):
class ExampleForm(zeit.edit.browser.form.FoldableFormGroup):
title = 'Example'
if folded_workingcopy is not None:
ExampleForm.folded_workingcopy = folded_workingcopy
if folded_repository is not None:
ExampleForm.folded_repository = folded_repository
context = Mock()
if in_workingcopy:
zope.interface.alsoProvides(
context, zeit.cms.checkout.interfaces.ILocalContent)
request = zope.publisher.browser.TestRequest()
zope.interface.alsoProvides(
request, zeit.cms.browser.interfaces.ICMSLayer)
form = ExampleForm(context, request, Mock(), Mock())
return form()
def test_setting_folded_workingcopy_renders_css_class(self):
self.assertEllipsis(
'...folded...', self.render(
in_workingcopy=True,
folded_workingcopy=True))
self.assertNotIn(
'...folded...', self.render(
in_workingcopy=False,
folded_workingcopy=True, folded_repository=False))
def test_setting_folded_repository_renders_css_class(self):
self.assertEllipsis(
'...folded...', self.render(
in_workingcopy=False,
folded_repository=True))
self.assertNotIn(
'...folded...', self.render(
in_workingcopy=True,
folded_repository=True))
def test_default_for_workingcopy_is_folded(self):
self.assertEllipsis(
'...folded...', self.render(
in_workingcopy=True,
folded_workingcopy=None, folded_repository=None))
def test_default_for_repository_is_folded(self):
self.assertEllipsis(
'...folded...', self.render(
in_workingcopy=False,
folded_workingcopy=None, folded_repository=None))
class InlineEditForm(zeit.edit.browser.form.InlineForm):
legend = ''
prefix = 'edit'
form_fields = zope.formlib.form.FormFields(
zeit.cms.content.interfaces.ICommonMetadata).select(
'supertitle', 'subtitle')
class LightboxEditForm(zeit.edit.browser.view.EditBox):
form_fields = zope.formlib.form.FormFields(
zeit.cms.content.interfaces.ICommonMetadata).select(
'supertitle', 'subtitle')
class InlineFormAutoSaveTest(zeit.edit.testing.SeleniumTestCase):
def setUp(self):
super(InlineFormAutoSaveTest, self).setUp()
with zeit.cms.testing.site(None):
zope.configuration.xmlconfig.string("""\
<?xml version="1.0" encoding="UTF-8" ?>
<configure
package="zeit.edit.browser.tests"
xmlns:browser="http://namespaces.zope.org/browser">
<include package="zope.browserpage" file="meta.zcml" />
<browser:page
for="zeit.cms.content.interfaces.ICommonMetadata"
layer="zeit.cms.browser.interfaces.ICMSLayer"
name="edit-inline.html"
class=".test_form.InlineEditForm"
permission="zeit.EditContent"
/>
<browser:page
for="zeit.cms.content.interfaces.ICommonMetadata"
layer="zeit.cms.browser.interfaces.ICMSLayer"
name="edit-lightbox.html"
class=".test_form.LightboxEditForm"
permission="zeit.EditContent"
/>
<browser:page
for="zeit.cms.content.interfaces.ICommonMetadata"
layer="zeit.cms.browser.interfaces.ICMSLayer"
name="inlineform"
template="inlineform.pt"
permission="zeit.EditContent"
/>
<browser:page
for="zeit.cms.content.interfaces.ICommonMetadata"
layer="zeit.cms.browser.interfaces.ICMSLayer"
name="inlineform-nested"
template="inlineform-nested.pt"
permission="zeit.EditContent"
/>
<browser:page
for="zeit.cms.content.interfaces.ICommonMetadata"
layer="zeit.cms.browser.interfaces.ICMSLayer"
name="inlineform-lightbox"
template="inlineform-lightbox.pt"
permission="zeit.EditContent"
/>
</configure>
""")
def tearDown(self):
# XXX plone.testing.zca.pushGlobalRegistry() doesn't work,
# the view is not found.
with zeit.cms.testing.site(None):
zope.component.getSiteManager().unregisterAdapter(
required=(zeit.cms.content.interfaces.ICommonMetadata,
zeit.cms.browser.interfaces.ICMSLayer),
provided=zope.interface.Interface,
name='autosave-edit')
super(InlineFormAutoSaveTest, self).tearDown()
def test_submits_form_on_focusout(self):
s = self.selenium
self.open('/repository/testcontent/@@checkout')
# XXX @@checkout?came_from=@@autosave-edit does not work
self.open('/workingcopy/zope.user/testcontent/@@inlineform')
input = 'edit.subtitle'
s.waitForElementPresent(input)
s.type(input, 'asdf')
s.click('header')
s.waitForElementNotPresent('css=.field.dirty')
# Re-open the page and verify that the data is still there
s.refresh()
s.waitForElementPresent(input)
s.assertValue(input, 'asdf')
def test_nested_inlineform_only_submits_inner_form(self):
s = self.selenium
self.open('/repository/testcontent/@@checkout')
self.open('/workingcopy/zope.user/testcontent/@@inlineform-nested')
input = 'edit.subtitle'
s.waitForElementPresent(input)
self.eval('zeit.cms.InlineForm.submitted = 0;')
self.eval("""zeit.cms.InlineForm.prototype.submit = function() {
zeit.cms.InlineForm.submitted += 1; }""")
s.type(input, 'asdf')
s.click('header')
self.assertEqual(1, self.eval('zeit.cms.InlineForm.submitted'))
def test_subpageform_in_lightbox_submits_correctly(self):
s = self.selenium
self.open('/repository/testcontent/@@checkout')
self.open('/workingcopy/zope.user/testcontent/@@inlineform-lightbox')
input = 'form.subtitle'
s.waitForElementPresent(input)
s.click('id=form.actions.apply')
s.waitForElementNotPresent(input)
| bsd-3-clause | 1,862,386,785,870,568,000 | 33.612069 | 77 | 0.653176 | false |
philouc/pyhrf | python/pyhrf/test/test_sandbox_parcellation.py | 1 | 63911 | import os.path as op
import unittest
import numpy as np
import pyhrf
from numpy.testing import assert_array_equal, assert_array_almost_equal, assert_almost_equal, assert_array_less, assert_equal
import shutil
import pyhrf.graph as pgraph
import pyhrf.sandbox.parcellation as pm
from pyhrf.tools import assert_file_exists
from scipy.sparse import cs_graph_components
import math
from pyhrf.ndarray import expand_array_in_mask, xndarray, MRI3Daxes
from pyhrf import Condition
import pyhrf.boldsynth.scenarios as simu
from pyhrf.tools import Pipeline
from pyhrf.core import FmriData
from pyhrf.parcellation import parcellation_dist
# launch all the tests in here:
# pyhrf_maketests -v test_sandbox_parcellation
# to see real data:
# anatomist cortex_occipital_* hrf_territory
# anatomist cortex_occipital_*
def my_func_to_test(p, output_path):
return p
class StatTest(unittest.TestCase):
def setUp(self):
pass
def test_norm_bc(self):
a = np.array([[1.,2.],
[2.,4.],
[5.,10.]])
b = np.array([.5,.5])
expected_norm = np.array([np.linalg.norm(x-b)**2 for x in a])
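        # norm2_bc is expected to broadcast b against every row of a, i.e. to
        # return the squared Euclidean distance ||a_i - b||**2 for each row a_i.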
assert_almost_equal(pm.norm2_bc(a,b), expected_norm)
def test_gmm_known_weights_simu_1D(self):
"""
        Test biGMM fit with known posterior weights, from clean biGMM samples
        (no noise). Multivariate case (3 features per sample).
"""
plot = False
pyhrf.verbose.set_verbosity(0)
np.random.seed(2354) #make reproducible results
# biGMM parameters:
mu0 = np.array([2., 5., 10.])
v0 = 3.
mu1 = np.array([12., 15., 20.])
v1 = 3.
l = .3 #weight for component 1
# generate samples from biGMM:
nsamples = 10000
samples = np.random.randn(nsamples) * v0**.5 + mu0[:,np.newaxis]
card_c1 = int(l*nsamples)
samples[:, :card_c1] = np.random.randn(card_c1) * v1**.5 + \
mu1[:, np.newaxis]
samples = samples.T
# compute sample-specific posterior weights:
nfeats = len(mu0)
print nfeats
post_weights = 1 / (1 + (1-l)/l * (v0/v1)**(-nfeats/2.) * \
np.exp(-(pm.norm2_bc(samples,mu0)/v0 - \
pm.norm2_bc(samples,mu1)/v1)/2.))
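        # This is the closed-form posterior membership probability of an
        # isotropic two-component GMM: with Nk = N(x; mu_k, v_k * I),
        #   alpha(x) = l*N1 / (l*N1 + (1-l)*N0)
        #            = 1 / (1 + (1-l)/l * (v0/v1)**(-d/2)
        #                   * exp(-(||x-mu0||^2/v0 - ||x-mu1||^2/v1)/2))
        # where d = nfeats, which is exactly the expression evaluated above.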
# fit GMM knowing post weights:
mu0_est, mu1_est, v0_est, v1_est, l_est, llh = \
pm.informedGMM_MV(samples, post_weights)
# Some outputs:
mus = np.array([[mu0,mu1], [mu0_est, mu1_est]]).transpose()
vs = np.array([[v0,v1], [v0_est, v1_est]]).T
ls = np.array([[1-l,l], [1-l_est, l_est]]).T
from pyhrf.tools import get_2Dtable_string
pyhrf.verbose(1, 'means:')
for f in xrange(samples.shape[1]):
pyhrf.verbose(1, get_2Dtable_string(mus[f,:,:], ['C0','C1'],
['true', 'estim']))
pyhrf.verbose(1, 'vars:')
pyhrf.verbose(1, get_2Dtable_string(vs, ['C0','C1'], ['true', 'estim']))
pyhrf.verbose(1, 'weights:')
pyhrf.verbose(1, get_2Dtable_string(ls, ['C0','C1'], ['true', 'estim']))
if plot:
import matplotlib.pyplot as plt
from pyhrf.plot import plot_gaussian_mixture
f = 1 #check this feature
# plt.vlines(samples[:,f], 0, post_weights, 'r')
# plt.vlines(samples[:,f], 0, 1-post_weights, 'b')
plot_gaussian_mixture(np.array([[mu0[f],mu1[f]], [v0,v1]]),
props=[1-l,l], color='r')
plot_gaussian_mixture(np.array([[mu0_est[f], mu1_est[f]],
[v0_est, v1_est]]),
props=[1-l_est, l_est], color='b')
plt.hist(samples[:,f], color='g',normed=True)
plt.show()
assert_array_almost_equal([mu0_est, mu1_est], [mu0,mu1], decimal=1)
assert_array_almost_equal([v0_est, v1_est], [v0,v1], decimal=1)
assert_array_almost_equal([1-l_est,l_est], [1-l,l], decimal=1)
def test_gmm_known_weights_difvars_noise(self):
"""
        Test biGMM fit with known posterior weights: weights are computed from
        the clean biGMM samples, the fit is run on noisy samples, and the two
        components have different variances. Multivariate case.
"""
plot = False
pyhrf.verbose.set_verbosity(0)
np.random.seed(2354) #make reproducible results
# biGMM parameters:
mu0 = np.array([2., 5., 10.])
v0 = 3.
mu1 = np.array([12., 15., 20.])
v1 = v0/50.
l = .3 #weight for component 1
v_noise = 1.5
# generate samples from biGMM:
nsamples = 10000
samples = np.random.randn(nsamples) * v0**.5 + mu0[:,np.newaxis]
card_c1 = int(l*nsamples)
samples[:, :card_c1] = np.random.randn(card_c1) * v1**.5 + \
mu1[:, np.newaxis]
samples = samples.T
noise = np.random.randn(samples.shape[0], samples.shape[1]) * v_noise
#print 'noise shape = ', str(noise.shape)
noisy_samples = samples + noise
# compute sample-specific posterior weights:
nfeats = len(mu0)
print nfeats
post_weights = 1 / (1 + (1-l)/l * (v0/v1)**(-nfeats/2.) * \
np.exp(-(pm.norm2_bc(samples,mu0)/v0 - \
pm.norm2_bc(samples,mu1)/v1)/2.))
# fit GMM knowing post weights:
mu0_est, mu1_est, v0_est, v1_est, l_est, llh = \
pm.informedGMM_MV(noisy_samples, post_weights)
# Some outputs:
mus = np.array([[mu0,mu1], [mu0_est, mu1_est]]).transpose()
vs = np.array([[v0,v1], [v0_est, v1_est]]).T
ls = np.array([[1-l,l], [1-l_est, l_est]]).T
from pyhrf.tools import get_2Dtable_string
pyhrf.verbose(1, 'means:')
for f in xrange(samples.shape[1]):
pyhrf.verbose(1, get_2Dtable_string(mus[f,:,:], ['C0','C1'],
['true', 'estim']))
pyhrf.verbose(1, 'vars:')
pyhrf.verbose(1, get_2Dtable_string(vs, ['C0','C1'], ['true', 'estim']))
pyhrf.verbose(1, 'weights:')
pyhrf.verbose(1, get_2Dtable_string(ls, ['C0','C1'], ['true', 'estim']))
if plot:
import matplotlib.pyplot as plt
from pyhrf.plot import plot_gaussian_mixture
f = 1 #check this feature
# plt.vlines(samples[:,f], 0, post_weights, 'r')
# plt.vlines(samples[:,f], 0, 1-post_weights, 'b')
plot_gaussian_mixture(np.array([[mu0[f],mu1[f]], [v0,v1]]),
props=[1-l,l], color='r')
plot_gaussian_mixture(np.array([[mu0_est[f], mu1_est[f]],
[v0_est, v1_est]]),
props=[1-l_est, l_est], color='b')
plt.hist(samples[:,f], color='g',normed=True)
plt.show()
assert_array_almost_equal([mu0_est, mu1_est], [mu0,mu1], decimal=1)
assert_array_almost_equal([v0_est, v1_est], [v0,v1], decimal=1)
assert_array_almost_equal([1-l_est,l_est], [1-l,l], decimal=1)
def test_gmm_known_weights_difvars_noisea(self):
"""
        Test biGMM fit with known posterior weights: weights are computed from
        the noisy samples while the fit is run on the clean samples, with
        different component variances. Multivariate case.
"""
plot = False
pyhrf.verbose.set_verbosity(0)
np.random.seed(2354) #make reproducible results
# biGMM parameters:
mu0 = np.array([2., 5., 10.])
v0 = 3.
mu1 = np.array([12., 15., 20.])
v1 = v0/50.
l = .3 #weight for component 1
v_noise = 1.5
# generate samples from biGMM:
nsamples = 10000
samples = np.random.randn(nsamples) * v0**.5 + mu0[:,np.newaxis]
card_c1 = int(l*nsamples)
samples[:, :card_c1] = np.random.randn(card_c1) * v1**.5 + \
mu1[:, np.newaxis]
samples = samples.T
noise = np.random.randn(samples.shape[0], samples.shape[1]) * v_noise
#print 'noise shape = ', str(noise.shape)
noisy_samples = samples + noise
# compute sample-specific posterior weights:
nfeats = len(mu0)
print nfeats
post_weights = 1 / (1 + (1-l)/l * (v0/v1)**(-nfeats/2.) * \
np.exp(-(pm.norm2_bc(noisy_samples,mu0)/v0 - \
pm.norm2_bc(noisy_samples,mu1)/v1)/2.))
# fit GMM knowing post weights:
mu0_est, mu1_est, v0_est, v1_est, l_est, llh = \
pm.informedGMM_MV(samples, post_weights)
# Some outputs:
mus = np.array([[mu0,mu1], [mu0_est, mu1_est]]).transpose()
vs = np.array([[v0,v1], [v0_est, v1_est]]).T
ls = np.array([[1-l,l], [1-l_est, l_est]]).T
from pyhrf.tools import get_2Dtable_string
pyhrf.verbose(1, 'means:')
for f in xrange(samples.shape[1]):
pyhrf.verbose(1, get_2Dtable_string(mus[f,:,:], ['C0','C1'],
['true', 'estim']))
pyhrf.verbose(1, 'vars:')
pyhrf.verbose(1, get_2Dtable_string(vs, ['C0','C1'], ['true', 'estim']))
pyhrf.verbose(1, 'weights:')
pyhrf.verbose(1, get_2Dtable_string(ls, ['C0','C1'], ['true', 'estim']))
if plot:
import matplotlib.pyplot as plt
from pyhrf.plot import plot_gaussian_mixture
f = 1 #check this feature
# plt.vlines(samples[:,f], 0, post_weights, 'r')
# plt.vlines(samples[:,f], 0, 1-post_weights, 'b')
plot_gaussian_mixture(np.array([[mu0[f],mu1[f]], [v0,v1]]),
props=[1-l,l], color='r')
plot_gaussian_mixture(np.array([[mu0_est[f], mu1_est[f]],
[v0_est, v1_est]]),
props=[1-l_est, l_est], color='b')
plt.hist(samples[:,f], color='g',normed=True)
plt.show()
assert_array_almost_equal([mu0_est, mu1_est], [mu0,mu1], decimal=1)
assert_array_almost_equal([v0_est, v1_est], [v0,v1], decimal=1)
assert_array_almost_equal([1-l_est,l_est], [1-l,l], decimal=1)
def test_gmm_known_weights_noisea(self):
"""
        Test biGMM fit with known posterior weights: weights are computed from
        the clean biGMM samples and the fit is run on noisy samples, with
        equal component variances. Multivariate case.
"""
plot = False
pyhrf.verbose.set_verbosity(0)
np.random.seed(2354) #make reproducible results
# biGMM parameters:
mu0 = np.array([2., 5., 10.])
v0 = 3.
mu1 = np.array([12., 15., 20.])
v1 = 3.
l = .3 #weight for component 1
v_noise = 1.5
# generate samples from biGMM:
nsamples = 10000
samples = np.random.randn(nsamples) * v0**.5 + mu0[:,np.newaxis]
card_c1 = int(l*nsamples)
samples[:, :card_c1] = np.random.randn(card_c1) * v1**.5 + \
mu1[:, np.newaxis]
samples = samples.T
noise = np.random.randn(samples.shape[0], samples.shape[1]) * v_noise
#print 'noise shape = ', str(noise.shape)
noisy_samples = samples + noise
# compute sample-specific posterior weights:
nfeats = len(mu0)
print nfeats
post_weights = 1 / (1 + (1-l)/l * (v0/v1)**(-nfeats/2.) * \
np.exp(-(pm.norm2_bc(samples,mu0)/v0 - \
pm.norm2_bc(samples,mu1)/v1)/2.))
# fit GMM knowing post weights:
mu0_est, mu1_est, v0_est, v1_est, l_est, llh = \
pm.informedGMM_MV(noisy_samples, post_weights)
# Some outputs:
mus = np.array([[mu0,mu1], [mu0_est, mu1_est]]).transpose()
vs = np.array([[v0,v1], [v0_est, v1_est]]).T
ls = np.array([[1-l,l], [1-l_est, l_est]]).T
from pyhrf.tools import get_2Dtable_string
pyhrf.verbose(1, 'means:')
for f in xrange(samples.shape[1]):
pyhrf.verbose(1, get_2Dtable_string(mus[f,:,:], ['C0','C1'],
['true', 'estim']))
pyhrf.verbose(1, 'vars:')
pyhrf.verbose(1, get_2Dtable_string(vs, ['C0','C1'], ['true', 'estim']))
pyhrf.verbose(1, 'weights:')
pyhrf.verbose(1, get_2Dtable_string(ls, ['C0','C1'], ['true', 'estim']))
if plot:
import matplotlib.pyplot as plt
from pyhrf.plot import plot_gaussian_mixture
f = 1 #check this feature
# plt.vlines(samples[:,f], 0, post_weights, 'r')
# plt.vlines(samples[:,f], 0, 1-post_weights, 'b')
plot_gaussian_mixture(np.array([[mu0[f],mu1[f]], [v0,v1]]),
props=[1-l,l], color='r')
plot_gaussian_mixture(np.array([[mu0_est[f], mu1_est[f]],
[v0_est, v1_est]]),
props=[1-l_est, l_est], color='b')
plt.hist(samples[:,f], color='g',normed=True)
plt.show()
assert_array_almost_equal([mu0_est, mu1_est], [mu0,mu1], decimal=1)
assert_array_almost_equal([v0_est, v1_est], [v0,v1], decimal=1)
assert_array_almost_equal([1-l_est,l_est], [1-l,l], decimal=1)
def test_gmm_known_weights_noise(self):
"""
        Test biGMM fit with known posterior weights: weights are computed from
        the clean biGMM samples and the fit is run on noisy samples, with
        equal component variances. Multivariate case.
"""
plot = False
pyhrf.verbose.set_verbosity(0)
np.random.seed(2354) #make reproducible results
# biGMM parameters:
mu0 = np.array([2., 5., 10.])
v0 = 3.
mu1 = np.array([12., 15., 20.])
v1 = 3.
l = .3 #weight for component 1
v_noise = 1.5
# generate samples from biGMM:
nsamples = 10000
samples = np.random.randn(nsamples) * v0**.5 + mu0[:,np.newaxis]
card_c1 = int(l*nsamples)
samples[:, :card_c1] = np.random.randn(card_c1) * v1**.5 + \
mu1[:, np.newaxis]
samples = samples.T
print 'samples shape = ', str(samples.shape)
noise = np.random.randn(samples.shape[0], samples.shape[1]) * v_noise
print 'noise shape = ', str(noise.shape)
noisy_samples = samples + noise
# compute sample-specific posterior weights:
nfeats = len(mu0)
print nfeats
post_weights = 1 / (1 + (1-l)/l * (v0/v1)**(-nfeats/2.) * \
np.exp(-(pm.norm2_bc(samples,mu0)/v0 - \
pm.norm2_bc(samples,mu1)/v1)/2.))
# fit GMM knowing post weights:
mu0_est, mu1_est, v0_est, v1_est, l_est, llh = \
pm.informedGMM_MV(noisy_samples, post_weights)
# Some outputs:
mus = np.array([[mu0,mu1], [mu0_est, mu1_est]]).transpose()
vs = np.array([[v0,v1], [v0_est, v1_est]]).T
ls = np.array([[1-l,l], [1-l_est, l_est]]).T
from pyhrf.tools import get_2Dtable_string
pyhrf.verbose(1, 'means:')
for f in xrange(samples.shape[1]):
pyhrf.verbose(1, get_2Dtable_string(mus[f,:,:], ['C0','C1'],
['true', 'estim']))
pyhrf.verbose(1, 'vars:')
pyhrf.verbose(1, get_2Dtable_string(vs, ['C0','C1'], ['true', 'estim']))
pyhrf.verbose(1, 'weights:')
pyhrf.verbose(1, get_2Dtable_string(ls, ['C0','C1'], ['true', 'estim']))
if plot:
import matplotlib.pyplot as plt
from pyhrf.plot import plot_gaussian_mixture
f = 1 #check this feature
plot_gaussian_mixture(np.array([[mu0[f],mu1[f]], [v0,v1]]),
props=[1-l,l], color='r')
plot_gaussian_mixture(np.array([[mu0_est[f], mu1_est[f]],
[v0_est, v1_est]]),
props=[1-l_est, l_est], color='b')
plt.hist(samples[:,f], color='g',normed=True)
plt.show()
assert_array_almost_equal([mu0_est, mu1_est], [mu0,mu1], decimal=1)
assert_array_almost_equal([v0_est, v1_est], [v0,v1], decimal=1)
assert_array_almost_equal([1-l_est,l_est], [1-l,l], decimal=1)
def test_informedGMM_parameters(self):
"""
Check that merge is in favour of non-activ at the same feature level,
starting from singleton clusters.
"""
pyhrf.verbose.set_verbosity(0)
n_samples = 10000000
# Generation of bi-Gaussian distribution
mu0, v0 = 1., 0.6 # mean and standard deviation
g0 = np.random.normal(mu0, v0, n_samples)
l0 = 0.3
        print 'Gaussian 0: mu0 = %g, v0 = %g, lambda0 = %g' % (mu0, v0, l0)
mu1, v1 = 5., 1.3 # mean and standard deviation
g1 = np.random.normal(mu1, v1, n_samples)
l1 = 0.7
        print 'Gaussian 1: mu1 = %g, v1 = %g, lambda1 = %g' % (mu1, v1, l1)
features = g0.copy()
features[:l1*len(features)] = g1[:l1*len(features)]
alphas = 1 / (1 + (1-l0)/l0 * (v0/v1)**.5 * \
np.exp(-((features-mu0)**2/v0 - (features-mu1)**2/v1)/2.))
print 'N samples: ', n_samples
print ''
print 'Original parameters'
print 'mu1:', mu1, 'mu0:', mu0
print 'v1:', v1, 'v0:', v0
print 'lambda:', l0
print ''
umu, uv, ul = pm.informedGMM_MV(features, alphas)
print 'Updated parameters'
print 'mu1:', umu[1], 'mu0:', umu[0]
print 'v1:', uv[1], 'v0:', uv[0]
print 'lambda:', ul[0]
print ''
assert_array_almost_equal(umu, [mu0, mu1])
assert_array_almost_equal( uv, [v0, v1])
def test_gmm_known_alpha0(self):
"""
Test biGMM update with posterior weights equal to 0
"""
plot = False
pyhrf.verbose.set_verbosity(0)
np.random.seed(2354) #make reproducible results
# biGMM parameters:
mu0 = 2.
v0 = 5.
l = 0.
mu1 = 0.
v1 = 0.
# generate samples from biGMM:
nsamples = 10000
samples = np.random.randn(nsamples) * v0**.5 + mu0
card_c1 = int(l*nsamples)
samples[:card_c1] = np.random.randn(card_c1) * v1**.5 + mu1
post_weights = np.zeros_like(samples)
# fit GMM knowing post weights:
mu_est, v_est, l_est = pm.informedGMM(samples, post_weights)
assert_array_almost_equal(mu_est, [mu0,mu1], decimal=1)
assert_array_almost_equal(v_est, [v0,v1], decimal=1)
assert_array_almost_equal(l_est, [1-l,l], decimal=1)
# Some outputs:
mus = np.array([[mu0,mu1], mu_est]).T
vs = np.array([[v0,v1], v_est]).T
ls = np.array([[1-l,l], l_est]).T
from pyhrf.tools import get_2Dtable_string
pyhrf.verbose(1, 'means:')
pyhrf.verbose(1, get_2Dtable_string(mus, ['C0','C1'], ['true', 'estim']))
pyhrf.verbose(1, 'vars:')
pyhrf.verbose(1, get_2Dtable_string(vs, ['C0','C1'], ['true', 'estim']))
pyhrf.verbose(1, 'weights:')
pyhrf.verbose(1, get_2Dtable_string(ls, ['C0','C1'], ['true', 'estim']))
if plot:
import matplotlib.pyplot as plt
from pyhrf.plot import plot_gaussian_mixture
#plt.vlines(samples, 0, post_weights, 'r')
#plt.vlines(samples, 0, 1-post_weights, 'b')
plot_gaussian_mixture(np.array([[mu0,mu1], [v0,v1]]),
props=[1-l,l], color='r')
plot_gaussian_mixture(np.array([mu_est, v_est]),
props=l_est, color='b')
plt.hist(samples, color='g',normed=True)
plt.show()
def test_gmm_likelihood(self):
"""
Test the log likelihood computation
"""
pyhrf.verbose.set_verbosity(0)
np.random.seed(2354) #make reproducible results
# biGMM parameters:
mu0 = 2.
v0 = 5.
mu1 = 10.
v1 = 5.
l = .4 #weight for component 1
# generate samples from biGMM:
nsamples = 10000
samples = np.random.randn(nsamples) * v0**.5 + mu0
card_c1 = int(l*nsamples)
samples[:card_c1] = np.random.randn(card_c1) * v1**.5 + mu1
# Calculate loglikelihood
d = loglikelihood_computation(samples, mu0, v0, mu1, v1, l)
assert_array_almost_equal(d, 0.0, decimal=1)
## fit GMM knowing post weights:
#mu_est, v_est, l_est = informedGMM(samples, post_weights)
#d2 = loglikelihood_computation(samples, mu0, v0, mu1, v1, l)
class FeatureExtractionTest(unittest.TestCase):
def setUp(self):
# called before any unit test of the class
self.my_param = "OK"
self.tmp_path = pyhrf.get_tmp_path() #create a temporary folder
self.clean_tmp = True
def tearDown(self):
# called after any unit test of the class
if self.clean_tmp:
pyhrf.verbose(1, 'clean tmp path')
shutil.rmtree(self.tmp_path)
def test_new_obj(self):
# a unit test
result = my_func_to_test(self.my_param, output_path=self.tmp_path)
assert result == "OK"
def test_generate_features(self):
pyhrf.verbose.set_verbosity(0)
p = np.array([1,1,1,1,1,2,2,2,2,2], dtype=int)
act_labels = np.array([0,0,1,1,0,0,0,1,1,1], dtype=int)
feat_levels = {1: (np.array([.4, .3]), #feats non-activ for parc 1
np.array([5., 7.]),), #feats activ for parc 1
2: (np.array([.1, .2]), #feats non-activ for parc 2
np.array([9., 8.]),)} #feats activ for parc 2
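        # generate_features assigns to each position the feature level of its
        # parcel, taking the non-active tuple where act_labels is 0 and the
        # active tuple where act_labels is 1, as spelled out in
        # expected_features below.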
expected_features = np.array([[.4, .3],
[.4, .3],
[5., 7.],
[5., 7.],
[.4, .3],
[.1, .2],
[.1, .2],
[9., 8.],
[9., 8.],
[9., 8.]])
features = pm.generate_features(p, act_labels, feat_levels, 0.)
assert_array_equal(features, expected_features)
# Test feature extraction previous to parcellation
def test_feature_extraction(self):
pyhrf.verbose.set_verbosity(0)
method = 'glm_deriv'
data0 = simulate_fmri_data()
dt = data0.simulation[0]['dt']
time_length = data0.simulation[0]['duration']
ncond = len(data0.simulation[0]['condition_defs'])
labels = data0.simulation[0]['labels']
territories = data0.simulation[0]['hrf_territories']
ampl, pvalues, feats, bvars = pm.feature_extraction(data0, method, dt, time_length, ncond)
assert_array_equal(feats.shape,bvars.shape)
assert_equal(feats.shape[0],ampl.shape[0])
fn1 = op.join(self.tmp_path, 'features_representation1.png')
fn2 = op.join(self.tmp_path, 'features_representation2.png')
fn3 = op.join(self.tmp_path, 'features_representation3.png')
fn4 = op.join(self.tmp_path, 'features_representation4.png')
name = pm.represent_features(feats, labels.T, 1.-pvalues, territories, 1, fn1)
name = pm.represent_features(feats, labels.T, 1.-pvalues, territories, 0, fn2)
mask = np.where(labels.flatten()==0)
f = feats[mask,:]
name = pm.represent_features(f[0,:,:], labels.T[mask], 1.-pvalues[mask], territories[mask], 1, fn3)
mask = np.where(labels.flatten()==1)
f = feats[mask,:]
name = pm.represent_features(f[0,:,:], labels.T[mask], 1.-pvalues[mask], territories[mask], 1, fn4)
print fn1
#res = pm.represent_features(feats, labels.T, bvars[:,0])
self.assertTrue(op.exists(fn1), msg='%s does not exist'%fn1)
self.clean_tmp = False #HACK
"""
# Test feature extraction previous to parcellation
def test_feature_extraction(self):
pyhrf.verbose.set_verbosity(0)
method = 'glm_deriv'
data0 = simulate_fmri_data()
dt = data0.simulation[0]['dt']
time_length = data0.simulation[0]['duration']
ncond = len(data0.simulation[0]['condition_defs'])
labels = data0.simulation[0]['labels']
territories = data0.simulation[0]['hrf_territories']
ampl, feats, bvars = pm.feature_extraction(data0, method, dt, time_length, ncond)
assert_array_equal(feats.shape,bvars.shape)
assert_equal(feats.shape[0],ampl.shape[0])
fn = op.join(self.tmp_path, 'features_representation.png')
name = pm.represent_features(feats, labels.T, ampl, territories, 1, fn)
name = pm.represent_features(feats, labels.T, ampl, territories, 0, fn)
mask = np.where(labels.flatten()==0)
f = feats[mask,:]
name = pm.represent_features(f[0,:,:], labels.T[mask], ampl[mask], territories[mask], 1, fn)
mask = np.where(labels.flatten()==1)
f = feats[mask,:]
name = pm.represent_features(f[0,:,:], labels.T[mask], ampl[mask], territories[mask], 1, fn)
print fn
#res = pm.represent_features(feats, labels.T, bvars[:,0])
self.assertTrue(op.exists(fn), msg='%s does not exist'%fn)
self.clean_tmp = False #HACK
"""
def simulate_fmri_data(scenario='high_snr', output_path=None):
## Scenarios
# low SNR level:
if (scenario=='low_snr'):
m_act= 3.8 # activation magnitude
v_noise = 1. # noise variance
        # 1.5 normally, 0.2 for (almost) no noise
else: # high SNR level:
m_act= 1.8 # activation magnitude
v_noise = 1.5 # noise variance
v_act=.25
v_inact=.25
conditions_def = [Condition(name='audio', m_act= m_act, v_act=v_act,
v_inact=v_inact, label_map='house_sun'),
#Condition(name='audio', m_act= 5.8, v_act=.25,
# v_inact=.25, label_map='squares_8x8'),
#Condition(name='video', m_act=2, v_act=.5,
#v_inact=.5, label_map='activated'),
]
# HRFs mapped to the 3 parcels:
duration = 25.
dt = .5
import pyhrf.boldsynth.scenarios as simu
primary_hrfs = [
simu.genBezierHRF(np.arange(0,duration+dt,dt), pic=[3,1],
normalize=True),
simu.genBezierHRF(np.arange(0,duration+dt,dt), pic=[5,1],
normalize=True),
simu.genBezierHRF(np.arange(0,duration+dt,dt), pic=[7,1],
normalize=True),
simu.genBezierHRF(np.arange(0,duration+dt,dt), pic=[10,1],
picw = 6,ushoot=[22,-0.2], normalize=True),
]
# Dictionary mapping an item label to a quantity or a function that generates
# this quantity
simulation_steps = {
'condition_defs' : conditions_def,
'nb_voxels' : 400,
# Labels
'labels_vol' : simu.create_labels_vol, # 3D shape
'labels' : simu.flatten_labels_vol, # 1D shape (flatten)
# Nrls
'nrls' : simu.create_time_invariant_gaussian_nrls,
'dt': dt,
'duration': duration,
'dsf' : 2,
'tr' : 1.,
# Paradigm
'paradigm' : simu.create_localizer_paradigm,
'rastered_paradigm' : simu.rasterize_paradigm,
# HRF
'hrf_duration': duration,
'primary_hrfs' : primary_hrfs, # derivative of several hrfs
'nb_hrf_territories' : 4,
#'hrf_territories_name' : '3_territories_8x8',
'hrf_territories_name' : '4_territories',
'hrf_territories' : simu.load_hrf_territories,
'hrf' : simu.create_hrf_from_territories, # duplicate all HRF along all voxels
# Stim induced
'stim_induced_signal' : simu.create_stim_induced_signal,
# Noise
'v_gnoise' : v_noise,
'noise' : simu.create_gaussian_noise, #requires bold_shape, v_gnoise
# Drift
'drift_order' : 4,
'drift_var' : 11.,
'drift' : simu.create_polynomial_drift,
# Bold
'bold_shape' : simu.get_bold_shape,
'bold' : simu.create_bold_from_stim_induced,
}
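    # Pipeline resolves this dictionary as a dependency graph: each callable is
    # invoked with arguments looked up by name among the other entries (e.g.
    # simu.create_gaussian_noise receives 'bold_shape' and 'v_gnoise'), so the
    # order of the entries above does not matter.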
simu_graph = Pipeline(simulation_steps)
simu_graph.resolve()
simu_vals = simu_graph.get_values()
if output_path is not None:
simu.simulation_save_vol_outputs(simu_vals, output_path)
return FmriData.from_simulation_dict(simu_vals)
def create_features(size='2D', feat_contrast='high', noise_var=0.,
n_features=2):
if size == '2D':
true_parcellation = np.array([[1,1,1,1],
[1,1,1,2],
[1,1,2,2],
[2,2,2,2]])
act_clusters = np.array([[1,1,0,0],
[1,1,0,0],
[0,0,1,1],
[1,0,1,1]])
ker_mask = pgraph.kerMask2D_4n
elif size == '1D':
ker_mask = None
true_parcellation = np.array([1,1,1,1,1,1,1,1,1,2,2,2])
act_clusters = np.array([0,1,1,0,0,0,0,1,0,1,1,1])
else:
raise Exception('Unsupported size scenario: %s' %str(size))
mask = true_parcellation>0
true_parcellation_flat = true_parcellation[np.where(mask)]
act_clusters_flat = act_clusters[np.where(mask)]
if feat_contrast == 'high':
if n_features == 2:
# non-act act
feat_levels = {1 : ([1., 1.], [10, 17.]),
2 : ([1., 1.], [40, 13.]),}
elif n_features == 1:
# non-act act
feat_levels = {1 : ([1.], [10]),
2 : ([1.], [40]),}
else:
raise Exception('nb features should be <= 2')
# non-act act
act_levels = {1 : ([.1], [.9]),
2 : ([.1], [.9]),}
elif feat_contrast == 'low':
if n_features == 2:
# non-act act
feat_levels = {1 : ([1., 1.], [8., 7.]),
2 : ([1., 1.], [8., 6.]),}
elif n_features == 1:
# non-act act
feat_levels = {1 : ([1.], [8.]),
2 : ([1.], [8.]),}
else:
raise Exception('nb features should be <= 2')
# non-act act
act_levels = {1 : ([.1], [.9]),
2 : ([.1], [.9]),}
else:
raise Exception('Unsupported feature contrast scenario: %s' \
%str(feat_contrast))
features = pm.generate_features(true_parcellation_flat,
act_clusters_flat,
feat_levels, noise_var=noise_var)
var = np.ones_like(features)
act = pm.generate_features(true_parcellation_flat, act_clusters_flat,
act_levels).squeeze()
pyhrf.verbose(1, 'features:')
pyhrf.verbose.printNdarray(1, features.T)
pyhrf.verbose(1, 'activation levels:')
pyhrf.verbose.printNdarray(1, act.T)
pyhrf.verbose(1, 'variances:')
pyhrf.verbose.printNdarray(1, var.T)
n_samples, n_features = features.shape
graph = pgraph.graph_from_lattice(mask, ker_mask)
return true_parcellation_flat, features, graph, var, act, \
act_clusters_flat, mask
class ParcellationTest(unittest.TestCase):
def setUp(self):
# called before any unit test of the class
self.my_param = "OK"
self.tmp_path = pyhrf.get_tmp_path() #create a temporary folder
self.clean_tmp = True
def tearDown(self):
# called after any unit test of the class
if self.clean_tmp:
pyhrf.verbose(1, 'clean tmp path')
shutil.rmtree(self.tmp_path)
def test_new_obj(self):
# a unit test
result = my_func_to_test(self.my_param, output_path=self.tmp_path)
assert result == "OK"
def test_spatialward_against_ward_sk(self):
"""
Check that pyhrf's spatial Ward parcellation is giving the same
results as scikit's spatial Ward parcellation
"""
pyhrf.verbose.set_verbosity(0)
true_parcellation, features, graph, var, act, act_labels, mask = \
create_features("1D", "high", 0.)
nbc = len(np.unique(true_parcellation))
act = np.zeros_like(act) + 1. #no activation levels
ww = pm.spatial_ward(features, graph, nb_clusters=nbc)
ww_sk = pm.spatial_ward_sk(features, graph, nb_clusters=nbc)
assert_array_equal(ww.labels_, ww_sk.labels_)
def test_spatialward_against_modelbasedspatialward(self):
"""
        Check that pyhrf's spatial Ward parcellation gives the same results as
        the model-based spatial Ward parcellation (with uncertainty) when the
        variances and activation levels are uninformative.
"""
pyhrf.verbose.set_verbosity(0)
true_parcellation, features, graph, var, act, act_labels, mask = \
create_features("2D", "high", 0.)
nbc = len(np.unique(true_parcellation))
act = np.zeros_like(act) + 1. #no activation levels
ww = pm.spatial_ward(features, graph, nb_clusters=nbc)
ww_mb = pm.spatial_ward_with_uncertainty(features, graph, var, act, \
nb_clusters = nbc, \
dist_type='uward2')
p0 = pm.align_parcellation(ww.labels_, ww_mb.labels_)
assert_array_equal(ww.labels_, p0)
def test_uspatialward_formula(self):
"""
        Check that pyhrf's uncertain spatial Ward parcellation ('uward') gives
        the same results as the modified-formula variant ('uward2').
"""
pyhrf.verbose.set_verbosity(0)
true_parcellation, features, graph, var, act, act_labels, mask = \
create_features("1D", "high", 0.)
nbc = len(np.unique(true_parcellation))
act = np.zeros_like(act) + 1. #no activation levels
        ww = pm.spatial_ward_with_uncertainty(features, graph, var, act,
                                              nb_clusters=nbc,
                                              dist_type='uward')
        ww_formula = pm.spatial_ward_with_uncertainty(features, graph, var, act,
                                                      nb_clusters=nbc,
                                                      dist_type='uward2')
assert_array_equal(ww.labels_, ww_formula.labels_)
def save_parcellation_outputs(self, pobj, mask):
pm.ward_tree_save(pobj, self.tmp_path, mask)
def test_spatialward_from_forged_features(self):
"""
Test spatial Ward on forged features
"""
pyhrf.verbose.set_verbosity(0)
self.tmp_path = './' #hack
true_parcellation, features, graph, var, act, act_labels, mask = \
create_features("2D", "high", 10.)
nbc = len(np.unique(true_parcellation))
act = np.zeros_like(act) + 1. #no activation levels
ww = pm.spatial_ward(features, graph, nb_clusters=nbc)
if 1:
self.save_parcellation_outputs(ww, mask)
tp = expand_array_in_mask(true_parcellation, mask)
fn_tp = op.join(self.tmp_path, 'true_parcellation.nii')
xndarray(tp, axes_names=MRI3Daxes[:mask.ndim]).save(fn_tp)
print 'pyhrf_view %s/*' %self.tmp_path
self.clean_tmp = False #hack
pdist, common_parcels = parcellation_dist(ww.labels_,
true_parcellation)
assert_array_equal(pdist, 3) #non-regression
def test_wpu_from_forged_features(self):
"""
Test spatial Ward with uncertainty on forged features
"""
pyhrf.verbose.set_verbosity(0)
true_parcellation, features, graph, var, act, act_labels, mask = \
create_features("2D", "high", 0.)
nbc = len(np.unique(true_parcellation))
ww = pm.spatial_ward_with_uncertainty(features, graph, var, act,
nb_clusters=nbc, save_history=False)
if 1:
self.save_parcellation_outputs(ww, mask)
tp = expand_array_in_mask(true_parcellation, mask)
fn_tp = op.join(self.tmp_path, 'true_parcellation.nii')
xndarray(tp, axes_names=MRI3Daxes[:mask.ndim]).save(fn_tp)
print 'pyhrf_view %s/*' %self.tmp_path
self.clean_tmp = False #hack
pdist, common_parcels = parcellation_dist(ww.labels_,
true_parcellation)
assert_array_equal(pdist, 3) #non-regression
def test_gmm_from_forged_features(self):
"""
Test spatial Ward with uncertainty on forged features
"""
pyhrf.verbose.set_verbosity(0)
true_parcellation, features, graph, var, act, act_labels, mask = \
create_features("2D", "high", 0.)
nbc = len(np.unique(true_parcellation))
ww = pm.spatial_ward_with_uncertainty(features, graph, var, act,
nb_clusters=nbc, save_history=False,
dist_type='mixt')
if 1:
self.save_parcellation_outputs(ww, mask)
tp = expand_array_in_mask(true_parcellation, mask)
fn_tp = op.join(self.tmp_path, 'true_parcellation.nii')
xndarray(tp, axes_names=MRI3Daxes[:mask.ndim]).save(fn_tp)
print 'pyhrf_view %s/*' %self.tmp_path
self.clean_tmp = False #hack
pdist, common_parcels = parcellation_dist(ww.labels_,
true_parcellation)
assert_array_equal(pdist, 3) #non-regression
def test_parcellation_spatialWard_2(self):
"""
Test WPU on a simple case.
"""
pyhrf.verbose.set_verbosity(0)
features = np.array([[100.,100.,1.,1.],
[100.,100.,1.,1.],
[100.,100.,1.,1.]]).T
n_samples, n_features = features.shape
graph = pgraph.graph_from_lattice(np.ones((2,2)), pgraph.kerMask2D_4n)
var = np.ones_like(features)
var_ini = np.ones_like(features)
act = np.ones_like(features[:,0])
act_ini = np.ones_like(features[:,0])
ww = pm.spatial_ward_with_uncertainty(features, graph, var, act, var_ini,
act_ini, nb_clusters=2)
p = ww.labels_
assert_array_equal(p, np.array([1,1,2,2]))
def test_parcellation_spatialWard_act_level_1D(self):
"""
Test the ability of WPU to 'jump' non-activating positions (1D case).
"""
pyhrf.verbose.set_verbosity(0)
np.seterr('raise')
true_parcellation = np.array([1,1,1,1,1,1,1,1,1,3,3,3])
act_labels = np.array([0,1,1,0,0,0,0,1,0,1,1,1])
# non-act act
feat_levels = {1 : ([1., 5.], [10, 7.]),
3 : ([1., 2.], [40, 3.]),}
features = pm.generate_features(true_parcellation, act_labels,
feat_levels)
var = np.ones_like(features)
# non-act act
act_levels = {1 : ([.3], [4.]),
3 : ([.3], [4.]),}
act = pm.generate_features(true_parcellation, act_labels, act_levels)
act = act.squeeze()
pyhrf.verbose(1, 'features:')
pyhrf.verbose.printNdarray(1, features.T)
pyhrf.verbose(1, 'activation levels:')
pyhrf.verbose.printNdarray(1, act)
pyhrf.verbose(1, 'variances:')
pyhrf.verbose.printNdarray(1, var.T)
n_samples, n_features = features.shape
graph = pgraph.graph_from_lattice(np.ones((1,n_samples)),
pgraph.kerMask2D_4n)
ww = pm.spatial_ward_with_uncertainty(features, graph, var, act,
nb_clusters=2)
p = ww.labels_
if 0:
fn = op.join(self.tmp_path, 'parcellation_tree.png')
pyhrf.verbose(1, 'fig parcellation tree: %s' %fn)
lab_colors = [('black','red')[l] for l in act_labels]
pm.render_ward_tree(ww, fn, leave_colors=lab_colors)
self.clean_tmp = False
# tolerate 2 differing positions, correspond to 2 non-active
# positions in between two different clusters
pm.assert_parcellation_equal(p, true_parcellation, tol=2)
def test_parcellation_mmp_act_level_1D(self):
"""
Test the ability of MMP to 'jump' non-activating positions (1D case).
"""
pyhrf.verbose.set_verbosity(0)
np.seterr('raise')
true_parcellation, features, graph, var, act, act_labels, mask = \
create_features("1D", "high", 0.)
ww = pm.spatial_ward_with_uncertainty(features, graph, var, act,
nb_clusters=2,
dist_type='mixt')
p = ww.labels_
if 0:
fn = op.join(self.tmp_path, 'parcellation_tree.png')
pyhrf.verbose(1, 'fig parcellation tree: %s' %fn)
lab_colors = [('black','red')[l] for l in act_labels]
pm.render_ward_tree(ww, fn, leave_colors=lab_colors)
self.clean_tmp = False #hack
# tolerate 2 differing positions, correspond to 2 non-active
# positions in between two different clusters
pm.assert_parcellation_equal(p, true_parcellation, tol=2)
def test_parcellation_spatialWard_act_level_2D(self):
"""
Test the ability of WPU to 'jump' non-activating positions (2D case).
"""
pyhrf.verbose.set_verbosity(0)
feat_contrast = "high"
noise_var = 0.
true_parcellation, features, graph, var, act, act_labels, mask = \
create_features("2D", feat_contrast, noise_var)
ww = pm.spatial_ward_with_uncertainty(features, graph, var, act,
nb_clusters=2)
p = ww.labels_
if pyhrf.verbose.verbosity > 0:
print 'true parcellation:'
print expand_array_in_mask(true_parcellation, mask)
print 'WPU parcellation:'
print expand_array_in_mask(p, mask)
print 'act labels:'
print act_labels
if 0:
fn = op.join(self.tmp_path, 'parcellation_tree.png')
pyhrf.verbose(1, 'fig parcellation tree: %s' %fn)
lab_colors = [('black','red')[l] for l in act_labels]
pm.render_ward_tree(ww, fn, leave_colors=lab_colors)
self.clean_tmp = False #HACK
# tolerate 2 differing positions, correspond to 2 non-active
# positions in between two different clusters
pm.assert_parcellation_equal(p, true_parcellation,
tol_pos=act_labels==0)
def test_parcellation_mmp_act_level_2D(self):
"""
Test the ability of MMP to 'jump' non-activating positions (2D case).
"""
pyhrf.verbose.set_verbosity(0)
feat_contrast = "high"
noise_var = 3.
true_parcellation, features, graph, var, act, act_labels, mask = \
create_features("2D", feat_contrast, noise_var)
ww = pm.spatial_ward_with_uncertainty(features, graph, var, act,
nb_clusters=2, dist_type='mixt')
p = ww.labels_
if pyhrf.verbose.verbosity > 0:
print 'true parcellation:'
print expand_array_in_mask(true_parcellation, mask)
print 'MMP parcellation:'
print expand_array_in_mask(p, mask)
print 'act labels:'
print act_labels
if 0:
fn = op.join(self.tmp_path, 'parcellation_tree.png')
pyhrf.verbose(1, 'fig parcellation tree: %s' %fn)
lab_colors = [('black','red')[l] for l in act_labels]
pm.render_ward_tree(ww, fn, leave_colors=lab_colors)
self.clean_tmp = False #HACK
# tolerate 2 differing positions, correspond to 2 non-active
# positions in between two different clusters
pm.assert_parcellation_equal(p, true_parcellation,
tol_pos=act_labels==0)
def test_parcellation_spatialWard_variance_1D(self):
"""
        Test the sensitivity of WPU to feature variance (1D case).
"""
pyhrf.verbose.set_verbosity(0)
np.seterr('raise')
true_parcellation = np.array([1,1,1,1,1,1,1,1,1,3,3,3])
act_labels = np.array([1,1,1,1,1,1,1,1,1,1,1,1])
n = 0.5
var = (np.random.randn(*true_parcellation.shape))[:,np.newaxis] * n
features = (true_parcellation + var)
act = act_labels.squeeze()
pyhrf.verbose(1, 'features:')
pyhrf.verbose.printNdarray(1, features.T)
pyhrf.verbose(1, 'activation levels:')
pyhrf.verbose.printNdarray(1, act)
pyhrf.verbose(1, 'variances:')
pyhrf.verbose.printNdarray(1, var.T)
n_samples, n_features = features.shape
graph = pgraph.graph_from_lattice(np.ones((1,n_samples)),
pgraph.kerMask2D_4n)
ww = pm.spatial_ward_with_uncertainty(features, graph, var, act,
nb_clusters=2)
p = ww.labels_
# tolerate 2 differing positions, correspond to 2 non-active
# positions in between two different clusters
pm.assert_parcellation_equal(p, true_parcellation, tol=2)
def test_parcellation_spatialWard_variance_2D(self):
"""
        Test the sensitivity to variance (2D case).
"""
pyhrf.verbose.set_verbosity(0)
true_parcellation = np.array([[1,1,1,1],
[1,1,1,2],
[1,1,2,2],
[2,2,2,2]])
var = np.array([[0.1,-0.1,-0.1,-0.1],
[0.1,0.1,-0.1,-0.1],
[0.1,0.1,0.1,-0.1],
[0.1,0.1,0.1,0.1]])
act_clusters = np.array([[1,1,1,1],
[1,1,1,1],
[1,1,1,1],
[1,1,1,1]])
mask = true_parcellation>0
true_parcellation_flat = true_parcellation[np.where(mask)]
act_clusters_flat = act_clusters[np.where(mask)]
features = (true_parcellation+var).flatten()
act = np.ones_like(act_clusters)
pyhrf.verbose(1, 'features %s:' %str(features.shape))
pyhrf.verbose.printNdarray(1, features.T)
pyhrf.verbose(1, 'activation levels:')
pyhrf.verbose.printNdarray(1, act.T)
pyhrf.verbose(1, 'variances :')
pyhrf.verbose.printNdarray(1, var.T)
n_samples, n_features = features[:,np.newaxis].shape
graph = pgraph.graph_from_lattice(mask, pgraph.kerMask2D_4n)
if 0:
print graph.shape
print features.shape
print var.flatten().shape
print act_clusters_flat.shape
ww = pm.spatial_ward_with_uncertainty(features[:,np.newaxis], graph,
var.flatten()[:,np.newaxis],
act_clusters_flat[:,np.newaxis],
nb_clusters=2)
p = ww.labels_
if pyhrf.verbose.verbosity > 0:
print 'true parcellation:'
print true_parcellation
print 'WPU parcellation:'
print expand_array_in_mask(p, mask)
print 'act labels:'
print act_clusters
if 0:
fn = op.join(self.tmp_path, 'parcellation_tree.png')
print 'fig parcellation tree:', fn
lab_colors = [('black','red')[l] for l in act_clusters_flat]
pm.render_ward_tree(ww, fn, leave_colors=lab_colors)
#self.clean_tmp = False #HACK
        # tolerate 2 differing positions, corresponding to 2 non-active
        # positions in between two different clusters
pm.assert_parcellation_equal(p, true_parcellation_flat,
tol_pos=act_clusters_flat==0)
def test_render_ward_tree(self):
pyhrf.verbose.set_verbosity(0)
features = np.array([[1.,2.,4.,8.,16,32]]).T
act = np.array([1,1,1,0,0,0])
var = np.ones_like(features)
n_samples, n_features = features.shape
graph = pgraph.graph_from_lattice(np.ones((1,n_samples)),
pgraph.kerMask2D_4n)
n_clusters = 1
ww = pm.spatial_ward_with_uncertainty(features, graph, var, act,
nb_clusters=n_clusters)
fn = op.join(self.tmp_path, 'parcellation_tree.png')
pyhrf.verbose(1, 'fig of parcellation tree: %s' %fn)
item_colors = [['black','red'][l] for l in act]
pm.render_ward_tree(ww, fn, leave_colors=item_colors)
self.assertTrue(op.exists(fn), msg='%s does not exist'%fn)
#self.clean_tmp = False #HACK
def test_ward_distance_1D_v1(self):
# Test: inertia is high between clusters and 0 in the same cluster
pyhrf.verbose.set_verbosity(0)
features = np.array([[10.,10.,10.,5.,5.,5.]]).T
var = np.ones_like(features)
mom_1 = np.array([1.,1.,1.,1.,1.,1.])
c_r = np.array([ 1, 2, 3, 4, 5])
c_c = np.array([ 0, 1, 2, 3, 4])
var = np.ones_like(features)
ini = np.array([0.,0.,0.,0.,0.])
act = np.array([1.,1.,1.,1.,1.,1.])
i1 = pm.compute_ward_dist(mom_1, features, c_r, c_c, var, act, ini)
pyhrf.verbose(1, 'inertia:')
pyhrf.verbose.printNdarray(1, i1)
assert_equal(len(np.array(np.where(i1>0))),1)
def test_ward_distance_1D_v2(self):
        # Test the effect of non-activation at the boundary between clusters
pyhrf.verbose.set_verbosity(0)
features = np.array([[10.3,10.1,10.7,5.1,5.3,5.2]]).T
var = np.ones_like(features)
mom_1 = np.array([1.,1.,1.,1.,1.,1.])
c_r = np.array([ 1, 2, 3, 4, 5])
c_c = np.array([ 0, 1, 2, 3, 4])
var = np.ones_like(features)
ini = np.array([0.,0.,0.,0.,0.])
act = np.array([1.,1.,0.1,0.1,1.,1.])
i1 = pm.compute_ward_dist(mom_1, features, c_r, c_c, var, act, ini)
pyhrf.verbose(1, 'inertia:')
pyhrf.verbose.printNdarray(1, i1)
assert_equal(np.argmax(i1),2)
def test_ward_distance_2D(self):
        # Test ward distance computation on a 2D lattice with partially activating clusters
pyhrf.verbose.set_verbosity(0)
true_parcellation = np.array([[1,1,1,1],
[1,1,1,2],
[1,1,2,2],
[2,2,2,2]])
act_clusters = np.array([[1,1,0,0],
[1,1,0,0],
[1,1,1,0],
[1,1,1,0]])
mask = true_parcellation>0
true_parcellation_flat = true_parcellation[np.where(mask)]
act_clusters_flat = act_clusters[np.where(mask)]
# non-act act
feat_levels = {1 : ([1.], [10]),
2 : ([3.], [40]),}
features = pm.generate_features(true_parcellation_flat, act_clusters_flat,
feat_levels,noise_var=0.)
pyhrf.verbose.printNdarray(1, features[:,0].T)
var = np.ones_like(features)
# non-act act
act_levels = {1 : ([.3], [4.]),
2 : ([.3], [4.]),}
act = pm.generate_features(true_parcellation_flat, act_clusters_flat,
act_levels).squeeze()
pyhrf.verbose.printNdarray(1, act)
mom_1 = np.ones_like(act)
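        # c_r / c_c hold the two endpoints of each edge of the 4x4 lattice;
        # compute_ward_dist evaluates the merge cost for every such pair.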
c_r = np.array([ 1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 8, 9, 9,10,10,11,11,12,13,13,14,14,15,15])
c_c = np.array([ 0, 1, 2, 0, 1, 4, 2, 5, 3, 6, 4, 5, 8, 6, 9, 7,10, 8, 9,12,10,13,11,14])
ini = np.zeros((c_r.shape[0], 1))
i1 = pm.compute_ward_dist(mom_1, features, c_r, c_c, var, act, ini)
pyhrf.verbose(1, 'inertia:')
pyhrf.verbose.printNdarray(1, i1.T)
#assert_array_almost_equal(act_clusters(np.where(act_clusters==0)), inertia())
def test_parcellation_spatialWard_5_sklearn(self):
pyhrf.verbose.set_verbosity(0)
features0 = np.ones((25,1))
features0[10:] = 2
n_samples, n_features = features0.shape
noise = 0
var = np.random.rand(n_samples,n_features)*noise
var_ini = np.random.rand(n_samples,n_features)*noise
act = np.ones_like(features0[:,0])
act_ini = np.ones_like(features0[:,0])
features = features0 + var
graph = pgraph.graph_from_lattice(np.ones((5,5)), pgraph.kerMask2D_4n)
p2 = pm.spatial_ward(features, graph, nb_clusters=2)
assert_array_equal(p2, features0.squeeze())
def test_parcellation_spatialWard_400_nonoise(self):
pyhrf.verbose.set_verbosity(0)
n_samples = 400.
n_features = 1.
im = np.concatenate((np.zeros(math.ceil(n_samples/2))+1, \
np.zeros(math.floor(n_samples/2))+2)).reshape(n_samples,1).astype(np.int)
n = 0
features = im + np.random.randn(*im.shape) * n
graph = pgraph.graph_from_lattice(np.ones((20,20)), pgraph.kerMask2D_4n)
var = np.ones_like(features)
var_ini = np.ones_like(features)
act = np.ones_like(features[:,0])
act_ini = np.ones_like(features[:,0])
p0 = pm.spatial_ward(features, graph, nb_clusters=2)
p = pm.spatial_ward_with_uncertainty(features, graph, var, act,
var_ini, act_ini, nb_clusters=2)
dist_total, common_parcels = parcellation_dist(p.labels_, im.squeeze()+1)
assert_array_equal(dist_total, 0)
def test_hemodynamic_parcellation_wpu_2D_high_SNR(self):
"""
test WPU on features extracted from a 2D artificial fMRI data set,
at high SNR
"""
pyhrf.verbose.set_verbosity(0)
data0 = simulate_fmri_data()
method = 'glm_deriv'
dt = data0.simulation[0]['dt']
if pyhrf.verbose.verbosity > 1:
print 'fdata:'
print data0
time_length = data0.simulation[0]['duration']
ncond = len(data0.simulation[0]['condition_defs'])
ampl, feats, bvars = pm.feature_extraction(data0, method, dt,
time_length, ncond)
data0.build_graphs()
ww = pm.spatial_ward_with_uncertainty(feats, data0.graphs[1], bvars, ampl,
nb_clusters=4)
print 'ww: '
print ww
p = ww.labels_
print p
true_parcellation_flat = data0.simulation[0]['hrf_territories']
if len(data0.simulation[0]['labels']) > 1:
act_clusters_flat = np.bitwise_or(*(a for a in data0.simulation[0]['labels']))
else:
act_clusters_flat = data0.simulation[0]['labels'][0]
if pyhrf.verbose.verbosity > 0:
mask = data0.roiMask
print 'true parcellation:'
print expand_array_in_mask(true_parcellation_flat, mask)
print 'WPU parcellation:'
print expand_array_in_mask(p, mask)
print 'act labels:'
print expand_array_in_mask(act_clusters_flat, mask)
pm.assert_parcellation_equal(p, true_parcellation_flat.astype(int),
tol_pos=act_clusters_flat==0)
def test_hemodynamic_parcellation_GMM_2D_high_SNR(self):
"""
test GMM-based parcellation on features extracted from a
2D artificial fMRI data set, at high SNR
"""
np.random.seed(5438)
data0 = simulate_fmri_data('high_snr', self.tmp_path)
pyhrf.verbose.set_verbosity(0)
method = 'glm_deriv'
dt = data0.simulation[0]['dt']
if pyhrf.verbose.verbosity > 5:
print 'fdata:'
print data0
time_length = data0.simulation[0]['duration']
ncond = len(data0.simulation[0]['condition_defs'])
ampl, pvals, feats, bvars = pm.feature_extraction(data0, method, dt,
time_length, ncond)
data0.build_graphs()
ww = pm.spatial_ward_with_uncertainty(feats, data0.graphs[1], bvars,
1-pvals, nb_clusters=3,
dist_type='mixt',
save_history=False)
p = ww.labels_
true_parcellation_flat = data0.simulation[0]['hrf_territories']
if len(data0.simulation[0]['labels']) > 1:
labs = data0.simulation[0]['labels']
act_clusters_flat = np.bitwise_or(*(a for a in labs))
else:
act_clusters_flat = data0.simulation[0]['labels'][0]
if pyhrf.verbose.verbosity > 0:
mask = data0.roiMask
print 'true parcellation:'
print expand_array_in_mask(true_parcellation_flat, mask)
print 'MMP parcellation:'
print expand_array_in_mask(p, mask)
print 'act labels:'
print expand_array_in_mask(act_clusters_flat, mask)
if 1:
self.save_parcellation_outputs(ww, mask)
tp = expand_array_in_mask(true_parcellation_flat, mask)
fn_tp = op.join(self.tmp_path, 'true_parcellation.nii')
xndarray(tp, axes_names=MRI3Daxes[:mask.ndim]).save(fn_tp)
# fn = op.join(self.tmp_path, 'parcellation_tree.png')
# pyhrf.verbose(1, 'fig parcellation tree: %s' %fn)
# lab_colors = [('black','red')[l] \
# for l in data0.simulation[0]['labels'][0]]
# pm.render_ward_tree(ww, fn, leave_colors=lab_colors)
print 'pyhrf_view %s/*' %self.tmp_path
self.clean_tmp = False #hack
pm.assert_parcellation_equal(p, true_parcellation_flat.astype(int),
tol_pos=act_clusters_flat==0)
def test_parcellation_spatialWard_400_variance(self):
pyhrf.verbose.set_verbosity(0)
n_samples = 400.
n_features = 1.
im = np.concatenate((np.zeros(math.ceil(n_samples/2))+1, \
np.zeros(math.floor(n_samples/2))+2)).reshape(n_samples,1).astype(np.int)
n = 0.5
var = np.random.randn(*im.shape) * n
features = im + var
var_ini = np.ones_like(features)
act = np.ones_like(features[:,0])
act_ini = np.ones_like(features[:,0])
graph = pgraph.graph_from_lattice(np.ones((20,20)), pgraph.kerMask2D_4n)
p0 = pm.spatial_ward(features, graph, nb_clusters=2)
p = pm.spatial_ward_with_uncertainty(features, graph, var, act,
var_ini, act_ini, nb_clusters=2)
from pyhrf.parcellation import parcellation_dist
dist_total1, common_parcels = parcellation_dist(p0,
im.squeeze()+1)
dist_total2, common_parcels = parcellation_dist(p.labels_,
im.squeeze()+1)
print dist_total1
print dist_total2
assert_array_less(dist_total2, dist_total1)
#p2 = pm.align_parcellation(im.squeeze(), p)
#assert_array_equal(im.squeeze(), p2)
def test_parcellation_history(self):
tp, feats, g, v, act, act_labs, mask = create_features()
nc = len(np.unique(tp))
ww = pm.spatial_ward_with_uncertainty(feats, g, v, act,
nb_clusters=nc)
nvoxels = feats.shape[0]
self.assertEqual(ww.history.shape, (nvoxels-nc, nvoxels))
self.assertEqual(ww.history_choices.shape[0], nvoxels-nc)
self.assertEqual(ww.history_choices.shape[2], nvoxels)
#c_hist = ww.history.expand(mask, 'voxel', target_axes=MRI3Daxes)
def test_uward_tree_save(self):
pyhrf.verbose.set_verbosity(0)
tp, feats, g, v, act, act_labs, mask = create_features()
nc = len(np.unique(tp))
ww = pm.spatial_ward_with_uncertainty(feats, g, v, act,
nb_clusters=nc)
pm.ward_tree_save(ww, self.tmp_path, mask)
assert_file_exists(op.join(self.tmp_path,
'parcellation_uward_features.nii'))
assert_file_exists(op.join(self.tmp_path,
'parcellation_uward_variances.nii'))
assert_file_exists(op.join(self.tmp_path,
'parcellation_uward_activations.nii'))
assert_file_exists(op.join(self.tmp_path,
'parcellation_uward_history.nii'))
assert_file_exists(op.join(self.tmp_path,
'parcellation_uward_choice_history.nii'))
assert_file_exists(op.join(self.tmp_path,
'parcellation_uward_history.nii'))
if 0:
self.clean_tmp = False #hack
print 'pyhrf_view %s/*nii' %self.tmp_path
def test_ward_tree_save(self):
pyhrf.verbose.set_verbosity(0)
tp, feats, g, v, act, act_labs, mask = create_features()
nc = len(np.unique(tp))
ww = pm.spatial_ward_with_uncertainty(feats, g, v, act,
nb_clusters=nc)
pm.ward_tree_save(ww, self.tmp_path, mask)
assert_file_exists(op.join(self.tmp_path,
'parcellation_uward_features.nii'))
assert_file_exists(op.join(self.tmp_path,
'parcellation_uward_history.nii'))
assert_file_exists(op.join(self.tmp_path,
'parcellation_uward_choice_history.nii'))
assert_file_exists(op.join(self.tmp_path,
'parcellation_uward_history.nii'))
if 0:
self.clean_tmp = False #hack
print 'pyhrf_view %s/*nii' %self.tmp_path
def test_mixtdist(self):
"""
Check that merge is in favour of non-activ at the same feature level,
starting from singleton clusters.
"""
pyhrf.verbose.set_verbosity(0)
features = np.array([[1.1, 9, 1.1]]).T
alphas = np.array([ .1,.9,.9])
c0 = np.array([1,0,0], dtype=int)
c1 = np.array([0,1,0], dtype=int)
c2 = np.array([0,0,1], dtype=int)
cmasks = [c0, c1, c2]
#merge 0,1
dist_0_1 = pm.compute_mixt_dist(features, alphas, np.array([0]),
np.array([1]), cmasks, [None])[0]
pyhrf.verbose(1, 'merge dist(0,1): %f' %dist_0_1)
#merge 1,2
c1 = np.array([0,1,0], dtype=int)
c2 = np.array([0,0,1], dtype=int)
dist_1_2 = pm.compute_mixt_dist(features, alphas, np.array([1]),
np.array([2]), cmasks, [None])[0]
pyhrf.verbose(1, 'merge dist(1,2): %f'%dist_1_2)
assert_array_less(dist_0_1, dist_1_2)
| gpl-3.0 | -1,238,709,987,190,271,500 | 37.898965 | 125 | 0.524088 | false |
jerkern/nxt_slam | python/sonar.py | 1 | 4434 | import numpy as np
import math
class FOVCone(object):
""" Represent a FOV measurment """
def __init__(self, pos, theta, prec, dist):
self.xpos = pos[0]
self.ypos = pos[1]
self.theta = theta
self.prec = prec
self.dist = dist
def __call__(self, y, x, optdata = None):
""" Check if inside FOV, if so return coordinate and angle distance
for each y,x pair. If optdata is provided then the corresponding
entries are also returned.
y,x (and opdata) must have the same length """
x = np.asarray(x)
y = np.asarray(y)
# Absolute angle, world coordinates
a_w = np.arctan2(y-self.ypos, x-self.xpos)
# Angle relative sensor, use mod to enforce interval [-pi,pi]
tmp = a_w - (self.theta - math.pi)
a = np.mod(tmp, 2*math.pi) - math.pi
ind = np.abs(a) <= self.prec/2.0
if (not ind.any()):
return None
# Throw away all pairs outside FOV
newx = x[ind]-self.xpos
newy = y[ind]-self.ypos
newa = a[ind]
newa_w = a_w[ind]
# Calculating this manually instead of using norm
# and apply_along_axis is _much_ faster
r = np.sqrt((newx ** 2) + (newy ** 2))
rind = (r <= self.dist)
cnt = newa[rind].shape[0]
if (cnt == 0):
return None
# Create structure array contanings cells within FOV
        if (optdata is not None):
rval = np.empty(cnt, dtype=[('a', float),
('r', float),
('a_w', float),
('opt', optdata.dtype,
optdata.shape[1])])
rval['opt'] = (optdata[ind, :])[rind, :]
else:
            rval = np.empty(cnt, dtype=[('a', float),
                                        ('r', float),
                                        ('a_w', float)])
rval['a'] = newa[rind]
rval['r'] = r[rind]
rval['a_w'] = newa_w[rind]
return rval
def get_bounding_rect(self):
""" Find bounding rectange of FOV cone """
tmp1 = self.xpos + self.dist*math.cos(self.theta+self.prec/2.0)
tmp2 = self.xpos + self.dist*math.cos(self.theta-self.prec/2.0)
xmin = np.min((self.xpos, tmp1, tmp2))
xmax = np.max((self.xpos, tmp1, tmp2))
tmp1 = self.ypos + self.dist * math.sin(self.theta + self.prec / 2.0)
tmp2 = self.ypos + self.dist * math.sin(self.theta - self.prec / 2.0)
ymin = min((self.ypos, tmp1, tmp2))
ymax = max((self.ypos, tmp1, tmp2))
return (xmin, xmax, ymin, ymax)
class SonarMeasurement(object):
""" Class for handling sonar measurements,
converts from range to probability field """
def own_pdf(self, r, mu, scale):
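        # Trapezoid-shaped likelihood: ~0.99 within scale/2 of the measured range mu,
        # falling off linearly between scale/2 and scale, floored at 0.01 elsewhere.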
pdf = 0.0*r
ind1 = np.abs(r-mu) < scale / 2
ind2 = np.abs(r-mu) < scale
max = 0.99
min = 0.01
pdf[ind1] = max
interpolate_inds = (~ind1)*ind2
pdf[interpolate_inds] = max* (1 - 2*(np.abs(r[interpolate_inds] - mu) - scale/2)/scale)
pdf[pdf < min] = min
return pdf
def __init__(self, cells, dist, prec, r_coeff, a_coeff, cov_offset, num_angles, norm=True):
""" Create probabilty field evaluated in 'cells' for distance 'dist' """
tmp = (cells['a_w'] + math.pi)*num_angles/(2.0*math.pi)
tmp = np.floor(tmp)
# Wrap-around
tmp[tmp >= num_angles] = num_angles-1
tmp = np.reshape(tmp,(-1,1))
self.indices = np.concatenate((cells['opt'], tmp), 1)
r = cells['r']
a = cells['a']
self.prob = self.own_pdf(r, dist, prec)
# Probabilty must sum to one
# However, there is a small chance of a spurios measurment, also numerically
# we run into trouble if any single cell would get p=1.0
if (norm):
total = np.sum(self.prob)
if (total > 0.0):
self.prob = 0.99*self.prob / total
else:
self.prob = self.prob + 0.01
self.var = r_coeff*r + a_coeff*abs(a) + cov_offset
self.var[r > (dist+prec/2.0)] = 1000
| gpl-3.0 | 7,736,525,555,743,298,000 | 33.107692 | 95 | 0.489851 | false |
momijiame/diagram-autobuild | diagram_autobuild/tool.py | 1 | 2493 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import abc
import os
from future.utils import with_metaclass
class BuildCommand(with_metaclass(abc.ABCMeta)):
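    # Abstract base class for the diagram build commands: subclasses provide the
    # output path ('destination') and render the full command line in __str__.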
def __init__(self, src_file, dst_dir, opts=None):
self.src_file = src_file
self.dst_dir = dst_dir
self.opts = opts or ''
@abc.abstractproperty
def destination(self):
pass
@abc.abstractmethod
def __str__(self):
pass
class GraphvizBuild(BuildCommand):
@property
def destination(self):
return os.path.join(self.dst_dir, 'out.png')
def __str__(self):
command = 'dot {opts} -T png -o {destination} {src_file}'.format(
destination=self.destination,
src_file=self.src_file,
opts=self.opts,
)
return command
class ERAlchemyBuild(BuildCommand):
@property
def destination(self):
return os.path.join(self.dst_dir, 'out.png')
def __str__(self):
command = 'eralchemy {opts} -i {src_file} -o {destination}'.format(
destination=self.destination,
src_file=self.src_file,
opts=self.opts,
)
return command
class BlockdiagSeriesBuild(BuildCommand):
@abc.abstractproperty
def command(self):
pass
@property
def destination(self):
return os.path.join(self.dst_dir, 'out.png')
def __str__(self):
command = '{command} {opts} -o {destination} {src_file}'.format(
command=self.command,
destination=self.destination,
src_file=self.src_file,
opts=self.opts,
)
return command
class BlockdiagBuild(BlockdiagSeriesBuild):
@property
def command(self):
return 'blockdiag'
class NwdiagBuild(BlockdiagSeriesBuild):
@property
def command(self):
return 'nwdiag'
class SeqdiagBuild(BlockdiagSeriesBuild):
@property
def command(self):
return 'seqdiag'
class ActdiagBuild(BlockdiagSeriesBuild):
@property
def command(self):
return 'actdiag'
_MAPPINGS = {
'graphviz': GraphvizBuild,
'blockdiag': BlockdiagBuild,
'nwdiag': NwdiagBuild,
'seqdiag': SeqdiagBuild,
'actdiag': ActdiagBuild,
'eralchemy': ERAlchemyBuild,
}
def get_tools():
return _MAPPINGS.keys()
def get_command(tool_name, src_file, dst_dir, opts=None):
class_ = _MAPPINGS.get(tool_name)
instance = class_(src_file, dst_dir, opts)
return instance
| apache-2.0 | -557,459,658,530,651,140 | 19.603306 | 75 | 0.610911 | false |
kg-bot/SupyBot | plugins/Izmeri/plugin.py | 1 | 2100 | ###
# Copyright (c) 2013, KG-Bot
# All rights reserved.
#
#
###
import supybot.utils as utils
from supybot.commands import *
import supybot.plugins as plugins
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
import random
import time
import re
try:
from supybot.i18n import PluginInternationalization
_ = PluginInternationalization('Izmeri')
except:
# Placeholder that allows to run the plugin on a bot
# without the i18n module
_ = lambda x:x
class Izmeri(callbacks.Plugin):
"""Add the help for "@plugin help Izmeri" here
This should describe *how* to use this plugin."""
pass
_penis_responses = ('ima penis od 6cm.', 'ima penis od 15cm.', 'ima penis od 24cm.', 'ima penis od 9cm.', 'ima penis od 18cm.',
'ima penis od 22cm.', 'ima penis od 14cm.', 'ima penis od 17cm.', 'ima penis od 4cm.', 'ima penis od 12cm.', 'ima penis od 13cm.', 'ima enormno veliki penis i da se sa njim nije zajebavati, deco cuvajte se, stigo kuronja u grad')
_sike_odgovori = ('su ove sise velicine zrna graska.', 'su ove sise velicine decije glave.', 'da su ove sise taman kako treba.', 'da ova osoba uopste nema sisa.', 'mozes jednu u usta drugu pod glavu.', 'nije nasao nista, jad i beda.', 'ova osoba ima rak desne dojke.')
def penis(self, irc, msg, args, text, channel):
"""<nick>
Meri velicinu necijeg penisa.
"""
irc.reply(('KG-Bot vadi svoju strucnu spravu za merenje penisa, skida gace \x02%s\x02, meri i dolazi do zakljucka da ova osoba \x02%s\x02') %
(text, utils.iter.choice(self._penis_responses)))
penis = wrap(penis, ['nickInChannel', 'channel'])
def sike(self, irc, msg, args, name, channel):
"""<nick>
Meri velicinu siki. xD"""
irc.reply("KG-Bot vadi svoju strucnu spravu za merenje sisica, zaviruje \x02%s\x02 u grudjnak i zakljucuje da \x02%s\x02" % (name, utils.iter.choice(self._sike_odgovori)))
sike = wrap(sike, ['nickInChannel', 'channel'])
Class = Izmeri
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| gpl-3.0 | -2,426,150,339,966,610,000 | 37.888889 | 272 | 0.670952 | false |
tensorflow/models | official/vision/beta/data/process_coco_few_shot_json_files.py | 1 | 6042 | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processes the JSON files for COCO few-shot.
We assume that `workdir` mirrors the contents of
http://dl.yf.io/fs-det/datasets/cocosplit/, which contains the official JSON
files for the few-shot COCO evaluation procedure that Wang et al. (2020)'s
"Frustratingly Simple Few-Shot Object Detection" paper uses.
"""
import collections
import itertools
import json
import logging
import os
from absl import app
from absl import flags
import tensorflow as tf
logger = tf.get_logger()
logger.setLevel(logging.INFO)
flags.DEFINE_string('workdir', None, 'Working directory.')
FLAGS = flags.FLAGS
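# COCO object categories used to name the per-category few-shot split files.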
CATEGORIES = ['airplane', 'apple', 'backpack', 'banana', 'baseball bat',
'baseball glove', 'bear', 'bed', 'bench', 'bicycle', 'bird',
'boat', 'book', 'bottle', 'bowl', 'broccoli', 'bus', 'cake',
'car', 'carrot', 'cat', 'cell phone', 'chair', 'clock', 'couch',
'cow', 'cup', 'dining table', 'dog', 'donut', 'elephant',
'fire hydrant', 'fork', 'frisbee', 'giraffe', 'hair drier',
'handbag', 'horse', 'hot dog', 'keyboard', 'kite', 'knife',
'laptop', 'microwave', 'motorcycle', 'mouse', 'orange', 'oven',
'parking meter', 'person', 'pizza', 'potted plant',
'refrigerator', 'remote', 'sandwich', 'scissors', 'sheep',
'sink', 'skateboard', 'skis', 'snowboard', 'spoon', 'sports ball',
'stop sign', 'suitcase', 'surfboard', 'teddy bear',
'tennis racket', 'tie', 'toaster', 'toilet', 'toothbrush',
'traffic light', 'train', 'truck', 'tv', 'umbrella', 'vase',
'wine glass', 'zebra']
SEEDS = list(range(10))
SHOTS = [10, 30]
FILE_SUFFIXES = collections.defaultdict(list)
for _seed, _shots in itertools.product(SEEDS, SHOTS):
for _category in CATEGORIES:
FILE_SUFFIXES[(_seed, _shots)].append(
'{}full_box_{}shot_{}_trainval.json'.format(
# http://dl.yf.io/fs-det/datasets/cocosplit/ is organized like so:
#
# datasplit/
# trainvalno5k.json
# 5k.json
# full_box_{1,2,3,5,10,30}shot_{category}_trainval.json
# seed{1-9}/
# full_box_{1,2,3,5,10,30}shot_{category}_trainval.json
#
# This means that the JSON files for seed0 are located in the root
# directory rather than in a `seed?/` subdirectory, hence the
# conditional expression below.
'' if _seed == 0 else 'seed{}/'.format(_seed),
_shots,
_category))
# Base class IDs, as defined in
# https://github.com/ucbdrive/few-shot-object-detection/blob/master/fsdet/evaluation/coco_evaluation.py#L60-L65
BASE_CLASS_IDS = [8, 10, 11, 13, 14, 15, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
35, 36, 37, 38, 39, 40, 41, 42, 43, 46, 47, 48, 49, 50, 51,
52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 65, 70, 73, 74, 75,
76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
def main(unused_argv):
workdir = FLAGS.workdir
# Filter novel class annotations from the training and validation sets.
for name in ('trainvalno5k', '5k'):
file_path = os.path.join(workdir, 'datasplit', '{}.json'.format(name))
with tf.io.gfile.GFile(file_path, 'r') as f:
json_dict = json.load(f)
json_dict['annotations'] = [a for a in json_dict['annotations']
if a['category_id'] in BASE_CLASS_IDS]
output_path = os.path.join(
workdir, 'datasplit', '{}_base.json'.format(name))
with tf.io.gfile.GFile(output_path, 'w') as f:
json.dump(json_dict, f)
for seed, shots in itertools.product(SEEDS, SHOTS):
# Retrieve all examples for a given seed and shots setting.
file_paths = [os.path.join(workdir, suffix)
for suffix in FILE_SUFFIXES[(seed, shots)]]
json_dicts = []
for file_path in file_paths:
with tf.io.gfile.GFile(file_path, 'r') as f:
json_dicts.append(json.load(f))
# Make sure that all JSON files for a given seed and shots setting have the
# same metadata. We count on this to fuse them later on.
metadata_dicts = [{'info': d['info'], 'licenses': d['licenses'],
'categories': d['categories']} for d in json_dicts]
if not all(d == metadata_dicts[0] for d in metadata_dicts[1:]):
raise RuntimeError(
'JSON files for {} shots (seed {}) '.format(shots, seed) +
'have different info, licences, or categories fields')
# Retrieve images across all JSON files.
images = sum((d['images'] for d in json_dicts), [])
# Remove duplicate image entries.
images = list({image['id']: image for image in images}.values())
output_dict = {
'info': json_dicts[0]['info'],
'licenses': json_dicts[0]['licenses'],
'categories': json_dicts[0]['categories'],
'images': images,
'annotations': sum((d['annotations'] for d in json_dicts), [])
}
output_path = os.path.join(workdir,
'{}shot_seed{}.json'.format(shots, seed))
with tf.io.gfile.GFile(output_path, 'w') as f:
json.dump(output_dict, f)
logger.info('Processed %d shots (seed %d) and saved to %s',
shots, seed, output_path)
if __name__ == '__main__':
flags.mark_flag_as_required('workdir')
app.run(main)
| apache-2.0 | 2,241,717,489,347,854,600 | 40.958333 | 111 | 0.602781 | false |
frePPLe/frePPLe | freppledb/input/admin.py | 1 | 29772 | #
# Copyright (C) 2007-2020 by frePPLe bv
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.utils.translation import gettext_lazy as _
from freppledb.input.models import Resource, Operation, Location, SetupMatrix, SetupRule
from freppledb.input.models import Buffer, Customer, Demand, Item, OperationResource
from freppledb.input.models import OperationMaterial, Skill, ResourceSkill, Supplier
from freppledb.input.models import (
Calendar,
CalendarBucket,
ManufacturingOrder,
SubOperation,
)
from freppledb.input.models import ItemSupplier, ItemDistribution, DistributionOrder
from freppledb.input.models import PurchaseOrder, DeliveryOrder, OperationPlanResource
from freppledb.input.models import OperationPlanMaterial
from freppledb.common.adminforms import MultiDBModelAdmin, MultiDBTabularInline
from freppledb.admin import data_site
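# Each admin class below is registered on frePPLe's multi-database admin site
# (data_site); its "tabs" list drives the tab navigation of the object's edit page.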
class CalendarBucket_inline(MultiDBTabularInline):
model = CalendarBucket
extra = 0
exclude = ("source",)
class CalendarBucket_admin(MultiDBModelAdmin):
model = CalendarBucket
raw_id_fields = ("calendar",)
save_on_top = True
fieldsets = (
(None, {"fields": ("calendar", ("startdate", "enddate"), "value", "priority")}),
(
_("repeating pattern"),
{
"fields": (
("starttime", "endtime"),
(
"monday",
"tuesday",
"wednesday",
"thursday",
"friday",
"saturday",
"sunday",
),
)
},
),
)
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_calendarbucket_change",
"permissions": "input.change_calendarbucket",
},
{
"name": "comments",
"label": _("comments"),
"view": "admin:input_calendarbucket_comment",
},
{
"name": "history",
"label": _("History"),
"view": "admin:input_calendarbucket_history",
},
]
data_site.register(CalendarBucket, CalendarBucket_admin)
class Calendar_admin(MultiDBModelAdmin):
model = Calendar
save_on_top = True
inlines = [CalendarBucket_inline]
exclude = ("source",)
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_calendar_change",
"permissions": "input.change_calendar",
},
{
"name": "comments",
"label": _("comments"),
"view": "admin:input_calendar_comment",
},
{
"name": "history",
"label": _("History"),
"view": "admin:input_calendar_history",
},
]
data_site.register(Calendar, Calendar_admin)
class Location_admin(MultiDBModelAdmin):
model = Location
raw_id_fields = ("available", "owner")
save_on_top = True
exclude = ("source",)
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_location_change",
"permissions": "input.change_location",
},
{
"name": "inboundorders",
"label": _("inbound distribution"),
"view": "input_distributionorder_in_by_location",
},
{
"name": "outboundorders",
"label": _("outbound distribution"),
"view": "input_distributionorder_out_by_location",
},
{
"name": "manufacturingorders",
"label": _("manufacturing orders"),
"view": "input_manufacturingorder_by_location",
},
{
"name": "purchaseorders",
"label": _("purchase orders"),
"view": "input_purchaseorder_by_location",
},
{
"name": "comments",
"label": _("comments"),
"view": "admin:input_location_comment",
},
{
"name": "history",
"label": _("History"),
"view": "admin:input_location_history",
},
]
data_site.register(Location, Location_admin)
class Customer_admin(MultiDBModelAdmin):
model = Customer
raw_id_fields = ("owner",)
save_on_top = True
exclude = ("source",)
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_customer_change",
"permissions": "input.change_customer",
},
{
"name": "comments",
"label": _("comments"),
"view": "admin:input_customer_comment",
},
{
"name": "history",
"label": _("History"),
"view": "admin:input_customer_history",
},
]
data_site.register(Customer, Customer_admin)
class ItemSupplier_inline(MultiDBTabularInline):
model = ItemSupplier
fk_name = "item"
raw_id_fields = ("supplier", "location", "resource")
extra = 0
exclude = ("source",)
class Supplier_admin(MultiDBModelAdmin):
model = Supplier
raw_id_fields = ("available", "owner")
save_on_top = True
exclude = ("source",)
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_supplier_change",
"permissions": "input.change_supplier",
},
{
"name": "purchaseorders",
"label": _("purchase orders"),
"view": "input_purchaseorder_by_supplier",
},
{
"name": "comments",
"label": _("comments"),
"view": "admin:input_supplier_comment",
},
{
"name": "history",
"label": _("History"),
"view": "admin:input_supplier_history",
},
]
data_site.register(Supplier, Supplier_admin)
class OperationMaterial_inline(MultiDBTabularInline):
model = OperationMaterial
fields = (
"item",
"operation",
"quantity",
"quantity_fixed",
"type",
"transferbatch",
"offset",
"effective_start",
"effective_end",
)
raw_id_fields = ("operation", "item")
extra = 0
exclude = ("source",)
class OperationResource_inline(MultiDBTabularInline):
model = OperationResource
raw_id_fields = ("operation", "resource", "skill")
fields = (
"resource",
"operation",
"quantity",
"quantity_fixed",
"effective_start",
"effective_end",
"skill",
"setup",
"search",
)
extra = 0
exclude = ("source",)
class Item_admin(MultiDBModelAdmin):
model = Item
save_on_top = True
raw_id_fields = ("owner",)
inlines = [ItemSupplier_inline, OperationMaterial_inline]
exclude = ("source",)
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_item_change",
"permissions": "input.change_item",
},
{"name": "supplypath", "label": _("supply path"), "view": "supplypath_item"},
{"name": "whereused", "label": _("where used"), "view": "whereused_item"},
{"name": "plan", "label": _("plan"), "view": "output_demand_plandetail"},
{
"name": "inventory",
"label": _("inventory"),
"view": "output_buffer_plandetail_by_item",
},
{
"name": "inventorydetail",
"label": _("inventory detail"),
"view": "input_operationplanmaterial_plandetail_by_item",
},
{
"name": "comments",
"label": _("comments"),
"view": "admin:input_item_comment",
},
{"name": "history", "label": _("History"), "view": "admin:input_item_history"},
]
data_site.register(Item, Item_admin)
class ItemSupplier_admin(MultiDBModelAdmin):
model = ItemSupplier
save_on_top = True
raw_id_fields = ("item", "supplier", "resource")
exclude = ("source", "id")
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_itemsupplier_change",
"permissions": "input.change_itemsupplier",
},
{
"name": "comments",
"label": _("comments"),
"view": "admin:input_itemsupplier_comment",
},
{
"name": "history",
"label": _("History"),
"view": "admin:input_itemsupplier_history",
},
]
data_site.register(ItemSupplier, ItemSupplier_admin)
class ItemDistribution_admin(MultiDBModelAdmin):
model = ItemDistribution
save_on_top = True
raw_id_fields = ("item", "resource")
exclude = ("source", "id")
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_itemdistribution_change",
"permissions": "input.change_itemdistribution",
},
{
"name": "comments",
"label": _("comments"),
"view": "admin:input_itemdistribution_comment",
},
{
"name": "history",
"label": _("History"),
"view": "admin:input_itemdistribution_history",
},
]
data_site.register(ItemDistribution, ItemDistribution_admin)
class ChildOperation_inline(MultiDBTabularInline):
model = Operation
fk_name = "owner"
extra = 1
# raw_id_fields = ("owner",)
fields = (
"priority",
"name",
"effective_start",
"effective_end",
"location",
"type",
"duration",
"duration_per",
)
exclude = ("source",)
class SubOperation_inline(MultiDBTabularInline):
model = SubOperation
verbose_name = _("child operation")
verbose_name_plural = _("child suboperations")
fk_name = "operation"
extra = 1
raw_id_fields = ("suboperation",)
exclude = ("source",)
class ResourceSkill_inline(MultiDBTabularInline):
model = ResourceSkill
fk_name = "resource"
raw_id_fields = ("skill",)
extra = 1
exclude = ("source",)
class Operation_admin(MultiDBModelAdmin):
model = Operation
raw_id_fields = ("location", "item", "available", "owner")
save_on_top = True
inlines = [
OperationMaterial_inline,
OperationResource_inline,
ChildOperation_inline,
SubOperation_inline,
]
fieldsets = (
(
None,
{
"fields": (
"name",
"type",
"item",
"location",
"description",
"category",
"subcategory",
)
},
),
(
_("planning parameters"),
{
"fields": (
"fence",
"posttime",
"sizeminimum",
"sizemultiple",
"sizemaximum",
"cost",
"duration",
"duration_per",
"available",
)
},
),
(
_("alternate selection"),
{
"fields": (
"search",
"priority",
"effective_start",
"effective_end",
"owner",
)
},
),
)
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_operation_change",
"permissions": "input.change_operation",
},
{
"name": "supplypath",
"label": _("supply path"),
"view": "supplypath_operation",
},
{"name": "whereused", "label": _("where used"), "view": "whereused_operation"},
{"name": "plan", "label": _("plan"), "view": "output_operation_plandetail"},
{
"name": "plandetail",
"label": _("manufacturing orders"),
"view": "input_manufacturingorder_by_operation",
},
{
"name": "constraint",
"label": _("constrained demand"),
"view": "output_constraint_operation",
},
{
"name": "comments",
"label": _("comments"),
"view": "admin:input_operation_comment",
},
{
"name": "history",
"label": _("History"),
"view": "admin:input_operation_history",
},
]
data_site.register(Operation, Operation_admin)
class SubOperation_admin(MultiDBModelAdmin):
model = SubOperation
raw_id_fields = ("operation", "suboperation")
save_on_top = True
exclude = ("source", "id")
data_site.register(SubOperation, SubOperation_admin)
class Buffer_admin(MultiDBModelAdmin):
raw_id_fields = ("location", "item", "minimum_calendar")
fieldsets = (
(
None,
{
"fields": (
"item",
"location",
"batch",
"description",
"category",
"subcategory",
)
},
),
(_("inventory"), {"fields": ("onhand",)}),
(
_("planning parameters"),
{"fields": ("type", "minimum", "minimum_calendar", "min_interval")},
),
)
save_on_top = True
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_buffer_change",
"permissions": "input.change_buffer",
},
{"name": "supplypath", "label": _("supply path"), "view": "supplypath_buffer"},
{"name": "whereused", "label": _("where used"), "view": "whereused_buffer"},
{"name": "plan", "label": _("plan"), "view": "output_buffer_plandetail"},
{
"name": "plandetail",
"label": _("plan detail"),
"view": "input_operationplanmaterial_plandetail_by_buffer",
},
{
"name": "constraint",
"label": _("constrained demand"),
"view": "output_constraint_buffer",
},
{
"name": "comments",
"label": _("comments"),
"view": "admin:input_buffer_comment",
},
{
"name": "history",
"label": _("History"),
"view": "admin:input_buffer_history",
},
]
data_site.register(Buffer, Buffer_admin)
class SetupRule_inline(MultiDBTabularInline):
model = SetupRule
extra = 3
exclude = ("source",)
class SetupRule_admin(MultiDBModelAdmin):
model = SetupRule
raw_id_fields = ("setupmatrix",)
save_on_top = True
exclude = ("source",)
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_setuprule_change",
"permissions": "input.change_setuprule",
},
{
"name": "comments",
"label": _("comments"),
"view": "admin:input_setuprule_comment",
},
{
"name": "history",
"label": _("History"),
"view": "admin:input_setuprule_history",
},
]
data_site.register(SetupRule, SetupRule_admin)
class SetupMatrix_admin(MultiDBModelAdmin):
model = SetupMatrix
save_on_top = True
inlines = [SetupRule_inline]
exclude = ("source",)
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_setupmatrix_change",
"permissions": "input.change_setupmatrix",
},
{
"name": "comments",
"label": _("comments"),
"view": "admin:input_setupmatrix_comment",
},
{
"name": "history",
"label": _("History"),
"view": "admin:input_setupmatrix_history",
},
]
data_site.register(SetupMatrix, SetupMatrix_admin)
class Skill_admin(MultiDBModelAdmin):
model = Skill
save_on_top = True
exclude = ("source",)
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_skill_change",
"permissions": "input.change_skill",
},
{
"name": "comments",
"label": _("comments"),
"view": "admin:input_skill_comment",
},
{"name": "history", "label": _("History"), "view": "admin:input_skill_history"},
]
data_site.register(Skill, Skill_admin)
class ResourceSkill_admin(MultiDBModelAdmin):
model = ResourceSkill
raw_id_fields = ("resource", "skill")
save_on_top = True
exclude = ("source",)
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_resourceskill_change",
"permissions": "input.change_resoureskill",
},
{
"name": "comments",
"label": _("comments"),
"view": "admin:input_resourceskill_comment",
},
{
"name": "history",
"label": _("History"),
"view": "admin:input_resourceskill_history",
},
]
data_site.register(ResourceSkill, ResourceSkill_admin)
class Resource_admin(MultiDBModelAdmin):
model = Resource
raw_id_fields = (
"maximum_calendar",
"location",
"setupmatrix",
"owner",
"available",
"efficiency_calendar",
)
save_on_top = True
inlines = [OperationResource_inline, ResourceSkill_inline]
exclude = ("source",)
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_resource_change",
"permissions": "input.change_resource",
},
{
"name": "supplypath",
"label": _("supply path"),
"view": "supplypath_resource",
},
{"name": "whereused", "label": _("where used"), "view": "whereused_resource"},
{"name": "plan", "label": _("plan"), "view": "output_resource_plandetail"},
{
"name": "plandetail",
"label": _("plan detail"),
"view": "input_operationplanresource_plandetail",
},
{
"name": "constraint",
"label": _("constrained demand"),
"view": "output_constraint_resource",
},
{
"name": "comments",
"label": _("comments"),
"view": "admin:input_resource_comment",
},
{
"name": "history",
"label": _("History"),
"view": "admin:input_resource_history",
},
]
data_site.register(Resource, Resource_admin)
class OperationMaterial_admin(MultiDBModelAdmin):
model = OperationMaterial
raw_id_fields = ("operation", "item")
save_on_top = True
exclude = ("id",)
fieldsets = (
(
None,
{
"fields": (
"item",
"operation",
"type",
"quantity",
"quantity_fixed",
"transferbatch",
"offset",
("effective_start", "effective_end"),
)
},
),
(_("alternates"), {"fields": ("name", "priority", "search")}),
)
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_operationmaterial_change",
"permissions": "input.change_operationmaterial",
},
{
"name": "comments",
"label": _("comments"),
"view": "admin:input_operationmaterial_comment",
},
{
"name": "history",
"label": _("History"),
"view": "admin:input_operationmaterial_history",
},
]
data_site.register(OperationMaterial, OperationMaterial_admin)
class OperationResource_admin(MultiDBModelAdmin):
model = OperationResource
raw_id_fields = ("operation", "resource", "skill")
save_on_top = True
exclude = ("id",)
fieldsets = (
(
None,
{
"fields": (
"resource",
"operation",
"quantity",
"quantity_fixed",
"skill",
"setup",
("effective_start", "effective_end"),
)
},
),
(_("alternates"), {"fields": ("name", "priority", "search")}),
)
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_operationresource_change",
"permissions": "input.change_operationresource",
},
{
"name": "comments",
"label": _("comments"),
"view": "admin:input_operationresource_comment",
},
{
"name": "history",
"label": _("History"),
"view": "admin:input_operationresource_history",
},
]
data_site.register(OperationResource, OperationResource_admin)
class ManufacturingOrder_admin(MultiDBModelAdmin):
model = ManufacturingOrder
raw_id_fields = ("operation", "owner")
save_on_top = True
fieldsets = (
(
None,
{
"fields": (
"reference",
"operation",
"quantity",
"startdate",
"enddate",
"owner",
"status",
)
},
),
)
exclude = (
"type",
"source",
"criticality",
"delay",
"origin",
"destination",
"item",
"supplier",
"location",
"demand",
"name",
"due",
"color",
)
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_manufacturingorder_change",
"permissions": "input.change_manufacturingorder",
}
]
data_site.register(ManufacturingOrder, ManufacturingOrder_admin)
class DistributionOrder_admin(MultiDBModelAdmin):
model = DistributionOrder
raw_id_fields = ("item",)
save_on_top = True
fieldsets = (
(
None,
{
"fields": (
"reference",
"item",
"origin",
"destination",
"quantity",
"shipping_date",
"receipt_date",
"status",
"batch",
)
},
),
)
exclude = (
"type",
"source",
"criticality",
"delay",
"operation",
"owner",
"color",
"supplier",
"location",
"demand",
"name",
"due",
"startdate",
"enddate",
)
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_distributionorder_change",
"permissions": "input.change_distributionorder",
}
]
data_site.register(DistributionOrder, DistributionOrder_admin)
class PurchaseOrder_admin(MultiDBModelAdmin):
model = PurchaseOrder
raw_id_fields = ("item", "supplier")
save_on_top = True
fieldsets = (
(
None,
{
"fields": (
"reference",
"item",
"location",
"supplier",
"quantity",
"ordering_date",
"receipt_date",
"status",
"batch",
)
},
),
)
exclude = (
"type",
"source",
"criticality",
"delay",
"operation",
"owner",
"color",
"origin",
"destination",
"demand",
"name",
"due",
"startdate",
"enddate",
)
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_purchaseorder_change",
"permissions": "input.change_purchaseorder",
}
]
data_site.register(PurchaseOrder, PurchaseOrder_admin)
class DeliveryOrder_admin(MultiDBModelAdmin):
model = DeliveryOrder
raw_id_fields = ("item", "demand")
save_on_top = True
fieldsets = (
(
None,
{
"fields": (
"reference",
"demand",
"item",
"location",
"quantity",
"status",
"batch",
)
},
),
)
exclude = (
"type",
"source",
"criticality",
"delay",
"operation",
"owner",
"color",
"origin",
"destination",
"name",
"supplier",
)
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_deliveryorder_change",
"permissions": "input.change_deliveryorder",
}
]
data_site.register(DeliveryOrder, DeliveryOrder_admin)
class Demand_admin(MultiDBModelAdmin):
model = Demand
raw_id_fields = ("customer", "item", "operation", "owner")
fieldsets = (
(
None,
{
"fields": (
"name",
"item",
"location",
"customer",
"due",
"quantity",
"batch",
"priority",
"status",
"description",
"category",
"subcategory",
"owner",
)
},
),
(
_("planning parameters"),
{"fields": ("operation", "minshipment", "maxlateness")},
),
)
save_on_top = True
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_demand_change",
"permissions": "input.change_demand",
},
{"name": "supplypath", "label": _("supply path"), "view": "supplypath_demand"},
{
"name": "constraint",
"label": _("why short or late?"),
"view": "output_constraint_demand",
},
{"name": "plan", "label": _("plan"), "view": "output_demand_pegging"},
{
"name": "comments",
"label": _("comments"),
"view": "admin:input_demand_comment",
},
{
"name": "history",
"label": _("History"),
"view": "admin:input_demand_history",
},
]
data_site.register(Demand, Demand_admin)
class OperationPlanResource_admin(MultiDBModelAdmin):
model = OperationPlanResource
raw_id_fields = (
"operationplan",
) # TODO a foreign key to OperationPlan doesn't work because it's an abstract class without admin
save_on_top = True
fieldsets = (
(None, {"fields": ("operationplan", "resource", "status")}),
(_("computed fields"), {"fields": ("quantity", "startdate", "enddate")}),
)
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_operationplanresource_change",
"permissions": "input.change_operationplanresource",
}
]
data_site.register(OperationPlanResource, OperationPlanResource_admin)
class OperationPlanMaterial_admin(MultiDBModelAdmin):
model = OperationPlanMaterial
raw_id_fields = (
"operationplan",
"item",
) # TODO a foreign key to OperationPlan doesn't work because it's an abstract class without admin
save_on_top = True
fieldsets = (
(
None,
{
"fields": (
"operationplan",
"item",
"location",
"status",
"quantity",
"flowdate",
)
},
),
)
tabs = [
{
"name": "edit",
"label": _("edit"),
"view": "admin:input_operationplanmaterial_change",
"permissions": "input.change_operationplanmaterial",
}
]
data_site.register(OperationPlanMaterial, OperationPlanMaterial_admin)
| agpl-3.0 | 1,851,207,254,629,476,900 | 25.300353 | 102 | 0.465505 | false |
mabuchilab/Instrumental | instrumental/drivers/util.py | 1 | 14998 | # -*- coding: utf-8 -*-
# Copyright 2015-2019 Nate Bogdanowicz
"""
Helpful utilities for writing drivers.
"""
import copy
import contextlib
from inspect import getargspec
import pint
from past.builtins import basestring
from . import decorator
from .. import Q_, u
from ..log import get_logger
log = get_logger(__name__)
__all__ = ['check_units', 'unit_mag', 'check_enums', 'as_enum', 'visa_timeout_context']
def to_quantity(value):
"""Convert to a pint.Quantity
This function handles offset units in strings slightly better than Q_ does. It uses caching to
avoid reparsing strings.
"""
try:
quantity = copy.copy(to_quantity.cache[value])
except (KeyError, TypeError): # key is missing or unhashable
quantity = _to_quantity(value)
if isinstance(value, basestring):
to_quantity.cache[value] = copy.copy(quantity) # Guard against mutation
return quantity
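# Cache of parsed quantities, stored as an attribute on the function itself and
# keyed by the original string passed in.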
to_quantity.cache = {}
def _to_quantity(value):
"""Convert to a pint.Quantity
This function handles offset units in strings slightly better than Q_ does.
"""
try:
return Q_(value)
except Exception as e:
log.info(e)
try:
mag_str, units = value.split()
try:
mag = int(mag_str)
except ValueError:
mag = float(mag_str)
return Q_(mag, units)
except Exception as e:
raise ValueError('Could not construct Quantity from {}'.format(value))
def as_enum(enum_type, arg):
"""Check if arg is an instance or key of enum_type, and return that enum"""
if isinstance(arg, enum_type):
return arg
try:
return enum_type[arg]
except KeyError:
raise ValueError("{} is not a valid {} enum".format(arg, enum_type.__name__))
def check_units(*pos, **named):
"""Decorator to enforce the dimensionality of input args and return values.
Allows strings and anything that can be passed as a single arg to `pint.Quantity`.
::
@check_units(value='V')
def set_voltage(value):
pass # `value` will be a pint.Quantity with Volt-like units
"""
def inout_map(arg, unit_info, name=None):
if unit_info is None:
return arg
use_units_msg = (" Make sure you're passing in a unitful value, either as a string or by "
"using `instrumental.u` or `instrumental.Q_()`")
optional, units = unit_info
if optional and arg is None:
return None
elif arg == 0:
# Allow naked zeroes as long as we're using absolute units (e.g. not degF)
# It's a bit dicey using this private method; works in 0.6 at least
if units._ok_for_muldiv():
return Q_(arg, units)
else:
if name is not None:
extra_msg = " for argument '{}'.".format(name) + use_units_msg
raise pint.DimensionalityError(u.dimensionless.units, units.units,
extra_msg=extra_msg)
else:
extra_msg = " for return value." + use_units_msg
raise pint.DimensionalityError(u.dimensionless.units, units.units,
extra_msg=extra_msg)
else:
q = to_quantity(arg)
if q.dimensionality != units.dimensionality:
extra_info = '' if isinstance(arg, Q_) else use_units_msg
if name is not None:
extra_msg = " for argument '{}'.".format(name) + extra_info
raise pint.DimensionalityError(q.units, units.units, extra_msg=extra_msg)
else:
extra_msg = " for return value." + extra_info
raise pint.DimensionalityError(q.units, units.units, extra_msg=extra_msg)
return q
return _unit_decorator(inout_map, inout_map, pos, named)
def unit_mag(*pos, **named):
"""Decorator to extract the magnitudes of input args and return values.
Allows strings and anything that can be passed as a single arg to `pint.Quantity`.
::
@unit_mag(value='V')
def set_voltage(value):
pass # The input must be in Volt-like units and `value` will be a raw number
# expressing the magnitude in Volts
"""
def in_map(arg, unit_info, name):
if unit_info is None:
return arg
optional, units = unit_info
if optional and arg is None:
return None
elif arg == 0:
# Allow naked zeroes as long as we're using absolute units (e.g. not degF)
# It's a bit dicey using this private method; works in 0.6 at least
if units._ok_for_muldiv():
return arg
else:
if name is not None:
raise pint.DimensionalityError(u.dimensionless.units, units.units,
extra_msg=" for argument '{}'".format(name))
else:
raise pint.DimensionalityError(u.dimensionless.units, units.units,
extra_msg=" for return value")
else:
q = to_quantity(arg)
try:
if q.units == units:
return q.magnitude # Speed up the common case
else:
return q.to(units).magnitude
except pint.DimensionalityError:
raise pint.DimensionalityError(q.units, units.units,
extra_msg=" for argument '{}'".format(name))
def out_map(res, unit_info):
if unit_info is None:
return res
optional, units = unit_info
if optional and res is None:
return None
else:
q = to_quantity(res)
try:
return q
except pint.DimensionalityError:
raise pint.DimensionalityError(q.units, units.units, extra_msg=" for return value")
return _unit_decorator(in_map, out_map, pos, named)
def check_enums(**kw_args):
"""Decorator to type-check input arguments as enums.
Allows strings and anything that can be passed to `~instrumental.drivers.util.as_enum`.
::
@check_enums(mode=SampleMode)
def set_mode(mode):
pass # `mode` will be of type SampleMode
"""
def checker_factory(enum_type, arg_name):
def checker(arg):
return as_enum(enum_type, arg)
return checker
return arg_decorator(checker_factory, (), kw_args)
def arg_decorator(checker_factory, dec_pos_args, dec_kw_args):
"""Produces a decorator that checks the arguments to the function in wraps.
Parameters
----------
checker_factory : function
Takes the args (decorator_arg_val, arg_name) and produces a 'checker' function, which takes
and returns a single value. When acting simply as a checker, it takes the arg, checks that
it is valid (using the ``decorator_arg_val`` and/or ``arg_name``), raises an Exception if
it is not, and returns the value unchanged if it is. Additionally, the checker may return a
different value, e.g. a ``str`` which has been converted to a ``Quantity`` as in
``check_units()``.
dec_pos_args : tuple
The positional args (i.e. *args) passed to the decorator constructor
dec_kw_args : dict
The keyword args (i.e. **kwargs) passed to the decorator constructor
"""
def wrap(func):
"""Function that actually wraps the function to be decorated"""
arg_names, vargs, kwds, default_vals = getargspec(func)
default_vals = default_vals or ()
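        # Map positional index -> argument name so positional args can be matched to their checkers.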
pos_arg_names = {i: name for i, name in enumerate(arg_names)}
# Put everything in one dict
for dec_arg_val, arg_name in zip(dec_pos_args, arg_names):
if arg_name in dec_kw_args:
raise TypeError("Argument specified twice, by both position and name")
dec_kw_args[arg_name] = dec_arg_val
checkers = {}
new_defaults = {}
num_nondefs = len(arg_names) - len(default_vals)
for default_val, arg_name in zip(default_vals, arg_names[num_nondefs:]):
if arg_name in dec_kw_args:
checker = checker_factory(dec_kw_args[arg_name], arg_name)
checkers[arg_name] = checker
new_defaults[arg_name] = checker(default_val)
for arg_name in arg_names[:num_nondefs]:
if arg_name in dec_kw_args:
checkers[arg_name] = checker_factory(dec_kw_args[arg_name], arg_name)
def wrapper(func, *args, **kwds):
checked = new_defaults.copy()
checked.update({name: (checkers[name](arg) if name in checkers else arg) for name, arg
in kwds.items()})
for i, arg in enumerate(args):
name = pos_arg_names[i]
checked[name] = checkers[name](arg) if name in checkers else arg
result = func(**checked)
return result
return decorator.decorate(func, wrapper)
return wrap
def _unit_decorator(in_map, out_map, pos_args, named_args):
def wrap(func):
ret = named_args.pop('ret', None)
if ret is None:
ret_units = None
elif isinstance(ret, tuple):
ret_units = []
for arg in ret:
if arg is None:
unit = None
elif isinstance(arg, basestring):
optional = arg.startswith('?')
if optional:
arg = arg[1:]
unit = (optional, to_quantity(arg))
ret_units.append(unit)
ret_units = tuple(ret_units)
else:
            optional = ret.startswith('?')
            arg = ret[1:] if optional else ret
            ret_units = to_quantity(arg)
arg_names, vargs, kwds, defaults = getargspec(func)
pos_units = []
for arg in pos_args:
if arg is None:
unit = None
elif isinstance(arg, basestring):
optional = arg.startswith('?')
if optional:
arg = arg[1:]
unit = (optional, to_quantity(arg))
else:
raise TypeError("Each arg spec must be a string or None")
pos_units.append(unit)
named_units = {}
for name, arg in named_args.items():
if arg is None:
unit = None
elif isinstance(arg, basestring):
optional = arg.startswith('?')
if optional:
arg = arg[1:]
unit = (optional, to_quantity(arg))
else:
raise TypeError("Each arg spec must be a string or None")
named_units[name] = unit
# Add positional units to named units
for i, units in enumerate(pos_units):
name = arg_names[i]
if name in named_units:
raise Exception("Units of {} specified by position and by name".format(name))
named_units[name] = units
# Pad out the rest of the positional units with None
pos_units.extend([None] * (len(arg_names) - len(pos_args)))
# Add named units to positional units
for name, units in named_units.items():
try:
i = arg_names.index(name)
pos_units[i] = units
except ValueError:
pass
defaults = tuple() if defaults is None else defaults
# Convert the defaults
new_defaults = {}
ndefs = len(defaults)
for d, unit, n in zip(defaults, pos_units[-ndefs:], arg_names[-ndefs:]):
new_defaults[n] = d if unit is None else in_map(d, unit, n)
def wrapper(func, *args, **kwargs):
# Convert the input arguments
new_args = [in_map(a, u, n) for a, u, n in zip(args, pos_units, arg_names)]
new_kwargs = {n: in_map(a, named_units.get(n, None), n) for n, a in kwargs.items()}
# Fill in converted defaults
for name in arg_names[max(len(args), len(arg_names)-len(defaults)):]:
if name not in new_kwargs:
new_kwargs[name] = new_defaults[name]
result = func(*new_args, **new_kwargs)
# Allow for unit checking of multiple return values
if isinstance(ret_units, tuple):
return tuple(map(out_map, result, ret_units))
else:
return out_map(result, ret_units)
return decorator.decorate(func, wrapper)
return wrap
@contextlib.contextmanager
def visa_timeout_context(resource, timeout):
"""Context manager for temporarily setting a visa resource's timeout.
::
with visa_timeout_context(rsrc, 100):
... # `rsrc` will have a timeout of 100 ms within this block
"""
old_timeout = resource.timeout
resource.timeout = timeout
yield
resource.timeout = old_timeout
_ALLOWED_VISA_ATTRS = ['timeout', 'read_termination', 'write_termination', 'end_input', 'parity',
'baud_rate']
@contextlib.contextmanager
def visa_context(resource, **settings):
"""Context manager for temporarily setting a visa resource's settings
The settings will be set at the beginning, then reset to their previous values at the end of the
context. Only the settings mentioned below are supported, and they must be specified as keyword
arguments.
If the resource does not have a given setting, it will be ignored.
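    ::
        with visa_context(rsrc, timeout=100, baud_rate=9600):
            ... # `rsrc` temporarily uses these settings within this block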
Parameters
----------
resource : VISA resource
The resource to temporarily modify
timeout :
read_termination :
write_termination :
end_input :
parity :
baud_rate :
"""
old_values = {}
attr_names = list(key for key in settings.keys() if hasattr(resource, key))
for attr_name in attr_names:
if attr_name not in _ALLOWED_VISA_ATTRS:
raise AttributeError("VISA attribute '{}' is not supported by this context manager")
for attr_name in attr_names:
old_values[attr_name] = getattr(resource, attr_name)
setattr(resource, attr_name, settings[attr_name])
yield
for attr_name in reversed(attr_names):
setattr(resource, attr_name, old_values[attr_name])
| gpl-3.0 | -6,475,790,318,701,356,000 | 34.940887 | 100 | 0.55274 | false |
ingadhoc/odoo-infrastructure | infrastructure/wizard/rename_db_wizard.py | 1 | 1333 | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import fields, api, models
class infrastructure_rename_db_name(models.TransientModel):
_name = "infrastructure.rename_database.name"
_description = "Infrastructure Rename Database Name Wizard"
name = fields.Char(
'New Database Name',
size=64,
required=True
)
# database_type_id = fields.Many2one(
# 'infrastructure.database_type',
# string='Database Type',
# required=True,
# )
    # TODO remove as we no longer use db prefix
# @api.onchange('database_type_id')
# def onchange_database_type_id(self):
# if self.database_type_id:
# self.name = self.database_type_id.prefix + '_'
# TODO send suggested backup data
@api.multi
def action_confirm(self):
active_id = self._context.get('active_id')
if not active_id:
return False
active_record = self.env['infrastructure.database'].browse(active_id)
active_record.rename_db(self.name)
# active_record.database_type_id = self.database_type_id
| agpl-3.0 | 8,406,193,038,402,414,000 | 34.078947 | 78 | 0.562641 | false |
halftk/OpenShareNow | OpenShareNow/OpenShareNow/settings.py | 1 | 2380 | """
Django settings for OpenShareNow project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import os.path
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'etlrhar2t*+sbit%hoibvxftrvy%#6%)&9#x6@p()94cqr%i-v'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(os.path.dirname(__file__),'../osnow/templates'),
)
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'osnow',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'OpenShareNow.urls'
WSGI_APPLICATION = 'OpenShareNow.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'basededades.db',
'USER': '',
'PASSWORD': '',
#'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
| gpl-2.0 | -430,860,475,850,630,200 | 24.319149 | 85 | 0.714286 | false |
hgijeon/the_PLAY | Structure/View/SelectScreenView.py | 1 | 5849 | from .View import *
import os
from ..Scene.GameScene import GameScene
from tkinter.filedialog import askopenfilename
class SelectScreenView(View):
def onInit(self):
self.red = gameapi.Color(255,0,0)
self.fileSelected = False
self.background = self.resizeImage(gameapi.image.load(os.path.join("Image","easy_background.jpg")), (800,340))
self.icon_pressed = self.resizeImage(gameapi.image.load(os.path.join("Image","icon_pressed.png")), (150,150))
self.icon_unpressed = self.resizeImage(gameapi.image.load(os.path.join("Image","icon_unpressed.png")), (150,150))
self.easy = self.resizeImage(gameapi.image.load(os.path.join("Image","easy.png")), (300,150))
self.normal = self.resizeImage(gameapi.image.load(os.path.join("Image","normal.png")), (300,150))
self.hard = self.resizeImage(gameapi.image.load(os.path.join("Image","hard.png")), (300,150))
self.title1 = self.resizeImage(gameapi.image.load(os.path.join("Image","title1.png")), (300,170))
self.title2 = self.resizeImage(gameapi.image.load(os.path.join("Image","title2.png")), (300,200))
self.title3 = self.resizeImage(gameapi.image.load(os.path.join("Image","title3.png")), (300,200))
self.title4 = self.resizeImage(gameapi.image.load(os.path.join("Image","title3.png")), (300,200))
self.title5 = self.resizeImage(gameapi.image.load(os.path.join("Image","title1.png")), (300,170))
self.title6 = self.resizeImage(gameapi.image.load(os.path.join("Image","title2.png")), (300,200))
self.title7 = self.resizeImage(gameapi.image.load(os.path.join("Image","title7.png")), (300,200))
self.title8 = self.resizeImage(gameapi.image.load(os.path.join("Image","title2.png")), (300,200))
self.title9 = self.resizeImage(gameapi.image.load(os.path.join("Image","title3.png")), (300,200))
self.mode = 1
self.icon = 1
def onDraw(self):
self.fill((200,200,200))
#self.drawRect(self.red, (0, 0, 800, 600))
leftTop = (0,0)
self.drawImage (self.background, leftTop)
if self.mode == 1:
leftTop = (0,0)
self.drawImage (self.easy, leftTop)
elif self.mode == 2:
leftTop = (250,0)
self.drawImage (self.normal, leftTop)
elif self.mode == 3:
leftTop = (500,0)
self.drawImage (self.hard, leftTop)
self.drawIcons()
def onUpdateTime(self, time):
if self.fileSelected:
self.scene.sceneManager.pushGameScene(self.scene.filename)
if self.keyMiddle.check(self.keyMiddle.key['5']):
self.mode = 1
elif self.keyMiddle.check(self.keyMiddle.key['6']):
self.mode = 2
elif self.keyMiddle.check(self.keyMiddle.key['7']):
self.mode = 3
elif self.keyMiddle.check(self.keyMiddle.key['r']):
self.icon = 1
elif self.keyMiddle.check(self.keyMiddle.key['t']):
self.icon = 2
elif self.keyMiddle.check(self.keyMiddle.key['y']):
self.icon = 3
elif self.keyMiddle.check(81):
self.fileopen()
def fileopen(self):
print ('Fileopen')
print (self.icon)
if self.mode == 1:
if self.icon == 1:
self.scene.filename = "MIDI/Music/Twinkle Twinkle Little Star.mid"
elif self.icon == 2:
self.scene.filename = "MIDI/Music/Happy Birthday.mid"
elif self.icon == 3:
self.scene.filename = "MIDI/Music/Amazing Grace.mid"
if self.mode == 2:
if self.icon == 1:
self.scene.filename = "MIDI/Music/moonlight-movement1.mid"
elif self.icon == 2:
self.scene.filename = "MIDI/Music/wagner-bridal-chorus.mid"
elif self.icon == 3:
self.scene.filename = "MIDI/Music/pachelbels-canon-arranged.mid"
if self.mode == 3:
if self.icon == 1:
self.scene.filename = "MIDI/Music/Minuet.mid"
elif self.icon == 2:
self.scene.filename = "MIDI/Music/idina_menzel-let_it_go.mid"
elif self.icon == 3:
self.scene.filename = "MIDI/Music/The-Entertainer.mid"
self.fileSelected = True
def drawIcons(self):
leftTop = (80,50)
self.drawImage (self.icon_unpressed, leftTop)
leftTop = (330,50)
self.drawImage (self.icon_unpressed, leftTop)
leftTop = (580,50)
self.drawImage (self.icon_unpressed, leftTop)
if self.icon == 1:
leftTop = (80, 50)
self.drawImage (self.icon_pressed, leftTop)
elif self.icon == 2:
leftTop = (330,50)
self.drawImage (self.icon_pressed, leftTop)
elif self.icon == 3:
leftTop = (580,50)
self.drawImage (self.icon_pressed, leftTop)
if self.mode == 1:
leftTop = (0,200)
self.drawImage (self.title1, leftTop)
leftTop = (250,200)
self.drawImage (self.title2, leftTop)
leftTop = (500,200)
self.drawImage (self.title3, leftTop)
if self.mode == 2:
leftTop = (0,200)
self.drawImage (self.title4, leftTop)
leftTop = (250,200)
self.drawImage (self.title5, leftTop)
leftTop = (500,200)
self.drawImage (self.title6, leftTop)
if self.mode == 3:
leftTop = (0,200)
self.drawImage (self.title7, leftTop)
leftTop = (250,200)
self.drawImage (self.title8, leftTop)
leftTop = (500,200)
self.drawImage (self.title9, leftTop)
| mit | 6,012,414,013,244,064,000 | 42.007353 | 121 | 0.570525 | false |
CollabQ/CollabQ | actor/urls.py | 1 | 2017 | # Copyright 2010 http://www.collabq.com
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls.defaults import *
urlpatterns = patterns('actor.views',
(r'^invite$', 'actor_invite'),
(r'^findpeople$', 'find_contacts'),
(r'^contacts$', 'actor_contacts'),
(r'^contacts/(?P<format>json|xml|atom)$', 'actor_contacts'),
(r'^followers$', 'actor_followers'),
(r'^followers/(?P<format>json|xml|atom)$', 'actor_followers'),
(r'^presence/(?P<item>[\da-f]+|last)/(?P<format>json|xml|atom)$', 'actor_item'),
(r'^presence/(?P<item>[\da-f]+|last)$', 'actor_item'),
#(r'^presence/(?P<format>json|xml|atom)$', 'presence_current'),
#(r'^presence$', 'presence_current'),
(r'^(?P<format>json|xml|atom|rss)$', 'actor_history'),
(r'^feed/(?P<format>json|xml|atom|rss)$', 'actor_history'),
(r'^contacts/feed/(?P<format>json|xml|atom|rss)$', 'actor_overview'),
(r'^overview/(?P<format>json|xml|atom|rss)$', 'actor_overview'),
(r'^mentions/(?P<format>json|xml|atom|rss)$', 'actor_mentions'),
(r'^twitter', 'actor_twitter'),
(r'^twitter/(?P<format>json|xml|atom|rss)$', 'actor_twitter'),
(r'^overview$', 'actor_overview', {"format": "html"}),
(r'^mentions$', 'actor_mentions', {"format": "html"}),
(r'^$', 'actor_history', {'format': 'html'}),
(r'^settings$', 'actor_settings'),
(r'^settings/(?P<page>\w+)$', 'actor_settings'),
)
handler404 = 'common.views.common_404'
handler500 = 'common.views.common_500'
| apache-2.0 | 272,979,295,601,128,500 | 42.847826 | 84 | 0.647 | false |
chaudum/crate-top | cstat/widgets.py | 1 | 8226 | # vi: set encoding=utf-8
# Licensed to CRATE Technology GmbH ("Crate") under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. Crate licenses
# this file to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# However, if you have executed another commercial license agreement
# with Crate these terms will supersede the license and you may use the
# software solely pursuant to the terms of the relevant commercial agreement.
import urwid
from datetime import datetime
from .utils import byte_size
from .log import get_logger
logger = get_logger(__name__)
class BarWidgetBase(urwid.Text):
START = '['
END = ']'
SINGLE = '='
DOUBLE = '#'
WATERMARK_LOW = 0.80
WATERMARK_HIGH = 0.95
def __init__(self, label, symbol):
self.label = '{0:<10}'.format(label[:9])
self.symbol = symbol
super().__init__(self.label)
def rows(self, size, focus=False):
return 1
class HorizontalBar(BarWidgetBase):
def __init__(self, label, current=0.0, total=100.0, symbol=BarWidgetBase.DOUBLE):
super().__init__(label, symbol)
self.set_progress(current, total)
def set_progress(self, current=0.0, total=100.0):
self.progress = total > 0 and current / total or 0.0
self.current = current
self.total = total
self._invalidate()
def color(self):
if self.progress < self.WATERMARK_LOW:
return 'text_green'
elif self.progress < self.WATERMARK_HIGH:
return 'text_yellow'
return 'text_red'
def render(self, size, focus=False):
(maxcol, ) = size
label_len = len(self.label)
steps = maxcol - 2 - label_len
chars = round(float(steps) * self.progress)
bar = self.symbol * chars
text = self.progress_text()
base = bar + ' ' * (steps - chars)
base = base[:len(base)-len(text)] + text
line_attr = [('default', label_len + 1)]
if chars:
line_attr += [(self.color(), chars)]
line_attr += [('default', 1 + steps - chars)]
line = self.label + self.START + base + self.END
return urwid.TextCanvas([line.encode('utf-8'), ],
attr=[line_attr],
maxcol=maxcol)
class HorizontalPercentBar(HorizontalBar):
def progress_text(self):
return '{:.1%}'.format(self.progress)
class HorizontalBytesBar(HorizontalBar):
def progress_text(self):
return '{}/{}'.format(byte_size(self.current), byte_size(self.total))
class MultiBarWidget(urwid.Pile):
def __init__(self, title, bar_cls=HorizontalPercentBar, **bar_options):
self.title = title
self.bar_cls = bar_cls
self.bar = bar_cls('', **bar_options)
self.details = urwid.Pile([])
widgets = [
self.bar,
self.details,
]
self._history = []
super().__init__(widgets)
def toggle_details(self):
if len(self.details.contents):
self.details.contents = []
else:
self.append_node_bars()
def append_node_bars(self):
bars = []
for value in self._history:
bar = self.bar_cls(value[2], value[0], value[1],
symbol=HorizontalBar.SINGLE)
bars.append((bar, ('pack', None)))
self.details.contents = bars
return len(bars)
def sum(self, values=[]):
logger.debug('%s', [sum([x[0] for x in values]), sum([x[1] for x in values])])
return (sum([x[0] for x in values]), sum([x[1] for x in values]))
def set_data(self, values=[]):
self._history = values
self.bar.set_progress(*self.sum(values))
if len(self.details.contents) and \
self.append_node_bars():
for idx, widget in enumerate(self.details.contents):
bar = widget[0]
bar.set_progress(*values[idx][:2])
class IOBar(BarWidgetBase):
"""
Tx ... sent/written/outbound
Rx ... received/read/inbound
"""
def __init__(self, label, suffix='p/s'):
super().__init__(label, 'x')
self.tpl = '{0}: {1:>11}'
self.suffix = suffix
self.set_progress(0.0, 0.0)
def set_progress(self, tx=0.0, rx=0.0):
self.tx = tx
self.rx = rx
self._invalidate()
def render(self, size, focus=False):
"""
LABEL [ Tx: 0.0 b/s Rx: 0.0b/s ]
+----------+-+-+----+----------+...-+----+----------+-+-+
10 1 1 4 11 1 4 11 1 1
+--------------+---------------+...-+---------------+---+
12 15 1 15 2
+-------------------------------...---------------------+
                                   45
"""
(maxcol, ) = size
label_len = len(self.label) # sanity check. should always be 10
var = maxcol - 45
if var < 1:
raise AssertionError('IOBar requires a minimum width of 45 columns!')
text = ' '
text += self.tpl.format('Tx', byte_size(self.tx, suffix=self.suffix, k=1000))
text += ' ' * var
text += self.tpl.format('Rx', byte_size(self.rx, suffix=self.suffix, k=1000))
text += ' '
line_attr = [
('default', 12),
('tx', 15),
('default', var),
('rx', 15),
('default', 2),
]
line = self.label + self.START + text + self.END
return urwid.TextCanvas([line.encode('utf-8'), ],
attr=[line_attr],
maxcol=maxcol)
class IOStatWidget(MultiBarWidget):
def __init__(self, title, suffix):
super().__init__(title, bar_cls=IOBar, suffix=suffix)
self.suffix = suffix
def append_node_bars(self):
bars = []
for ts, packets, name in self._history:
bar = self.bar_cls(name, suffix=self.suffix)
bars.append((bar, ('pack', None)))
self.details.contents = bars
return len(bars)
def sum(self, values=[]):
tx_total = 0.0
rx_total = 0.0
if len(self._history):
for idx, value in enumerate(values):
if self._history[idx][0] < values[idx][0]:
tx, rx = self._calculate(values[idx], self._history[idx])
tx_total += tx
rx_total += rx
return tx_total, rx_total
def set_data(self, values=[]):
"""
:param values: a list of [timestamp, {'tx': ..., 'rx': ...}, node_name]
"""
if len(self._history) and \
len(self.details.contents) and \
self.append_node_bars():
for idx, widget in enumerate(self.details.contents):
bar = widget[0]
if self._history[idx][0] >= values[idx][0]:
tx, rx = bar.tx, bar.rx
else:
tx, rx = self._calculate(values[idx], self._history[idx])
bar.set_progress(tx, rx)
self.bar.set_progress(*self.sum(values))
self._history = values
def _calculate(self, value, last_value):
prev_timestamp, prev_values, prev_name = last_value
timestamp, values, name = value
assert prev_name == name
diff = (timestamp - prev_timestamp).total_seconds()
tx = (values['tx'] - prev_values['tx']) / diff
rx = (values['rx'] - prev_values['rx']) / diff
return tx, rx
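# A minimal sketch of the data shape consumed by IOStatWidget.set_data()
# (the widget title, node name and byte counters below are illustrative only):
#
#     net = IOStatWidget('NETWORK I/O', suffix='b/s')
#     net.set_data([(datetime.utcnow(), {'tx': 1024, 'rx': 2048}, 'node-1')])
#     # a later call with fresh timestamps lets _calculate() turn the
#     # cumulative tx/rx counters into per-second rates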
| apache-2.0 | -6,437,177,651,761,129,000 | 33.275 | 86 | 0.532093 | false |
gennaios/alfred-gnosis | src/playhouse/sqlite_ext.py | 1 | 43137 | import json
import math
import re
import struct
import sys
from peewee import *
from peewee import ColumnBase
from peewee import EnclosedNodeList
from peewee import Entity
from peewee import Expression
from peewee import Node
from peewee import NodeList
from peewee import OP
from peewee import VirtualField
from peewee import merge_dict
from peewee import sqlite3
try:
from playhouse._sqlite_ext import (
backup,
backup_to_file,
Blob,
ConnectionHelper,
register_bloomfilter,
register_hash_functions,
register_rank_functions,
sqlite_get_db_status,
sqlite_get_status,
TableFunction,
ZeroBlob,
)
CYTHON_SQLITE_EXTENSIONS = True
except ImportError:
CYTHON_SQLITE_EXTENSIONS = False
if sys.version_info[0] == 3:
basestring = str
FTS3_MATCHINFO = 'pcx'
FTS4_MATCHINFO = 'pcnalx'
if sqlite3 is not None:
FTS_VERSION = 4 if sqlite3.sqlite_version_info[:3] >= (3, 7, 4) else 3
else:
FTS_VERSION = 3
FTS5_MIN_SQLITE_VERSION = (3, 9, 0)
class RowIDField(AutoField):
auto_increment = True
column_name = name = required_name = 'rowid'
def bind(self, model, name, *args):
if name != self.required_name:
raise ValueError('%s must be named "%s".' %
(type(self), self.required_name))
super(RowIDField, self).bind(model, name, *args)
class DocIDField(RowIDField):
column_name = name = required_name = 'docid'
class AutoIncrementField(AutoField):
def ddl(self, ctx):
node_list = super(AutoIncrementField, self).ddl(ctx)
return NodeList((node_list, SQL('AUTOINCREMENT')))
class JSONPath(ColumnBase):
def __init__(self, field, path=None):
super(JSONPath, self).__init__()
self._field = field
self._path = path or ()
@property
def path(self):
return Value('$%s' % ''.join(self._path))
def __getitem__(self, idx):
if isinstance(idx, int):
item = '[%s]' % idx
else:
item = '.%s' % idx
return JSONPath(self._field, self._path + (item,))
def set(self, value, as_json=None):
if as_json or isinstance(value, (list, dict)):
value = fn.json(self._field._json_dumps(value))
return fn.json_set(self._field, self.path, value)
def update(self, value):
return self.set(fn.json_patch(self, self._field._json_dumps(value)))
def remove(self):
return fn.json_remove(self._field, self.path)
def json_type(self):
return fn.json_type(self._field, self.path)
def length(self):
return fn.json_array_length(self._field, self.path)
def children(self):
return fn.json_each(self._field, self.path)
def tree(self):
return fn.json_tree(self._field, self.path)
def __sql__(self, ctx):
return ctx.sql(fn.json_extract(self._field, self.path)
if self._path else self._field)
class JSONField(TextField):
field_type = 'JSON'
def __init__(self, json_dumps=None, json_loads=None, **kwargs):
self._json_dumps = json_dumps or json.dumps
self._json_loads = json_loads or json.loads
super(JSONField, self).__init__(**kwargs)
def python_value(self, value):
if value is not None:
try:
return self._json_loads(value)
except (TypeError, ValueError):
return value
def db_value(self, value):
if value is not None:
if not isinstance(value, Node):
value = fn.json(self._json_dumps(value))
return value
def _e(op):
def inner(self, rhs):
if isinstance(rhs, (list, dict)):
rhs = Value(rhs, converter=self.db_value, unpack=False)
return Expression(self, op, rhs)
return inner
__eq__ = _e(OP.EQ)
__ne__ = _e(OP.NE)
__gt__ = _e(OP.GT)
__ge__ = _e(OP.GTE)
__lt__ = _e(OP.LT)
__le__ = _e(OP.LTE)
__hash__ = Field.__hash__
def __getitem__(self, item):
return JSONPath(self)[item]
def set(self, value, as_json=None):
return JSONPath(self).set(value, as_json)
def update(self, data):
return JSONPath(self).update(data)
def remove(self):
return JSONPath(self).remove()
def json_type(self):
return fn.json_type(self)
def length(self):
return fn.json_array_length(self)
def children(self):
"""
Schema of `json_each` and `json_tree`:
key,
value,
type TEXT (object, array, string, etc),
atom (value for primitive/scalar types, NULL for array and object)
id INTEGER (unique identifier for element)
parent INTEGER (unique identifier of parent element or NULL)
fullkey TEXT (full path describing element)
path TEXT (path to the container of the current element)
json JSON hidden (1st input parameter to function)
root TEXT hidden (2nd input parameter, path at which to start)
"""
return fn.json_each(self)
def tree(self):
return fn.json_tree(self)
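# A minimal usage sketch for JSONField / JSONPath (the "KV" model and its data
# are illustrative assumptions; requires SQLite built with the json1
# extension):
#
#     db = SqliteExtDatabase(':memory:')
#
#     class KV(Model):
#         data = JSONField()
#         class Meta:
#             database = db
#
#     db.create_tables([KV])
#     KV.create(data={'k1': {'x': 1}})
#     KV.select().where(KV.data['k1']['x'] == 1)              # json_extract()
#     KV.update(data=KV.data['k1'].set({'x': 2})).execute()   # json_set()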
class SearchField(Field):
def __init__(self, unindexed=False, column_name=None, **k):
if k:
raise ValueError('SearchField does not accept these keyword '
'arguments: %s.' % sorted(k))
super(SearchField, self).__init__(unindexed=unindexed,
column_name=column_name, null=True)
def match(self, term):
return match(self, term)
class VirtualTableSchemaManager(SchemaManager):
def _create_virtual_table(self, safe=True, **options):
options = self.model.clean_options(
merge_dict(self.model._meta.options, options))
# Structure:
# CREATE VIRTUAL TABLE <model>
# USING <extension_module>
# ([prefix_arguments, ...] fields, ... [arguments, ...], [options...])
ctx = self._create_context()
ctx.literal('CREATE VIRTUAL TABLE ')
if safe:
ctx.literal('IF NOT EXISTS ')
(ctx
.sql(self.model)
.literal(' USING '))
ext_module = self.model._meta.extension_module
if isinstance(ext_module, Node):
return ctx.sql(ext_module)
ctx.sql(SQL(ext_module)).literal(' ')
arguments = []
meta = self.model._meta
if meta.prefix_arguments:
arguments.extend([SQL(a) for a in meta.prefix_arguments])
# Constraints, data-types, foreign and primary keys are all omitted.
for field in meta.sorted_fields:
if isinstance(field, (RowIDField)) or field._hidden:
continue
field_def = [Entity(field.column_name)]
if field.unindexed:
field_def.append(SQL('UNINDEXED'))
arguments.append(NodeList(field_def))
if meta.arguments:
arguments.extend([SQL(a) for a in meta.arguments])
if options:
arguments.extend(self._create_table_option_sql(options))
return ctx.sql(EnclosedNodeList(arguments))
def _create_table(self, safe=True, **options):
if issubclass(self.model, VirtualModel):
return self._create_virtual_table(safe, **options)
return super(VirtualTableSchemaManager, self)._create_table(
safe, **options)
class VirtualModel(Model):
class Meta:
arguments = None
extension_module = None
prefix_arguments = None
primary_key = False
schema_manager_class = VirtualTableSchemaManager
@classmethod
def clean_options(cls, options):
return options
class BaseFTSModel(VirtualModel):
@classmethod
def clean_options(cls, options):
content = options.get('content')
prefix = options.get('prefix')
tokenize = options.get('tokenize')
if isinstance(content, basestring) and content == '':
# Special-case content-less full-text search tables.
options['content'] = "''"
elif isinstance(content, Field):
# Special-case to ensure fields are fully-qualified.
options['content'] = Entity(content.model._meta.table_name,
content.column_name)
if prefix:
if isinstance(prefix, (list, tuple)):
prefix = ','.join([str(i) for i in prefix])
options['prefix'] = "'%s'" % prefix.strip("' ")
if tokenize and cls._meta.extension_module.lower() == 'fts5':
# Tokenizers need to be in quoted string for FTS5, but not for FTS3
# or FTS4.
options['tokenize'] = '"%s"' % tokenize
return options
class FTSModel(BaseFTSModel):
"""
VirtualModel class for creating tables that use either the FTS3 or FTS4
search extensions. Peewee automatically determines which version of the
FTS extension is supported and will use FTS4 if possible.
"""
# FTS3/4 uses "docid" in the same way a normal table uses "rowid".
docid = DocIDField()
class Meta:
extension_module = 'FTS%s' % FTS_VERSION
@classmethod
def _fts_cmd(cls, cmd):
tbl = cls._meta.table_name
res = cls._meta.database.execute_sql(
"INSERT INTO %s(%s) VALUES('%s');" % (tbl, tbl, cmd))
return res.fetchone()
@classmethod
def optimize(cls):
return cls._fts_cmd('optimize')
@classmethod
def rebuild(cls):
return cls._fts_cmd('rebuild')
@classmethod
def integrity_check(cls):
return cls._fts_cmd('integrity-check')
@classmethod
def merge(cls, blocks=200, segments=8):
return cls._fts_cmd('merge=%s,%s' % (blocks, segments))
@classmethod
def automerge(cls, state=True):
return cls._fts_cmd('automerge=%s' % (state and '1' or '0'))
@classmethod
def match(cls, term):
"""
Generate a `MATCH` expression appropriate for searching this table.
"""
return match(cls._meta.entity, term)
@classmethod
def rank(cls, *weights):
matchinfo = fn.matchinfo(cls._meta.entity, FTS3_MATCHINFO)
return fn.fts_rank(matchinfo, *weights)
@classmethod
def bm25(cls, *weights):
match_info = fn.matchinfo(cls._meta.entity, FTS4_MATCHINFO)
return fn.fts_bm25(match_info, *weights)
@classmethod
def bm25f(cls, *weights):
match_info = fn.matchinfo(cls._meta.entity, FTS4_MATCHINFO)
return fn.fts_bm25f(match_info, *weights)
@classmethod
def lucene(cls, *weights):
match_info = fn.matchinfo(cls._meta.entity, FTS4_MATCHINFO)
return fn.fts_lucene(match_info, *weights)
@classmethod
def _search(cls, term, weights, with_score, score_alias, score_fn,
explicit_ordering):
if not weights:
rank = score_fn()
elif isinstance(weights, dict):
weight_args = []
for field in cls._meta.sorted_fields:
# Attempt to get the specified weight of the field by looking
                # it up using its field instance followed by name.
field_weight = weights.get(field, weights.get(field.name, 1.0))
weight_args.append(field_weight)
rank = score_fn(*weight_args)
else:
rank = score_fn(*weights)
selection = ()
order_by = rank
if with_score:
selection = (cls, rank.alias(score_alias))
if with_score and not explicit_ordering:
order_by = SQL(score_alias)
return (cls
.select(*selection)
.where(cls.match(term))
.order_by(order_by))
@classmethod
def search(cls, term, weights=None, with_score=False, score_alias='score',
explicit_ordering=False):
"""Full-text search using selected `term`."""
return cls._search(
term,
weights,
with_score,
score_alias,
cls.rank,
explicit_ordering)
@classmethod
def search_bm25(cls, term, weights=None, with_score=False,
score_alias='score', explicit_ordering=False):
"""Full-text search for selected `term` using BM25 algorithm."""
return cls._search(
term,
weights,
with_score,
score_alias,
cls.bm25,
explicit_ordering)
@classmethod
def search_bm25f(cls, term, weights=None, with_score=False,
score_alias='score', explicit_ordering=False):
"""Full-text search for selected `term` using BM25 algorithm."""
return cls._search(
term,
weights,
with_score,
score_alias,
cls.bm25f,
explicit_ordering)
@classmethod
def search_lucene(cls, term, weights=None, with_score=False,
score_alias='score', explicit_ordering=False):
"""Full-text search for selected `term` using BM25 algorithm."""
return cls._search(
term,
weights,
with_score,
score_alias,
cls.lucene,
explicit_ordering)
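# A minimal FTS3/FTS4 usage sketch (the "DocumentIndex" model and database
# handle are illustrative assumptions):
#
#     db = SqliteExtDatabase(':memory:')
#
#     class DocumentIndex(FTSModel):
#         content = SearchField()
#         class Meta:
#             database = db
#
#     db.create_tables([DocumentIndex])
#     DocumentIndex.insert({DocumentIndex.content: 'a full-text note'}).execute()
#     DocumentIndex.search('note', with_score=True)   # ordered by fts_rank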
_alphabet = 'abcdefghijklmnopqrstuvwxyz'
_alphanum = (set('\t ,"(){}*:_+0123456789') |
set(_alphabet) |
set(_alphabet.upper()) |
set((chr(26),)))
_invalid_ascii = set(chr(p) for p in range(128) if chr(p) not in _alphanum)
_quote_re = re.compile('(?:[^\s"]|"(?:\\.|[^"])*")+')
class FTS5Model(BaseFTSModel):
"""
Requires SQLite >= 3.9.0.
Table options:
content: table name of external content, or empty string for "contentless"
content_rowid: column name of external content primary key
prefix: integer(s). Ex: '2' or '2 3 4'
tokenize: porter, unicode61, ascii. Ex: 'porter unicode61'
The unicode tokenizer supports the following parameters:
* remove_diacritics (1 or 0, default is 1)
      * tokenchars (string of characters, e.g. '-_')
* separators (string of characters)
Parameters are passed as alternating parameter name and value, so:
{'tokenize': "unicode61 remove_diacritics 0 tokenchars '-_'"}
Content-less tables:
    If you don't need the full-text content in its original form, you can
    specify a content-less table. Searches and auxiliary functions will work
    as usual, but the only value that can be returned when SELECT-ing is the
    rowid. Also
content-less tables do not support UPDATE or DELETE.
External content tables:
You can set up triggers to sync these, e.g.
-- Create a table. And an external content fts5 table to index it.
CREATE TABLE tbl(a INTEGER PRIMARY KEY, b);
CREATE VIRTUAL TABLE ft USING fts5(b, content='tbl', content_rowid='a');
-- Triggers to keep the FTS index up to date.
CREATE TRIGGER tbl_ai AFTER INSERT ON tbl BEGIN
INSERT INTO ft(rowid, b) VALUES (new.a, new.b);
END;
CREATE TRIGGER tbl_ad AFTER DELETE ON tbl BEGIN
INSERT INTO ft(fts_idx, rowid, b) VALUES('delete', old.a, old.b);
END;
CREATE TRIGGER tbl_au AFTER UPDATE ON tbl BEGIN
INSERT INTO ft(fts_idx, rowid, b) VALUES('delete', old.a, old.b);
INSERT INTO ft(rowid, b) VALUES (new.a, new.b);
END;
Built-in auxiliary functions:
* bm25(tbl[, weight_0, ... weight_n])
* highlight(tbl, col_idx, prefix, suffix)
* snippet(tbl, col_idx, prefix, suffix, ?, max_tokens)
"""
# FTS5 does not support declared primary keys, but we can use the
# implicit rowid.
rowid = RowIDField()
class Meta:
extension_module = 'fts5'
_error_messages = {
'field_type': ('Besides the implicit `rowid` column, all columns must '
'be instances of SearchField'),
'index': 'Secondary indexes are not supported for FTS5 models',
'pk': 'FTS5 models must use the default `rowid` primary key',
}
@classmethod
def validate_model(cls):
# Perform FTS5-specific validation and options post-processing.
if cls._meta.primary_key.name != 'rowid':
raise ImproperlyConfigured(cls._error_messages['pk'])
for field in cls._meta.fields.values():
if not isinstance(field, (SearchField, RowIDField)):
raise ImproperlyConfigured(cls._error_messages['field_type'])
if cls._meta.indexes:
raise ImproperlyConfigured(cls._error_messages['index'])
@classmethod
def fts5_installed(cls):
if sqlite3.sqlite_version_info[:3] < FTS5_MIN_SQLITE_VERSION:
return False
# Test in-memory DB to determine if the FTS5 extension is installed.
tmp_db = sqlite3.connect(':memory:')
try:
tmp_db.execute('CREATE VIRTUAL TABLE fts5test USING fts5 (data);')
except:
try:
tmp_db.enable_load_extension(True)
tmp_db.load_extension('fts5')
except:
return False
else:
cls._meta.database.load_extension('fts5')
finally:
tmp_db.close()
return True
@staticmethod
def validate_query(query):
"""
Simple helper function to indicate whether a search query is a
valid FTS5 query. Note: this simply looks at the characters being
used, and is not guaranteed to catch all problematic queries.
"""
tokens = _quote_re.findall(query)
for token in tokens:
if token.startswith('"') and token.endswith('"'):
continue
if set(token) & _invalid_ascii:
return False
return True
@staticmethod
def clean_query(query, replace=chr(26)):
"""
Clean a query of invalid tokens.
"""
accum = []
any_invalid = False
tokens = _quote_re.findall(query)
for token in tokens:
if token.startswith('"') and token.endswith('"'):
accum.append(token)
continue
token_set = set(token)
invalid_for_token = token_set & _invalid_ascii
if invalid_for_token:
any_invalid = True
for c in invalid_for_token:
token = token.replace(c, replace)
accum.append(token)
if any_invalid:
return ' '.join(accum)
return query
@classmethod
def match(cls, term):
"""
Generate a `MATCH` expression appropriate for searching this table.
"""
return match(cls._meta.entity, term)
@classmethod
def rank(cls, *args):
return cls.bm25(*args) if args else SQL('rank')
@classmethod
def bm25(cls, *weights):
return fn.bm25(cls._meta.entity, *weights)
@classmethod
def search(cls, term, weights=None, with_score=False, score_alias='score',
explicit_ordering=False):
"""Full-text search using selected `term`."""
return cls.search_bm25(
FTS5Model.clean_query(term),
weights,
with_score,
score_alias,
explicit_ordering)
@classmethod
def search_bm25(cls, term, weights=None, with_score=False,
score_alias='score', explicit_ordering=False):
"""Full-text search using selected `term`."""
if not weights:
rank = SQL('rank')
elif isinstance(weights, dict):
weight_args = []
for field in cls._meta.sorted_fields:
if isinstance(field, SearchField) and not field.unindexed:
weight_args.append(
weights.get(field, weights.get(field.name, 1.0)))
rank = fn.bm25(cls._meta.entity, *weight_args)
else:
rank = fn.bm25(cls._meta.entity, *weights)
selection = ()
order_by = rank
if with_score:
selection = (cls, rank.alias(score_alias))
if with_score and not explicit_ordering:
order_by = SQL(score_alias)
return (cls
.select(*selection)
.where(cls.match(FTS5Model.clean_query(term)))
.order_by(order_by))
@classmethod
def _fts_cmd_sql(cls, cmd, **extra_params):
tbl = cls._meta.entity
columns = [tbl]
values = [cmd]
for key, value in extra_params.items():
columns.append(Entity(key))
values.append(value)
return NodeList((
SQL('INSERT INTO'),
cls._meta.entity,
EnclosedNodeList(columns),
SQL('VALUES'),
EnclosedNodeList(values)))
@classmethod
def _fts_cmd(cls, cmd, **extra_params):
query = cls._fts_cmd_sql(cmd, **extra_params)
return cls._meta.database.execute(query)
@classmethod
def automerge(cls, level):
if not (0 <= level <= 16):
raise ValueError('level must be between 0 and 16')
return cls._fts_cmd('automerge', rank=level)
@classmethod
def merge(cls, npages):
return cls._fts_cmd('merge', rank=npages)
@classmethod
def set_pgsz(cls, pgsz):
return cls._fts_cmd('pgsz', rank=pgsz)
@classmethod
def set_rank(cls, rank_expression):
return cls._fts_cmd('rank', rank=rank_expression)
@classmethod
def delete_all(cls):
return cls._fts_cmd('delete-all')
@classmethod
def VocabModel(cls, table_type='row', table=None):
if table_type not in ('row', 'col', 'instance'):
raise ValueError('table_type must be either "row", "col" or '
'"instance".')
attr = '_vocab_model_%s' % table_type
if not hasattr(cls, attr):
class Meta:
database = cls._meta.database
table_name = table or cls._meta.table_name + '_v'
extension_module = fn.fts5vocab(
cls._meta.entity,
SQL(table_type))
attrs = {
'term': VirtualField(TextField),
'doc': IntegerField(),
'cnt': IntegerField(),
'rowid': RowIDField(),
'Meta': Meta,
}
if table_type == 'col':
attrs['col'] = VirtualField(TextField)
elif table_type == 'instance':
attrs['offset'] = VirtualField(IntegerField)
class_name = '%sVocab' % cls.__name__
setattr(cls, attr, type(class_name, (VirtualModel,), attrs))
return getattr(cls, attr)
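# A minimal FTS5 usage sketch (the "NoteIndex" model and database handle are
# illustrative assumptions; requires SQLite >= 3.9 built with the fts5
# extension, cf. FTS5Model.fts5_installed()):
#
#     class NoteIndex(FTS5Model):
#         title = SearchField()
#         body = SearchField()
#         class Meta:
#             database = db
#
#     db.create_tables([NoteIndex])
#     NoteIndex.search('sqlite', with_score=True, score_alias='score')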
def ClosureTable(model_class, foreign_key=None, referencing_class=None,
referencing_key=None):
"""Model factory for the transitive closure extension."""
if referencing_class is None:
referencing_class = model_class
if foreign_key is None:
for field_obj in model_class._meta.refs:
if field_obj.rel_model is model_class:
foreign_key = field_obj
break
else:
raise ValueError('Unable to find self-referential foreign key.')
source_key = model_class._meta.primary_key
if referencing_key is None:
referencing_key = source_key
class BaseClosureTable(VirtualModel):
depth = VirtualField(IntegerField)
id = VirtualField(IntegerField)
idcolumn = VirtualField(TextField)
parentcolumn = VirtualField(TextField)
root = VirtualField(IntegerField)
tablename = VirtualField(TextField)
class Meta:
extension_module = 'transitive_closure'
@classmethod
def descendants(cls, node, depth=None, include_node=False):
query = (model_class
.select(model_class, cls.depth.alias('depth'))
.join(cls, on=(source_key == cls.id))
.where(cls.root == node)
.objects())
if depth is not None:
query = query.where(cls.depth == depth)
elif not include_node:
query = query.where(cls.depth > 0)
return query
@classmethod
def ancestors(cls, node, depth=None, include_node=False):
query = (model_class
.select(model_class, cls.depth.alias('depth'))
.join(cls, on=(source_key == cls.root))
.where(cls.id == node)
.objects())
if depth:
query = query.where(cls.depth == depth)
elif not include_node:
query = query.where(cls.depth > 0)
return query
@classmethod
def siblings(cls, node, include_node=False):
if referencing_class is model_class:
# self-join
fk_value = node.__data__.get(foreign_key.name)
query = model_class.select().where(foreign_key == fk_value)
else:
# siblings as given in reference_class
siblings = (referencing_class
.select(referencing_key)
.join(cls, on=(foreign_key == cls.root))
.where((cls.id == node) & (cls.depth == 1)))
# the according models
query = (model_class
.select()
.where(source_key << siblings)
.objects())
if not include_node:
query = query.where(source_key != node)
return query
class Meta:
database = referencing_class._meta.database
options = {
'tablename': referencing_class._meta.table_name,
'idcolumn': referencing_key.column_name,
'parentcolumn': foreign_key.column_name}
primary_key = False
name = '%sClosure' % model_class.__name__
return type(name, (BaseClosureTable,), {'Meta': Meta})
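# A minimal usage sketch for ClosureTable (the "Category" model is an
# illustrative assumption; the transitive_closure loadable extension must be
# available to SQLite):
#
#     class Category(Model):
#         name = TextField()
#         parent = ForeignKeyField('self', null=True, backref='children')
#         class Meta:
#             database = db
#
#     CategoryClosure = ClosureTable(Category)
#     db.create_tables([Category, CategoryClosure])
#     CategoryClosure.descendants(some_category, include_node=False)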
class LSMTable(VirtualModel):
class Meta:
extension_module = 'lsm1'
filename = None
@classmethod
def clean_options(cls, options):
filename = cls._meta.filename
if not filename:
raise ValueError('LSM1 extension requires that you specify a '
'filename for the LSM database.')
else:
if len(filename) >= 2 and filename[0] != '"':
filename = '"%s"' % filename
if not cls._meta.primary_key:
raise ValueError('LSM1 models must specify a primary-key field.')
key = cls._meta.primary_key
if isinstance(key, AutoField):
raise ValueError('LSM1 models must explicitly declare a primary '
'key field.')
if not isinstance(key, (TextField, BlobField, IntegerField)):
raise ValueError('LSM1 key must be a TextField, BlobField, or '
'IntegerField.')
key._hidden = True
if isinstance(key, IntegerField):
data_type = 'UINT'
elif isinstance(key, BlobField):
data_type = 'BLOB'
else:
data_type = 'TEXT'
cls._meta.prefix_arguments = [filename, '"%s"' % key.name, data_type]
# Does the key map to a scalar value, or a tuple of values?
if len(cls._meta.sorted_fields) == 2:
cls._meta._value_field = cls._meta.sorted_fields[1]
else:
cls._meta._value_field = None
return options
@classmethod
def load_extension(cls, path='lsm.so'):
cls._meta.database.load_extension(path)
@staticmethod
def slice_to_expr(key, idx):
if idx.start is not None and idx.stop is not None:
return key.between(idx.start, idx.stop)
elif idx.start is not None:
return key >= idx.start
elif idx.stop is not None:
return key <= idx.stop
@staticmethod
def _apply_lookup_to_query(query, key, lookup):
if isinstance(lookup, slice):
expr = LSMTable.slice_to_expr(key, lookup)
if expr is not None:
query = query.where(expr)
return query, False
elif isinstance(lookup, Expression):
return query.where(lookup), False
else:
return query.where(key == lookup), True
@classmethod
def get_by_id(cls, pk):
query, is_single = cls._apply_lookup_to_query(
cls.select().namedtuples(),
cls._meta.primary_key,
pk)
if is_single:
try:
row = query.get()
except cls.DoesNotExist:
raise KeyError(pk)
return row[1] if cls._meta._value_field is not None else row
else:
return query
@classmethod
def set_by_id(cls, key, value):
if cls._meta._value_field is not None:
data = {cls._meta._value_field: value}
elif isinstance(value, tuple):
data = {}
for field, fval in zip(cls._meta.sorted_fields[1:], value):
data[field] = fval
elif isinstance(value, dict):
data = value
elif isinstance(value, cls):
data = value.__dict__
data[cls._meta.primary_key] = key
cls.replace(data).execute()
@classmethod
def delete_by_id(cls, pk):
query, is_single = cls._apply_lookup_to_query(
cls.delete(),
cls._meta.primary_key,
pk)
return query.execute()
OP.MATCH = 'MATCH'
def _sqlite_regexp(regex, value):
return re.search(regex, value) is not None
class SqliteExtDatabase(SqliteDatabase):
def __init__(self, database, c_extensions=None, rank_functions=True,
hash_functions=False, regexp_function=False,
bloomfilter=False, json_contains=False, *args, **kwargs):
super(SqliteExtDatabase, self).__init__(database, *args, **kwargs)
self._row_factory = None
if c_extensions and not CYTHON_SQLITE_EXTENSIONS:
raise ImproperlyConfigured('SqliteExtDatabase initialized with '
'C extensions, but shared library was '
'not found!')
prefer_c = CYTHON_SQLITE_EXTENSIONS and (c_extensions is not False)
if rank_functions:
if prefer_c:
register_rank_functions(self)
else:
self.register_function(bm25, 'fts_bm25')
self.register_function(rank, 'fts_rank')
self.register_function(bm25, 'fts_bm25f') # Fall back to bm25.
self.register_function(bm25, 'fts_lucene')
if hash_functions:
if not prefer_c:
raise ValueError('C extension required to register hash '
'functions.')
register_hash_functions(self)
if regexp_function:
self.register_function(_sqlite_regexp, 'regexp', 2)
if bloomfilter:
if not prefer_c:
raise ValueError('C extension required to use bloomfilter.')
register_bloomfilter(self)
if json_contains:
self.register_function(_json_contains, 'json_contains')
self._c_extensions = prefer_c
def _add_conn_hooks(self, conn):
super(SqliteExtDatabase, self)._add_conn_hooks(conn)
if self._row_factory:
conn.row_factory = self._row_factory
def row_factory(self, fn):
self._row_factory = fn
if CYTHON_SQLITE_EXTENSIONS:
SQLITE_STATUS_MEMORY_USED = 0
SQLITE_STATUS_PAGECACHE_USED = 1
SQLITE_STATUS_PAGECACHE_OVERFLOW = 2
SQLITE_STATUS_SCRATCH_USED = 3
SQLITE_STATUS_SCRATCH_OVERFLOW = 4
SQLITE_STATUS_MALLOC_SIZE = 5
SQLITE_STATUS_PARSER_STACK = 6
SQLITE_STATUS_PAGECACHE_SIZE = 7
SQLITE_STATUS_SCRATCH_SIZE = 8
SQLITE_STATUS_MALLOC_COUNT = 9
SQLITE_DBSTATUS_LOOKASIDE_USED = 0
SQLITE_DBSTATUS_CACHE_USED = 1
SQLITE_DBSTATUS_SCHEMA_USED = 2
SQLITE_DBSTATUS_STMT_USED = 3
SQLITE_DBSTATUS_LOOKASIDE_HIT = 4
SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE = 5
SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL = 6
SQLITE_DBSTATUS_CACHE_HIT = 7
SQLITE_DBSTATUS_CACHE_MISS = 8
SQLITE_DBSTATUS_CACHE_WRITE = 9
SQLITE_DBSTATUS_DEFERRED_FKS = 10
#SQLITE_DBSTATUS_CACHE_USED_SHARED = 11
def __status__(flag, return_highwater=False):
"""
Expose a sqlite3_status() call for a particular flag as a property of
the Database object.
"""
def getter(self):
result = sqlite_get_status(flag)
return result[1] if return_highwater else result
return property(getter)
def __dbstatus__(flag, return_highwater=False, return_current=False):
"""
Expose a sqlite3_dbstatus() call for a particular flag as a property of
the Database instance. Unlike sqlite3_status(), the dbstatus properties
pertain to the current connection.
"""
def getter(self):
if self._state.conn is None:
raise ImproperlyConfigured('database connection not opened.')
result = sqlite_get_db_status(self._state.conn, flag)
if return_current:
return result[0]
return result[1] if return_highwater else result
return property(getter)
class CSqliteExtDatabase(SqliteExtDatabase):
def __init__(self, *args, **kwargs):
self._conn_helper = None
self._commit_hook = self._rollback_hook = self._update_hook = None
self._replace_busy_handler = False
super(CSqliteExtDatabase, self).__init__(*args, **kwargs)
def init(self, database, replace_busy_handler=False, **kwargs):
super(CSqliteExtDatabase, self).init(database, **kwargs)
self._replace_busy_handler = replace_busy_handler
def _close(self, conn):
if self._commit_hook:
self._conn_helper.set_commit_hook(None)
if self._rollback_hook:
self._conn_helper.set_rollback_hook(None)
if self._update_hook:
self._conn_helper.set_update_hook(None)
return super(CSqliteExtDatabase, self)._close(conn)
def _add_conn_hooks(self, conn):
super(CSqliteExtDatabase, self)._add_conn_hooks(conn)
self._conn_helper = ConnectionHelper(conn)
if self._commit_hook is not None:
self._conn_helper.set_commit_hook(self._commit_hook)
if self._rollback_hook is not None:
self._conn_helper.set_rollback_hook(self._rollback_hook)
if self._update_hook is not None:
self._conn_helper.set_update_hook(self._update_hook)
if self._replace_busy_handler:
timeout = self._timeout or 5
self._conn_helper.set_busy_handler(timeout * 1000)
def on_commit(self, fn):
self._commit_hook = fn
if not self.is_closed():
self._conn_helper.set_commit_hook(fn)
return fn
def on_rollback(self, fn):
self._rollback_hook = fn
if not self.is_closed():
self._conn_helper.set_rollback_hook(fn)
return fn
def on_update(self, fn):
self._update_hook = fn
if not self.is_closed():
self._conn_helper.set_update_hook(fn)
return fn
def changes(self):
return self._conn_helper.changes()
@property
def last_insert_rowid(self):
return self._conn_helper.last_insert_rowid()
@property
def autocommit(self):
return self._conn_helper.autocommit()
def backup(self, destination, pages=None, name=None, progress=None):
return backup(self.connection(), destination.connection(),
pages=pages, name=name, progress=progress)
def backup_to_file(self, filename, pages=None, name=None,
progress=None):
return backup_to_file(self.connection(), filename, pages=pages,
name=name, progress=progress)
def blob_open(self, table, column, rowid, read_only=False):
return Blob(self, table, column, rowid, read_only)
# Status properties.
memory_used = __status__(SQLITE_STATUS_MEMORY_USED)
malloc_size = __status__(SQLITE_STATUS_MALLOC_SIZE, True)
malloc_count = __status__(SQLITE_STATUS_MALLOC_COUNT)
pagecache_used = __status__(SQLITE_STATUS_PAGECACHE_USED)
pagecache_overflow = __status__(SQLITE_STATUS_PAGECACHE_OVERFLOW)
pagecache_size = __status__(SQLITE_STATUS_PAGECACHE_SIZE, True)
scratch_used = __status__(SQLITE_STATUS_SCRATCH_USED)
scratch_overflow = __status__(SQLITE_STATUS_SCRATCH_OVERFLOW)
scratch_size = __status__(SQLITE_STATUS_SCRATCH_SIZE, True)
# Connection status properties.
lookaside_used = __dbstatus__(SQLITE_DBSTATUS_LOOKASIDE_USED)
lookaside_hit = __dbstatus__(SQLITE_DBSTATUS_LOOKASIDE_HIT, True)
lookaside_miss = __dbstatus__(SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE,
True)
lookaside_miss_full = __dbstatus__(SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL,
True)
cache_used = __dbstatus__(SQLITE_DBSTATUS_CACHE_USED, False, True)
#cache_used_shared = __dbstatus__(SQLITE_DBSTATUS_CACHE_USED_SHARED,
# False, True)
schema_used = __dbstatus__(SQLITE_DBSTATUS_SCHEMA_USED, False, True)
statement_used = __dbstatus__(SQLITE_DBSTATUS_STMT_USED, False, True)
cache_hit = __dbstatus__(SQLITE_DBSTATUS_CACHE_HIT, False, True)
cache_miss = __dbstatus__(SQLITE_DBSTATUS_CACHE_MISS, False, True)
cache_write = __dbstatus__(SQLITE_DBSTATUS_CACHE_WRITE, False, True)
def match(lhs, rhs):
return Expression(lhs, OP.MATCH, rhs)
def _parse_match_info(buf):
# See http://sqlite.org/fts3.html#matchinfo
bufsize = len(buf) # Length in bytes.
return [struct.unpack('@I', buf[i:i+4])[0] for i in range(0, bufsize, 4)]
def get_weights(ncol, raw_weights):
if not raw_weights:
return [1] * ncol
else:
weights = [0] * ncol
for i, weight in enumerate(raw_weights):
weights[i] = weight
return weights
# Ranking implementation, which parse matchinfo.
def rank(raw_match_info, *raw_weights):
# Handle match_info called w/default args 'pcx' - based on the example rank
# function http://sqlite.org/fts3.html#appendix_a
match_info = _parse_match_info(raw_match_info)
score = 0.0
p, c = match_info[:2]
weights = get_weights(c, raw_weights)
# matchinfo X value corresponds to, for each phrase in the search query, a
# list of 3 values for each column in the search table.
# So if we have a two-phrase search query and three columns of data, the
# following would be the layout:
# p0 : c0=[0, 1, 2], c1=[3, 4, 5], c2=[6, 7, 8]
# p1 : c0=[9, 10, 11], c1=[12, 13, 14], c2=[15, 16, 17]
for phrase_num in range(p):
phrase_info_idx = 2 + (phrase_num * c * 3)
for col_num in range(c):
weight = weights[col_num]
if not weight:
continue
col_idx = phrase_info_idx + (col_num * 3)
# The idea is that we count the number of times the phrase appears
# in this column of the current row, compared to how many times it
# appears in this column across all rows. The ratio of these values
# provides a rough way to score based on "high value" terms.
row_hits = match_info[col_idx]
all_rows_hits = match_info[col_idx + 1]
if row_hits > 0:
score += weight * (float(row_hits) / all_rows_hits)
return -score
# Okapi BM25 ranking implementation (FTS4 only).
def bm25(raw_match_info, *args):
"""
Usage:
# Format string *must* be pcnalx
# Second parameter to bm25 specifies the index of the column, on
    # the table being queried.
bm25(matchinfo(document_tbl, 'pcnalx'), 1) AS rank
"""
match_info = _parse_match_info(raw_match_info)
K = 1.2
B = 0.75
score = 0.0
P_O, C_O, N_O, A_O = range(4) # Offsets into the matchinfo buffer.
term_count = match_info[P_O] # n
col_count = match_info[C_O]
total_docs = match_info[N_O] # N
L_O = A_O + col_count
X_O = L_O + col_count
weights = get_weights(col_count, args)
for i in range(term_count):
for j in range(col_count):
weight = weights[j]
if weight == 0:
continue
x = X_O + (3 * (j + i * col_count))
term_frequency = float(match_info[x]) # f(qi, D)
docs_with_term = float(match_info[x + 2]) # n(qi)
# log( (N - n(qi) + 0.5) / (n(qi) + 0.5) )
idf = math.log(
(total_docs - docs_with_term + 0.5) /
(docs_with_term + 0.5))
if idf <= 0.0:
idf = 1e-6
doc_length = float(match_info[L_O + j]) # |D|
avg_length = float(match_info[A_O + j]) or 1. # avgdl
ratio = doc_length / avg_length
num = term_frequency * (K + 1)
b_part = 1 - B + (B * ratio)
denom = term_frequency + (K * b_part)
pc_score = idf * (num / denom)
score += (pc_score * weight)
return -score
def _json_contains(src_json, obj_json):
stack = []
try:
stack.append((json.loads(obj_json), json.loads(src_json)))
except:
# Invalid JSON!
return False
while stack:
obj, src = stack.pop()
if isinstance(src, dict):
if isinstance(obj, dict):
for key in obj:
if key not in src:
return False
stack.append((obj[key], src[key]))
elif isinstance(obj, list):
for item in obj:
if item not in src:
return False
elif obj not in src:
return False
elif isinstance(src, list):
if isinstance(obj, dict):
return False
elif isinstance(obj, list):
try:
for i in range(len(obj)):
stack.append((obj[i], src[i]))
except IndexError:
return False
elif obj not in src:
return False
elif obj != src:
return False
return True
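# A minimal sketch of the json_contains() SQL function registered when
# SqliteExtDatabase(..., json_contains=True) is used (the "KV" model is an
# illustrative assumption):
#
#     db = SqliteExtDatabase(':memory:', json_contains=True)
#     KV.select().where(fn.json_contains(KV.data, json.dumps({'k1': 'v1'})))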
| mit | -35,110,409,590,999,170 | 33.208565 | 79 | 0.568236 | false |
arunkgupta/gramps | gramps/gen/utils/file.py | 1 | 9733 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2009 Gary Burton
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""
File and folder related utility functions
"""
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
import os
import sys
import locale
import shutil
import logging
LOG = logging.getLogger(".gen.utils.file")
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from ..constfunc import win, mac
from ..const import TEMP_DIR, USER_HOME
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
_NEW_NAME_PATTERN = '%s%sUntitled_%d.%s'
#-------------------------------------------------------------------------
#
# Functions
#
#-------------------------------------------------------------------------
def find_file( filename):
# try the filename we got
try:
fname = filename
if os.path.isfile( filename):
return( filename)
except:
pass
# Build list of alternate encodings
encodings = set()
#Darwin returns "mac roman" for preferredencoding, but since it
#returns "UTF-8" for filesystemencoding, and that's first, this
#works.
for enc in [sys.getfilesystemencoding, locale.getpreferredencoding]:
try:
            encodings.add(enc())
except:
pass
encodings.add('UTF-8')
encodings.add('ISO-8859-1')
for enc in encodings:
try:
fname = filename.encode(enc)
if os.path.isfile( fname):
return fname
except:
pass
# not found
return ''
def find_folder( filename):
# try the filename we got
try:
fname = filename
if os.path.isdir( filename):
return( filename)
except:
pass
# Build list of alternate encodings
try:
encodings = [sys.getfilesystemencoding(),
locale.getpreferredencoding(),
'UTF-8', 'ISO-8859-1']
except:
encodings = [sys.getfilesystemencoding(), 'UTF-8', 'ISO-8859-1']
encodings = list(set(encodings))
for enc in encodings:
try:
fname = filename.encode(enc)
if os.path.isdir( fname):
return fname
except:
pass
# not found
return ''
def get_unicode_path_from_file_chooser(path):
"""
Return the Unicode version of a path string.
:type path: str
:param path: The path to be converted to Unicode
:rtype: unicode
:returns: The Unicode version of path.
"""
# make only unicode of path of type 'str'
if not (isinstance(path, str)):
return path
if win():
# in windows filechooser returns officially utf-8, not filesystemencoding
try:
return unicode(path)
except:
LOG.warn("Problem encountered converting string: %s." % path)
return unicode(path, sys.getfilesystemencoding(), errors='replace')
else:
try:
return unicode(path, sys.getfilesystemencoding())
except:
LOG.warn("Problem encountered converting string: %s." % path)
return unicode(path, sys.getfilesystemencoding(), errors='replace')
def get_unicode_path_from_env_var(path):
"""
Return the Unicode version of a path string.
:type path: str
:param path: The path to be converted to Unicode
:rtype: unicode
:returns: The Unicode version of path.
"""
# make only unicode of path of type 'str'
if not (isinstance(path, str)):
return path
if win():
        # In Windows a path/filename returned from an environment variable is in filesystemencoding
try:
new_path = unicode(path, sys.getfilesystemencoding())
return new_path
except:
LOG.warn("Problem encountered converting string: %s." % path)
return unicode(path, sys.getfilesystemencoding(), errors='replace')
else:
try:
return unicode(path)
except:
LOG.warn("Problem encountered converting string: %s." % path)
return unicode(path, sys.getfilesystemencoding(), errors='replace')
def get_new_filename(ext, folder='~/'):
ix = 1
while os.path.isfile(os.path.expanduser(_NEW_NAME_PATTERN %
(folder, os.path.sep, ix, ext))):
ix = ix + 1
return os.path.expanduser(_NEW_NAME_PATTERN % (folder, os.path.sep, ix, ext))
def get_empty_tempdir(dirname):
""" Return path to TEMP_DIR/dirname, a guaranteed empty directory
makes intervening directories if required
fails if _file_ by that name already exists,
or for inadequate permissions to delete dir/files or create dir(s)
"""
dirpath = os.path.join(TEMP_DIR,dirname)
if os.path.isdir(dirpath):
shutil.rmtree(dirpath)
os.makedirs(dirpath)
dirpath = get_unicode_path_from_env_var(dirpath)
return dirpath
def rm_tempdir(path):
"""Remove a tempdir created with get_empty_tempdir"""
if path.startswith(TEMP_DIR) and os.path.isdir(path):
shutil.rmtree(path)
def relative_path(original, base):
"""
Calculate the relative path from base to original, with base a directory,
and original an absolute path
On problems, original is returned unchanged
"""
if not os.path.isdir(base):
return original
#original and base must be absolute paths
if not os.path.isabs(base):
return original
if not os.path.isabs(original):
return original
original = os.path.normpath(original)
base = os.path.normpath(base)
# If the db_dir and obj_dir are on different drives (win only)
# then there cannot be a relative path. Return original obj_path
(base_drive, base) = os.path.splitdrive(base)
(orig_drive, orig_name) = os.path.splitdrive(original)
if base_drive.upper() != orig_drive.upper():
return original
# Starting from the filepath root, work out how much of the filepath is
# shared by base and target.
base_list = (base).split(os.sep)
target_list = (orig_name).split(os.sep)
# make sure '/home/person' and 'c:/home/person' both give
# list ['home', 'person']
base_list = filter(None, base_list)
target_list = filter(None, target_list)
i = -1
for i in range(min(len(base_list), len(target_list))):
        if base_list[i] != target_list[i]: break
else:
#if break did not happen we are here at end, and add 1.
i += 1
rel_list = [os.pardir] * (len(base_list)-i) + target_list[i:]
return os.path.join(*rel_list)
def media_path(db):
"""
Given a database, return the mediapath to use as basedir for media
"""
mpath = db.get_mediapath()
if mpath is None:
#use home dir
mpath = USER_HOME
return mpath
def media_path_full(db, filename):
"""
Given a database and a filename of a media, return the media filename
is full form, eg 'graves/tomb.png' becomes '/home/me/genea/graves/tomb.png
"""
if os.path.isabs(filename):
return filename
mpath = media_path(db)
return os.path.join(mpath, filename)
def search_for(name):
if name.startswith( '"' ):
name = name.split('"')[1]
else:
name = name.split()[0]
if win():
for i in os.environ['PATH'].split(';'):
fname = os.path.join(i, name)
if os.access(fname, os.X_OK) and not os.path.isdir(fname):
return 1
if os.access(name, os.X_OK) and not os.path.isdir(name):
return 1
else:
for i in os.environ['PATH'].split(':'):
fname = os.path.join(i, name)
if os.access(fname, os.X_OK) and not os.path.isdir(fname):
return 1
return 0
def fix_encoding(value, errors='strict'):
# The errors argument specifies the response when the input string can't be
# converted according to the encoding's rules. Legal values for this
# argument are 'strict' (raise a UnicodeDecodeError exception), 'replace'
# (add U+FFFD, 'REPLACEMENT CHARACTER'), or 'ignore' (just leave the
# character out of the Unicode result).
if not isinstance(value, unicode):
try:
return unicode(value)
except:
try:
if mac():
codeset = locale.getlocale()[1]
else:
codeset = locale.getpreferredencoding()
except:
codeset = "UTF-8"
return unicode(value, codeset, errors)
else:
return value
| gpl-2.0 | 5,058,031,207,568,249,000 | 31.228477 | 96 | 0.577109 | false |
sthirugn/robottelo | tests/robottelo/test_datafactory.py | 1 | 6243 | """Tests for module ``robottelo.datafactory``."""
import itertools
import random
import six
import unittest2
from robottelo.config import settings
from robottelo.constants import STRING_TYPES
from robottelo.datafactory import (
generate_strings_list,
invalid_emails_list,
invalid_id_list,
invalid_names_list,
invalid_values_list,
invalid_usernames_list,
InvalidArgumentError,
valid_data_list,
valid_emails_list,
valid_environments_list,
valid_hosts_list,
valid_hostgroups_list,
valid_labels_list,
valid_names_list,
valid_org_names_list,
valid_usernames_list)
if six.PY2:
import mock
else:
from unittest import mock
class FilteredDataPointTestCase(unittest2.TestCase):
"""Tests for :meth:`robottelo.datafactory.filtered_datapoint` decorator"""
@classmethod
def setUpClass(cls):
"""Backup the config smoke property"""
cls.backup = settings.run_one_datapoint
def test_filtered_datapoint_True(self):
"""Tests if run_one_datapoint=false returns all data points"""
settings.run_one_datapoint = False
self.assertEqual(len(generate_strings_list()), 7)
self.assertEqual(len(invalid_id_list()), 4)
self.assertEqual(len(invalid_emails_list()), 10)
self.assertEqual(len(invalid_names_list()), 7)
self.assertEqual(len(invalid_values_list()), 10)
self.assertEqual(len(invalid_usernames_list()), 4)
self.assertEqual(len(valid_labels_list()), 2)
self.assertEqual(len(valid_data_list()), 7)
self.assertEqual(len(valid_emails_list()), 8)
self.assertEqual(len(valid_environments_list()), 3)
self.assertEqual(len(valid_hosts_list()), 3)
self.assertEqual(len(valid_hostgroups_list()), 7)
self.assertEqual(len(valid_names_list()), 15)
self.assertEqual(len(valid_org_names_list()), 7)
self.assertEqual(len(valid_usernames_list()), 6)
def test_filtered_datapoint_False(self):
"""Tests if run_one_datapoint=True returns one data point"""
settings.run_one_datapoint = True
self.assertEqual(len(generate_strings_list()), 1)
self.assertEqual(len(invalid_emails_list()), 1)
self.assertEqual(len(invalid_id_list()), 1)
self.assertEqual(len(invalid_names_list()), 1)
self.assertEqual(len(invalid_values_list()), 1)
self.assertEqual(len(valid_data_list()), 1)
self.assertEqual(len(valid_emails_list()), 1)
self.assertEqual(len(valid_environments_list()), 1)
self.assertEqual(len(valid_hosts_list()), 1)
self.assertEqual(len(valid_hostgroups_list()), 1)
self.assertEqual(len(valid_labels_list()), 1)
self.assertEqual(len(valid_names_list()), 1)
self.assertEqual(len(valid_org_names_list()), 1)
self.assertEqual(len(valid_usernames_list()), 1)
@mock.patch('robottelo.datafactory.gen_string')
def test_generate_strings_list_remove_str(self, gen_string):
gen_string.side_effect = lambda str_type, _: str_type
str_types = STRING_TYPES[:]
remove_type = random.choice(str_types)
str_types.remove(remove_type)
str_types.sort()
string_list = generate_strings_list(exclude_types=[remove_type])
string_list.sort()
self.assertEqual(string_list, str_types)
@classmethod
def tearDownClass(cls):
"""Reset the config smoke property"""
settings.run_one_datapoint = cls.backup
class TestReturnTypes(unittest2.TestCase):
"""Tests for validating return types for different data factory
functions."""
def test_return_type(self):
"""This test validates return types for functions:
1. :meth:`robottelo.datafactory.generate_strings_list`
2. :meth:`robottelo.datafactory.invalid_emails_list`
3. :meth:`robottelo.datafactory.invalid_names_list`
4. :meth:`robottelo.datafactory.valid_data_list`
5. :meth:`robottelo.datafactory.valid_emails_list`
6. :meth:`robottelo.datafactory.valid_environments_list`
7. :meth:`robottelo.datafactory.valid_hosts_list`
8. :meth:`robottelo.datafactory.valid_hostgroups_list`
9. :meth:`robottelo.datafactory.valid_labels_list`
10. :meth:`robottelo.datafactory.valid_names_list`
11. :meth:`robottelo.datafactory.valid_org_names_list`
12. :meth:`robottelo.datafactory.valid_usernames_list`
13. :meth:`robottelo.datafactory.invalid_id_list`
"""
for item in itertools.chain(
generate_strings_list(),
invalid_emails_list(),
invalid_names_list(),
valid_data_list(),
valid_emails_list(),
valid_environments_list(),
valid_hosts_list(),
valid_hostgroups_list(),
valid_labels_list(),
valid_names_list(),
valid_org_names_list(),
valid_usernames_list()):
self.assertIsInstance(item, six.text_type)
for item in invalid_id_list():
if not (isinstance(item, (six.text_type, int)) or item is None):
self.fail('Unexpected data type')
class InvalidValuesListTestCase(unittest2.TestCase):
"""Tests for :meth:`robottelo.datafactory.invalid_values_list`"""
def test_return_values(self):
"""Tests if invalid values list returns right values based on input"""
# Test valid values
for value in 'api', 'cli', 'ui', None:
return_value = invalid_values_list(value)
self.assertIsInstance(return_value, list)
if value == 'ui':
self.assertEqual(len(return_value), 9)
else:
self.assertEqual(len(return_value), 10)
# Test invalid values
self.assertRaises(InvalidArgumentError, invalid_values_list, ' ')
self.assertRaises(InvalidArgumentError, invalid_values_list, 'UI')
self.assertRaises(InvalidArgumentError, invalid_values_list, 'CLI')
self.assertRaises(InvalidArgumentError, invalid_values_list, 'API')
self.assertRaises(InvalidArgumentError, invalid_values_list, 'invalid')
| gpl-3.0 | 3,780,458,886,232,439,300 | 39.538961 | 79 | 0.645042 | false |
pikamar/scoop | config/settings/common.py | 1 | 9970 | # -*- coding: utf-8 -*-
"""
Django settings for Scoop project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (/a/b/myfile.py - 3 = /)
APPS_DIR = ROOT_DIR.path('scoop')
env = environ.Env()
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
)
# Apps specific for this project go here.
LOCAL_APPS = (
'scoop.users', # custom users app
# Your stuff: custom apps go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES = (
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': 'scoop.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
("""pikamar""", 'Your email'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
'default': env.db("DATABASE_URL", default="postgres:///scoop"),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
],
},
},
]
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
# LOGGING CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'mail_admins'],
'propagate': True
}
}
}
########## CELERY
INSTALLED_APPS += ('scoop.taskapp.celery.CeleryConfig',)
# if you are not using the django database broker (e.g. rabbitmq, redis, memcached), you can remove the next line.
INSTALLED_APPS += ('kombu.transport.django',)
BROKER_URL = env("CELERY_BROKER_URL", default='django://')
########## END CELERY
# Location of root django.contrib.admin URL, use {% url 'admin:index' %}
ADMIN_URL = r'^admin/'
# Your common stuff: Below this line define 3rd party library settings
| bsd-3-clause | 1,577,594,145,833,667,000 | 34.105634 | 114 | 0.591675 | false |
SEJeff/aurproxy | tellapart/aurproxytest/proxy.py | 1 | 2883 | # Copyright 2015 TellApart, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import unittest
from tellapart.aurproxy.backends import ProxyBackendProvider
from tellapart.aurproxy.config import SourceEndpoint
from tellapart.aurproxy.proxy import ProxyUpdater
from tellapart.aurproxytest.backends.backend import (
build_proxy_configuration,
TstProxyBackend)
class ProxyUpdaterTests(unittest.TestCase):
def test_proxy_updater(self):
arg_sets = []
for add_s_a in [True, False]:
config, scope = build_proxy_configuration(add_share_adjusters=add_s_a)
arg_sets.append((config, scope, add_s_a))
try:
ProxyBackendProvider.register(TstProxyBackend)
for config, scope, add_share_adjusters in arg_sets:
config, scope = build_proxy_configuration(add_share_adjusters=True)
now = datetime.now()
proxy_updater = ProxyUpdater(backend=TstProxyBackend.NAME,
config=config,
update_period=0,
max_update_frequency=0)
# Newly initialized proxy updater should need to update
self.assertTrue(proxy_updater._should_update(now))
proxy_updater._try_update(now)
self.assertFalse(proxy_updater._should_update(now))
# Newly started proxy updater should need to update
proxy_updater.start(weight_adjustment_delay_seconds=0)
self.assertTrue(proxy_updater._should_update(now))
proxy_updater._try_update(now)
self.assertFalse(proxy_updater._should_update(now))
# Proxy updater that has been signaled by a source should need to
# update
scope.source.add(SourceEndpoint('127.0.0.1', 8080))
self.assertTrue(proxy_updater._should_update(now))
proxy_updater._try_update(now)
self.assertFalse(proxy_updater._should_update(now))
if add_share_adjusters:
# Proxy updater that has been signaled by a share_adjuster should
# need to update
scope.share_adjuster.set_share(.5)
self.assertTrue(proxy_updater._should_update(now))
proxy_updater._try_update(now)
self.assertFalse(proxy_updater._should_update(now))
finally:
ProxyBackendProvider.unregister(TstProxyBackend)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 3,237,293,110,633,695,000 | 39.605634 | 76 | 0.68505 | false |
e-koch/clean_masks | clean_mask_construct.py | 1 | 13421 |
import numpy as np
import scipy.ndimage as nd
from signal_id import RadioMask, Noise
from radio_beam import Beam
import astropy.units as u
from astropy.io import fits
from astropy.extern import six
import astropy
from skimage.morphology import reconstruction
'''
Routines for constructing a robust clean mask.
1) Pick two sigma levels, then dilate the higher into the lower.
2) Pick two sigma levels, remove any components in the lower cut if it
doesn't contain any pixels in the higher cut mask.
'''
class CleanMask(object):
"""
Creates a robust CLEAN mask.
Parameters
----------
cube : numpy.ndarray or astropy PrimaryHDU
low_cut : float or int
Lower sigma cut.
high_cut : float or int
Higher sigma cut.
beam : Beam
Object defining the beam.
pbcoverage : numpy.ndarray
Defines the beam coverage over the image for mosaics.
pb_thresh : float
Defines a threshold between 0 and 1 to remove regions with low beam
coverage in the image.
"""
def __init__(self, cube, low_cut, high_cut, beam=None, pbcoverage=None,
pb_thresh=0.7, iteraxis=0):
super(CleanMask, self).__init__()
self._cube = cube
self.low_cut = low_cut
self.high_cut = high_cut
if isinstance(beam, Beam):
self.beam = beam
elif beam is None:
self.beam = None
else:
raise TypeError("beam must be a Beam object or None.")
if pbcoverage is not None:
if isinstance(pbcoverage, six.string_types):
pbcoverage = fits.getdata(pbcoverage)
if not isinstance(pbcoverage, np.ndarray):
raise TypeError("pbcoverage must be a numpy array.")
if pb_thresh < 0.0 or pb_thresh > 1.0:
raise Warning("pb_thresh must be between 0 and 1.")
self.pb_mask = pbcoverage > pb_thresh
self.pb_flag = True
else:
self.pb_mask = np.ones_like(cube, dtype=bool)
self.pb_flag = False
if iteraxis > len(self.cube.shape):
raise IndexError(str(iteraxis)+"is greater than the total number"
" of axes.")
self.iteraxis = iteraxis
self.restor_dims = [np.newaxis if i == 1 else slice(None)
for i in self.cube.shape]
self.restor_dims.pop(self.iteraxis)
self._low_mask = None
self._high_mask = None
self._mask = None
self._pb_applied = False
self._smoothed = False
self._method = "None"
self._pb_thresh = pb_thresh
@property
def cube(self):
return Cube(self._cube)
def make_initial_masks(self, compute_slicewise=False):
'''
Calculate the initial masks.
'''
if compute_slicewise or self.cube.huge_flag:
sums = 0.0
num_finite = 0
for plane in self.cube.generate_slice(self.iteraxis):
sums += np.nansum(plane)
num_finite += np.isfinite(plane).sum()
mean = sums / num_finite
var = 0.0
for plane in self.cube.generate_slice(self.iteraxis):
var += np.nansum(np.power(plane - mean, 2), axis=None)
std = np.sqrt(var / (num_finite - 1))
print "Slice"
print mean
print std
low_thresh = mean + self.low_cut * std
high_thresh = mean + self.high_cut * std
self._low_mask = np.zeros(self.cube.shape, dtype=bool)
self._high_mask = np.zeros(self.cube.shape, dtype=bool)
for slices in self.cube.generate_slice(self.iteraxis,
return_slice=False):
self._low_mask[slices] = self.cube[slices] > low_thresh
self._high_mask[slices] = self.cube[slices] > high_thresh
else:
mean = np.nanmean(self.cube[:])
std = np.nanstd(self.cube[:])
print "Full"
print mean
print std
low_thresh = mean + self.low_cut * std
high_thresh = mean + self.high_cut * std
self._low_mask = self.cube > low_thresh
self._high_mask = self.cube > high_thresh
@property
def low_mask(self):
return self._low_mask
@property
def high_mask(self):
return self._high_mask
@property
def mask(self):
return self._mask
@property
def method(self):
return self._method
def to_RadioMask(self, which_mask='final'):
if which_mask is 'final':
return RadioMask(self._mask, wcs=None) # Load in WCS somehow
elif which_mask is 'low':
return RadioMask(self._low_mask, wcs=None) # Load in WCS somehow
elif which_mask is 'high':
return RadioMask(self._high_mask, wcs=None) # Load in WCS somehow
else:
raise TypeError("which_mask must be 'final', 'low', or 'high'.")
def dilate_into_low(self, verbose=False):
'''
Dilates the high mask into the low using morphological reconstruction.
'''
dilate_struct = nd.generate_binary_structure(2, 3)
for i, slices in enumerate(self.cube.generate_slice(self.iteraxis,
return_slice=False)):
# Skip empty channels
if self._high_mask[slices].max() is False:
continue
if verbose:
print "Iteration %s of %s" % (str(i+1),
self.cube.shape[self.iteraxis])
self.high_mask[slices] = \
reconstruction(self.high_mask[slices].squeeze(),
self.low_mask[slices].squeeze(),
selem=dilate_struct)[self.restor_dims]
self._mask = self._high_mask
self._method = "dilate"
def remove_high_components(self, min_pix=10, beam_check=False,
pixscale=None, verbose=False):
'''
Remove components in the low mask which are not
contained in the high mask.
The criteria is set by min_pix, or is based off of the beam area.
Note that if min_pix < beam area, min_pix has no effect.
'''
# 8-connectivity
connect = np.ones((3, 3))
# Objects must be at least the beam area to be kept.
if beam_check:
# Remove this when WCS object is added.
if pixscale is None:
raise TypeError("pixscale must be specified to use beamarea")
major = self.major.to(u.deg).value/pixscale
minor = self.minor.to(u.deg).value/pixscale
# Round down by default?
# Should this be made into an optional input?
beam_pix_area = np.floor(np.pi * major * minor)
else:
beam_pix_area = 0
for i, slices in enumerate(self.cube.generate_slice(self.iteraxis,
return_slice=False)):
if verbose:
print "Iteration %s of %s" % (str(i+1),
self.cube.shape[self.iteraxis])
# Skip empty channels
if self.high_mask[slices].max() is False:
continue
low_labels, low_num = nd.label(self._low_mask[slices], connect)
for j in range(1, low_num+1):
low_pix = zip(*np.where(low_labels == j))
high_pix = zip(*np.where(self._high_mask[slices] > 0))
# Now check for overlap
matches = list(set(low_pix) & set(high_pix))
if len(matches) >= min_pix:
continue
if len(matches) > beam_pix_area:
continue
x_pos = [x for x, y in low_pix]
y_pos = [y for x, y in low_pix]
# If less than match threshold, remove region in the low mask
self._low_mask[slices][x_pos, y_pos] = 0
self._mask = self._low_mask
self._method = "remove small"
def _smooth_it(self, kern_size='beam', pixscale=None):
'''
Apply median filter to smooth the edges of the mask.
'''
if kern_size is 'beam':
if pixscale is None:
raise TypeError("pixscale must be specified to use beamarea")
footprint = self.beam.as_tophat_kernel(pixscale)
elif isinstance(kern_size, float) or isinstance(kern_size, int):
major = kern_size
minor = kern_size
footprint = np.ones((major, minor))
else:
Warning("kern_size must be 'beam', or a float or integer.")
from scipy.ndimage import median_filter
for i, slices in enumerate(self.cube.generate_slice(self.iteraxis,
return_slice=False)):
self._mask[slices] = \
median_filter(self._mask[slices],
footprint=footprint)[self.restor_dims]
self._smoothed = True
def apply_pbmask(self):
'''
Apply the given primary beam coverage mask.
'''
if self.pb_flag:
self._mask *= self.pb_mask
self._pb_applied = True
def save_to_fits(self, filename, header=None, append_comments=True):
'''
Save the final mask as a FITS file. Optionally append the parameters
used to create the mask.
'''
if header is not None and append_comments:
header["COMMENT"] = "Settings used in CleanMask: "
header["COMMENT"] = "Mask created with method "+self.method
if self._smoothed:
header["COMMENT"] = "Mask smoothed with beam kernel."
if self.pb_flag:
header["COMMENT"] = \
"Mask corrected for pb coverage with a threshold of " + \
str(self._pb_thresh)
# Set BITPIX to 8 (unsigned integer)
header["BITPIX"] = 8
hdu = fits.PrimaryHDU(self.mask.astype(">i2"), header=header)
hdu.writeto(filename)
def make_mask(self, method="dilate", compute_slicewise=False,
smooth=False, kern_size='beam', pixscale=None,
verbose=False):
self.make_initial_masks(compute_slicewise=compute_slicewise)
if method == "dilate":
self.dilate_into_low(verbose=verbose)
elif method == "remove small":
self.remove_high_components(pixscale=pixscale, verbose=verbose)
else:
raise TypeError("method must be 'dilate' or 'remove small'.")
if smooth:
self._smooth_it(kern_size=kern_size, pixscale=pixscale)
self.apply_pbmask()
class Cube(object):
"""
Cube attempts to handle numpy arrays and FITS HDUs transparently. This
is useful for massive datasets, in particular. The data is loaded in only
for the requested slice.
It is certainly *NOT* robust or complete, but handles what is needed for
creating CLEAN masks.
"""
def __init__(self, cube, huge_flag=None, huge_thresh=5e9,
squeeze=True):
self.cube = cube
if huge_flag is not None:
self.huge_flag = huge_flag
else:
self.huge_flag = self.size > huge_thresh
@property
def cube(self):
return self._cube
@cube.setter
def cube(self, input_cube):
if isinstance(input_cube, six.string_types):
input_cube = self._load_fits(input_cube)
is_array = isinstance(input_cube, np.ndarray)
is_hdu = isinstance(input_cube, astropy.io.fits.hdu.image.PrimaryHDU)
if not is_array and not is_hdu:
raise TypeError("cube must be a numpy array or an astropy "
"PrimaryHDU. Input was of type " +
str(type(input_cube)))
self._cube = input_cube
def __getitem__(self, view):
if self.is_hdu:
return self.cube.data[view]
else:
return self.cube[view]
def _load_fits(self, fitsfile, ext=0):
return fits.open(fitsfile)[ext]
def _is_hdu(self):
if hasattr(self.cube, 'header'):
return True
return False
@property
def is_hdu(self):
return self._is_hdu()
@property
def shape(self):
return self.cube.shape
@property
def size(self):
return self.cube.size
def close(self):
'''
If an HDU, close it.
'''
if self.is_hdu:
self.cube.close()
def generate_slice(self, iteraxis, return_slice=True):
slices = [slice(None)] * len(self.shape)
for i in xrange(self.shape[iteraxis]):
slices[iteraxis] = i
if return_slice:
yield self[slices]
else:
yield slices
def __gt__(self, value):
return self[:] > value
def __lt__(self, value):
return self[:] < value
def __ge__(self, value):
return self[:] >= value
def __le__(self, value):
return self[:] <= value
| mit | -5,155,616,146,169,940,000 | 29.502273 | 81 | 0.543402 | false |
bsipocz/astropy | astropy/stats/tests/test_bayesian_blocks.py | 1 | 4916 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy.stats import bayesian_blocks, RegularEvents
def test_single_change_point(rseed=0):
rng = np.random.RandomState(rseed)
x = np.concatenate([rng.rand(100),
1 + rng.rand(200)])
bins = bayesian_blocks(x)
assert (len(bins) == 3)
assert_allclose(bins[1], 1, rtol=0.02)
def test_duplicate_events(rseed=0):
rng = np.random.RandomState(rseed)
t = rng.rand(100)
t[80:] = t[:20]
x = np.ones_like(t)
x[:20] += 1
bins1 = bayesian_blocks(t)
bins2 = bayesian_blocks(t[:80], x[:80])
assert_allclose(bins1, bins2)
def test_measures_fitness_homoscedastic(rseed=0):
rng = np.random.RandomState(rseed)
t = np.linspace(0, 1, 11)
x = np.exp(-0.5 * (t - 0.5) ** 2 / 0.01 ** 2)
sigma = 0.05
x = x + sigma * rng.randn(len(x))
bins = bayesian_blocks(t, x, sigma, fitness='measures')
assert_allclose(bins, [0, 0.45, 0.55, 1])
def test_measures_fitness_heteroscedastic():
rng = np.random.RandomState(1)
t = np.linspace(0, 1, 11)
x = np.exp(-0.5 * (t - 0.5) ** 2 / 0.01 ** 2)
sigma = 0.02 + 0.02 * rng.rand(len(x))
x = x + sigma * rng.randn(len(x))
bins = bayesian_blocks(t, x, sigma, fitness='measures')
assert_allclose(bins, [0, 0.45, 0.55, 1])
def test_regular_events():
rng = np.random.RandomState(0)
dt = 0.01
steps = np.concatenate([np.unique(rng.randint(0, 500, 100)),
np.unique(rng.randint(500, 1000, 200))])
t = dt * steps
# string fitness
bins1 = bayesian_blocks(t, fitness='regular_events', dt=dt)
assert (len(bins1) == 3)
assert_allclose(bins1[1], 5, rtol=0.05)
# class name fitness
bins2 = bayesian_blocks(t, fitness=RegularEvents, dt=dt)
assert_allclose(bins1, bins2)
# class instance fitness
bins3 = bayesian_blocks(t, fitness=RegularEvents(dt=dt))
assert_allclose(bins1, bins3)
def test_errors():
rng = np.random.RandomState(0)
t = rng.rand(100)
# x must be integer or None for events
with pytest.raises(ValueError):
bayesian_blocks(t, fitness='events', x=t)
# x must be binary for regular events
with pytest.raises(ValueError):
bayesian_blocks(t, fitness='regular_events', x=10 * t, dt=1)
# x must be specified for measures
with pytest.raises(ValueError):
bayesian_blocks(t, fitness='measures')
# sigma cannot be specified without x
with pytest.raises(ValueError):
bayesian_blocks(t, fitness='events', sigma=0.5)
# length of x must match length of t
with pytest.raises(ValueError):
bayesian_blocks(t, fitness='measures', x=t[:-1])
# repeated values in t fail when x is specified
t2 = t.copy()
t2[1] = t2[0]
with pytest.raises(ValueError):
bayesian_blocks(t2, fitness='measures', x=t)
# sigma must be broadcastable with x
with pytest.raises(ValueError):
bayesian_blocks(t, fitness='measures', x=t, sigma=t[:-1])
def test_fitness_function_results():
"""Test results for several fitness functions"""
rng = np.random.RandomState(42)
# Event Data
t = rng.randn(100)
edges = bayesian_blocks(t, fitness='events')
assert_allclose(edges, [-2.6197451, -0.71094865, 0.36866702, 1.85227818])
# Event data with repeats
t[80:] = t[:20]
edges = bayesian_blocks(t, fitness='events', p0=0.01)
assert_allclose(edges, [-2.6197451, -0.47432431, -0.46202823, 1.85227818])
# Regular event data
dt = 0.01
t = dt * np.arange(1000)
x = np.zeros(len(t))
N = len(t) // 10
x[rng.randint(0, len(t), N)] = 1
x[rng.randint(0, len(t) // 2, N)] = 1
edges = bayesian_blocks(t, x, fitness='regular_events', dt=dt)
assert_allclose(edges, [0, 5.105, 9.99])
# Measured point data with errors
t = 100 * rng.rand(20)
x = np.exp(-0.5 * (t - 50) ** 2)
sigma = 0.1
x_obs = x + sigma * rng.randn(len(x))
edges = bayesian_blocks(t, x_obs, sigma, fitness='measures')
expected = [4.360377, 48.456895, 52.597917, 99.455051]
assert_allclose(edges, expected)
# Optional arguments are passed (p0)
p0_sel = 0.05
edges = bayesian_blocks(t, x_obs, sigma, fitness='measures', p0=p0_sel)
assert_allclose(edges, expected)
# Optional arguments are passed (ncp_prior)
ncp_prior_sel = 4 - np.log(73.53 * p0_sel * (len(t) ** -0.478))
edges = bayesian_blocks(t, x_obs, sigma, fitness='measures',
ncp_prior=ncp_prior_sel)
assert_allclose(edges, expected)
# Optional arguments are passed (gamma)
gamma_sel = np.exp(-ncp_prior_sel)
edges = bayesian_blocks(t, x_obs, sigma, fitness='measures',
gamma=gamma_sel)
assert_allclose(edges, expected)
| bsd-3-clause | -3,090,498,557,147,872,000 | 28.97561 | 78 | 0.616965 | false |
deepmind/streetlearn | streetlearn/python/environment/__init__.py | 1 | 1791 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python interface to the StreetLearn engine.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from streetlearn.python.environment.coin_game import CoinGame
from streetlearn.python.environment.courier_game import CourierGame
from streetlearn.python.environment.curriculum_courier_game import CurriculumCourierGame
from streetlearn.python.environment.default_config import ApplyDefaults
from streetlearn.python.environment.default_config import CreateGame
from streetlearn.python.environment.exploration_game import ExplorationGame
from streetlearn.python.environment.game import Game
from streetlearn.python.environment.goal_instruction_game import GoalInstructionGame
from streetlearn.python.environment.incremental_instruction_game import IncrementalInstructionGame
from streetlearn.python.environment.observations import Observation
from streetlearn.python.environment.step_by_step_instruction_game import StepByStepInstructionGame
from streetlearn.python.environment.streetlearn import get_action_set
from streetlearn.python.environment.streetlearn import StreetLearn
from streetlearn.python.environment.thumbnail_helper import ThumbnailHelper
| apache-2.0 | 6,626,431,771,030,395,000 | 50.171429 | 98 | 0.835288 | false |
filannim/Temporal-Footprint | temporal_footprint/predict.py | 1 | 9133 | # -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2014 Michele Filannino
#
# gnTEAM, School of Computer Science, University of Manchester.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the GNU General Public License.
#
# author: Michele Filannino
# email: [email protected]
#
# For details, see www.cs.man.ac.uk/~filannim/
from __future__ import division
from collections import namedtuple
from collections import defaultdict
from datetime import date as Date
import re
import sys
import os
import subprocess
import tempfile
import time
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import numpy as np
from scipy.stats import norm
from wikipedia2text import wikipedia_text
from properties import properties
Gaussian = namedtuple('Gaussian', ['mu', 'sigma'])
TemporalFrame = namedtuple('TemporalFrame', ['start', 'end'])
TemporalFrameResult = namedtuple('TemporalFrameResult', ['source', 'dates', 'gaussian_curve', 'optimised_gaussian_curve', 'predicted_temporal_frame', 'error'])
def HeidelTime_annotate(text):
with tempfile.NamedTemporaryFile('w+t', delete=False) as f:
name = f.name
f.write(text)
os.chdir(properties['HEIDELTIME_DIR'])
process = subprocess.Popen(['java', '-jar', 'de.unihd.dbs.heideltime.standalone.jar', name, '-l', 'ENGLISH', '-t', 'NARRATIVES'], stdout=subprocess.PIPE)
output, err = process.communicate()
os.remove(name)
os.chdir(properties['MAIN_DIR'])
return str(output)
class Timer(object):
def __init__(self, name=None):
self.name = name
def __enter__(self):
self.tstart = time.time()
def __exit__(self, type, value, traceback):
if self.name:
print '%s' % self.name
print '(%.4fs)' % (time.time() - self.tstart)
class WikipediaPage(object):
def __init__(self, title, gold_start=None, gold_end=None):
self.title = title.strip()
self.text = re.sub(r'[<>]', '', wikipedia_text(title.strip(), fullURL=True)['text'])
self.HeidelTime_text = HeidelTime_annotate(self.text)
self.word_count = len(self.text.split())
self.DDDD_density = len(re.findall(r'[12][\d]{3}', self.text)) / len(self.text.split())
self.DDDD_sequences = len(re.findall(r'[12][\d]{3}', self.text))
self.temporal_frame = TemporalFrame(0.0, 0.0)
if gold_start and gold_end:
self.temporal_frame = TemporalFrame(float(gold_start), float(gold_end))
def __str__(self):
text = 'TEXT:' + self.text[0:100] + '\n'
text += '# DDDD sequences:' + str(len(re.findall(r'[12][\d]{3}', self.text))) + '\n'
text += '# characters :', str(len(self.text)) + '\n'
text += '# words :', str(len(self.text.split())) + '\n'
text += '# DDDD density :', str(len(re.findall(r'[12][\d]{3}', self.text)) / len(self.text.split()))
return text
class Predictor(object):
def __init__(self, Person, outlier_ray=7.9, gaussian_a=1.6, gaussian_b=-10):
self.person = Person
self.outlier_ray = outlier_ray
self.gaussian_a = gaussian_a
self.gaussian_b = gaussian_b
self.extraction_functions = (self.__extract_DDDD_dates, self.__extract_HeidelTime_dates)
#self.extraction_functions = (self.__extract_Baseline_dates, self.__extract_BaselineFiltered_dates, self.__extract_DDDD_dates, self.__extract_HeidelTime_dates)
self.results = self.__compute()
def __compute(self):
results = []
for function in self.extraction_functions:
source = re.findall(r'extract_([A-Za-z]+)_dates', str(function))[0]
results.append(self.__predict(source, function, self.outlier_ray, self.gaussian_a, self.gaussian_b))
return results
def __predict(self, source, function, outlier_ray, gaussian_a=1., gaussian_b=0.):
if source == 'Baseline':
dates = function(self.person.text)
predicted_temporal_frame = TemporalFrame(np.amin(dates), np.amax(dates))
error = self.__compute_error(self.person.temporal_frame, predicted_temporal_frame)
return TemporalFrameResult(source, dates, Gaussian(0,1), Gaussian(0,1), predicted_temporal_frame, error)
if source == 'BaselineFiltered':
try:
dates = function(self.person.text)
dates_filtered = self.__reject_outliers(dates, outlier_ray)
predicted_temporal_frame = TemporalFrame(np.amin(dates_filtered), np.amax(dates_filtered))
error = self.__compute_error(self.person.temporal_frame, predicted_temporal_frame)
return TemporalFrameResult(source, dates, Gaussian(0,1), Gaussian(0,1), predicted_temporal_frame, error)
except ValueError:
return TemporalFrameResult(source, dates, Gaussian(0,1), Gaussian(0,1), TemporalFrame(1000, Date.today().year), 1.0)
elif source == 'DDDD':
dates = function(self.person.text)
elif source == 'HeidelTime':
dates = function(self.person.HeidelTime_text)
else:
raise Exception('Function ' + source + 'not found!')
dates_filtered = self.__reject_outliers(dates, outlier_ray)
gaussian_curve = Gaussian._make(self.__normal_fit(dates_filtered))
optimised_gaussian_curve = Gaussian(gaussian_curve.mu+gaussian_b, gaussian_curve.sigma*gaussian_a)
predicted_temporal_frame = TemporalFrame(optimised_gaussian_curve.mu - optimised_gaussian_curve.sigma, optimised_gaussian_curve.mu + optimised_gaussian_curve.sigma)
error = self.__compute_error(self.person.temporal_frame, predicted_temporal_frame)
return TemporalFrameResult(source, dates, gaussian_curve, optimised_gaussian_curve, predicted_temporal_frame, error)
def __reject_outliers(self, dates, outlier_ray = 2.):
d = np.abs(dates - np.median(dates))
mdev = np.median(d)
s = d/mdev if mdev else 0
try:
r = dates[s<outlier_ray]
except IndexError:
r = np.array([])
if type(r) != np.ndarray:
return np.array([r])
else:
return r
def __normal_fit(self, dates):
y = map(float, dates) #y = [float(d) for d in dates]
return norm.fit(y) # returns (mu, sigma)
def __compute_error(self, gold_frame, predicted_frame):
upper_bound = np.amax((gold_frame.start, gold_frame.end, predicted_frame.start, predicted_frame.end)) #can be more elegantly rewritten
lower_bound = np.amin((gold_frame.start, gold_frame.end, predicted_frame.start, predicted_frame.end)) #can be more elegantly rewritten
union_frame = (upper_bound - lower_bound) + 1
try:
overlap = len(set(range(int(gold_frame.start), int(gold_frame.end)+1)) & set(range(int(predicted_frame.start), int(predicted_frame.end)+1)))#can I write something more NumPy-ish?
return 1 - (overlap/union_frame)
except ValueError:
return 1
def __extract_Baseline_dates(self, text):
result = np.array([float(y) for y in re.findall(r'[12][\d]{3}', text)])
if len(result)<2:
return np.array([1000,2014])
else:
return result
def __extract_BaselineFiltered_dates(self, text):
result = np.array([float(y) for y in re.findall(r'[12][\d]{3}', text)])
if len(result)<2:
return np.array([1000,2014])
else:
return result
def __extract_DDDD_dates(self, text):
result = np.array([float(y) for y in re.findall(r'[12][\d]{3}', text)])
if len(result)<2:
return np.array([1000,2014])
else:
return result
def __extract_HeidelTime_dates(self, text):
try:
dates = re.findall('value=\"([^\"]+)\"', text)
dates = [e[0:4] for e in dates if len(e)==4]
dates = [int(y) for y in dates if y.isdigit()]
return np.array(dates)
except:
return np.array([1000,2014])
def plot(self):
plt.close('all')
fig, (axarr) = plt.subplots(len(self.extraction_functions))
for i, result in enumerate(self.results):
try:
n, bins, patches = axarr[i].hist(result.dates, 100, normed=1, facecolor='blue', alpha=0.75)
axarr[i].plot(bins, mlab.normpdf(bins, result.gaussian_curve.mu, result.gaussian_curve.sigma), 'r--', linewidth=2)
axarr[i].axvspan(self.person.temporal_frame.start, self.person.temporal_frame.end, color='blue', alpha=0.3)
axarr[i].axvspan(result.predicted_temporal_frame.start, result.predicted_temporal_frame.end, color='red', alpha=0.3)
next_year = int(Date.today().year+1)
if i==0:
axarr[0].set_title(self.person.title.replace('_', ' ') + ' (' + str(int(self.person.temporal_frame.start)) + '-' + str(int(self.person.temporal_frame.end)) + ')\n' + result.source + ' prediction [' + str(int(result.predicted_temporal_frame.start)) + '-' + str(int(result.predicted_temporal_frame.end)) + '], E = ' + str(np.around(result.error, 4)))
else:
axarr[i].set_title(result.source + ' prediction [' + str(int(result.predicted_temporal_frame.start)) + '-' + str(int(result.predicted_temporal_frame.end)) + '], E = ' + str(np.around(result.error, 4)))
axarr[i].set_ylabel('freq')
axarr[i].yaxis.set_ticklabels([])
axarr[i].set_xticks(np.arange(1000,next_year, next_year/50))
axarr[i].set_xlim(1000,next_year)
print result.source, str(np.around(result.error, 4))
except:
continue
axarr[i].set_xlabel('Years (0 - ' + str(next_year) + ')')
plt.show(block=False)
#plt.savefig('pictures/' + self.person.title + '.png', dpi=300)
raw_input('Press Any Key To Exit')
def predict(title, start=None, end=None):
prediction = Predictor(WikipediaPage(title, gold_start=start, gold_end=end))
return prediction | apache-2.0 | -1,198,312,042,371,473,000 | 41.483721 | 353 | 0.688711 | false |
ibelikov/jimmy | jimmy/modules/throttle/tests/test_throttle.py | 1 | 15691 | # -*- coding: utf-8 -*-
# Copyright 2016 Mirantis, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import mock
import mockfs
import os
import pytest
import sys
import jsonschema
from jimmy import cli
from mock import call
from click.testing import CliRunner
from jimmy.lib.common import yaml_reader
from jimmy.tests import base
modules_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
jimmy_dir = os.path.dirname(modules_dir)
throttle_schema_path = os.path.join(modules_dir, 'throttle', 'resources', 'schema.yaml')
jenkins_yaml_path = os.path.join(jimmy_dir, 'sample', 'input', 'jenkins.yaml')
class TestThrottleModule(base.TestCase):
def setup_method(self, method):
self.runner = CliRunner()
def teardown_method(self, method):
mockfs.restore_builtins()
@mock.patch('jimmy.lib.core.load_py_modules')
@mock.patch('subprocess.call')
def test_cli_call(self, mock_subp, mock_modules):
with open(throttle_schema_path, 'r') as f:
mock_throttle_schema = f.read()
self.mfs = mockfs.replace_builtins()
self.mfs.add_entries({os.path.join(jimmy_dir, 'lib', 'schema.yaml'): self.jimmy_schema,
os.path.join(jimmy_dir, 'jimmy.yaml'): self.mock_jimmy_yaml,
throttle_schema_path: mock_throttle_schema,
jenkins_yaml_path: '\n'.join(
[
'jenkins:',
' throttle:',
' categories:',
' - category_name: category1',
' max_total_concurrent_builds: 1',
' max_concurrent_builds_per_node: 0',
' max_per_labeled_node:',
' - throttled_node_label: slave-label1',
' max_concurrent_per_labeled: 1',
' - throttled_node_label: slave-label2',
' max_concurrent_per_labeled: 1',
' - category_name: category2',
' max_total_concurrent_builds: 1',
' max_concurrent_builds_per_node: 0'
])
})
sys.path.insert(0, modules_dir)
import throttle
import read_source
sys.path.pop(0)
mock_modules.return_value = [throttle, read_source]
os.chdir(jimmy_dir)
self.runner.invoke(cli)
calls = [call(['java',
'-jar', '<< path to jenkins-cli.jar >>',
'-s', 'http://localhost:8080', 'groovy',
modules_dir + '/' + 'throttle/resources/jenkins.groovy',
'clearCategories'],
shell=False),
call(['java',
'-jar', '<< path to jenkins-cli.jar >>',
'-s', 'http://localhost:8080', 'groovy',
modules_dir + '/' + 'throttle/resources/jenkins.groovy',
'makeThrottleCategory',
'category1', '1', '0', 'slave-label1,slave-label2', '1,1'],
shell=False),
call(['java',
'-jar', '<< path to jenkins-cli.jar >>',
'-s', 'http://localhost:8080', 'groovy',
modules_dir + '/' + 'throttle/resources/jenkins.groovy',
'makeThrottleCategory',
'category2', '1', '0', '', ''],
shell=False)
]
mock_subp.assert_has_calls(calls, any_order=True)
assert 3 == mock_subp.call_count, "subprocess call should be equal to 3"
class TestThrottleSchema(object):
def setup_method(self, method):
with open(throttle_schema_path, 'r') as f:
mock_throttle_schema = f.read()
self.mfs = mockfs.replace_builtins()
self.mfs.add_entries({throttle_schema_path: mock_throttle_schema})
self.schema = yaml_reader.read(throttle_schema_path)
def teardown_method(self, method):
mockfs.restore_builtins()
def test_valid_repo_data(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- category_name: category1',
' max_total_concurrent_builds: 1',
' max_concurrent_builds_per_node: 0',
' max_per_labeled_node:',
' - throttled_node_label: slave-label1',
' max_concurrent_per_labeled: 1'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
jsonschema.validate(repo_data, self.schema)
def test_validation_fail_if_category_name_is_not_string(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- category_name: 123',
' max_total_concurrent_builds: 1',
' max_concurrent_builds_per_node: 0',
' max_per_labeled_node:',
' - throttled_node_label: slave-label1',
' max_concurrent_per_labeled: 1'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "123 is not of type 'string'"
def test_validation_fail_if_max_total_concurrent_builds_is_not_num(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- category_name: category1',
' max_total_concurrent_builds: test',
' max_concurrent_builds_per_node: 0',
' max_per_labeled_node:',
' - throttled_node_label: slave-label1',
' max_concurrent_per_labeled: 1'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'test' is not of type 'number'"
def test_validation_fail_if_max_concurrent_builds_per_node_is_not_num(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- category_name: category1',
' max_total_concurrent_builds: 1',
' max_concurrent_builds_per_node: test',
' max_per_labeled_node:',
' - throttled_node_label: slave-label1',
' max_concurrent_per_labeled: 1'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'test' is not of type 'number'"
def test_validation_fail_if_throttled_node_label_is_not_string(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- category_name: category1',
' max_total_concurrent_builds: 1',
' max_concurrent_builds_per_node: 0',
' max_per_labeled_node:',
' - throttled_node_label: 123',
' max_concurrent_per_labeled: 1'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "123 is not of type 'string'"
def test_validation_fail_if_max_concurrent_per_labeled_is_not_num(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- category_name: category1',
' max_total_concurrent_builds: 1',
' max_concurrent_builds_per_node: 0',
' max_per_labeled_node:',
' - throttled_node_label: slave-label1',
' max_concurrent_per_labeled: test'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'test' is not of type 'number'"
def test_password_validation_fail_for_category_name_required_property(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- max_total_concurrent_builds: 1',
' max_concurrent_builds_per_node: 0',
' max_per_labeled_node:',
' - throttled_node_label: slave-label1',
' max_concurrent_per_labeled: 1'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'category_name' is a required property"
def test_password_validation_fail_for_max_total_conc_builds_required_property(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- category_name: category1',
' max_concurrent_builds_per_node: 0',
' max_per_labeled_node:',
' - throttled_node_label: slave-label1',
' max_concurrent_per_labeled: 1'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'max_total_concurrent_builds' is a required property"
def test_password_validation_fail_for_max_conc_builds_per_node_required_property(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- category_name: category1',
' max_total_concurrent_builds: 1',
' max_per_labeled_node:',
' - throttled_node_label: slave-label1',
' max_concurrent_per_labeled: 1'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'max_concurrent_builds_per_node' is a required property"
def test_password_validation_fail_for_throttled_node_label_required_property(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- category_name: category1',
' max_total_concurrent_builds: 1',
' max_concurrent_builds_per_node: 0',
' max_per_labeled_node:',
' - max_concurrent_per_labeled: 1'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'throttled_node_label' is a required property"
def test_password_validation_fail_for_max_concurrent_per_labeled_required_property(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- category_name: category1',
' max_total_concurrent_builds: 1',
' max_concurrent_builds_per_node: 0',
' max_per_labeled_node:',
' - throttled_node_label: slave-label1'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'max_concurrent_per_labeled' is a required property"
def test_validation_fail_if_categories_not_array(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories: 123'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "123 is not of type 'array'"
def test_validation_fail_if_max_per_labeled_node_not_array(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- category_name: category1',
' max_total_concurrent_builds: 1',
' max_concurrent_builds_per_node: 0',
' max_per_labeled_node: 123'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "123 is not of type 'array'"
def test_validation_fail_for_categories_additional_properties(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- category_name: category1',
' max_total_concurrent_builds: 1',
' max_concurrent_builds_per_node: 0',
' test: test'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "Additional properties are not allowed ('test' was unexpected)"
def test_validation_fail_for_max_per_labeled_node_additional_properties(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'categories:',
'- category_name: category1',
' max_total_concurrent_builds: 1',
' max_concurrent_builds_per_node: 0',
' max_per_labeled_node:',
' - throttled_node_label: slave-label1',
' max_concurrent_per_labeled: 1',
' test: test'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "Additional properties are not allowed ('test' was unexpected)"
| apache-2.0 | 8,527,842,525,457,008,000 | 43.450425 | 103 | 0.548467 | false |
ipittau/ldutil | ldutil.py | 1 | 13182 | #!/usr/bin/python
#
# ldutil helps you to manage library dependencies on a filesystem
#
# Copyright (C) 2014 Ilario Pittau (ilariopittau[at]gmail[dot]com)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
import getopt, sys, subprocess, os, fnmatch, pickle, re, hashlib
from stat import *
def usage():
print "Usage: ldutil [-p <pickle_file>] [-o <output_file>] [-i <input_file> [c <check_file>] [s <search_file>]] [d <search_dir>] [vnml] [h]"
print " -o output_file : recurse the search_dir for reverse tree and write the tree in the file"
print " -p pickle_file : use pickle function to dump the lib_list dependency tree"
print " -i input_file : use the input_file as tree for the binaries, the file must be generated with the pickle feature"
print " -c check_file : print the list of libs that needs check_file"
print " -s search_file : print all occurences of search_file"
print " -d search_dir : use this dir as base for find and recurse"
print " -n : print all binaries that nobody use"
print " -m : print all binaries that have a unsatisfied dependency"
print " -l : manage symbolic links (EXPERIMENTAL)"
print " -v : verbose_flag mode on"
print "Examples:"
print " ldutil -d /usr/lib -p /tmp/pickle"
print " ldutil -d /usr/lib -i /tmp/pickle -s libc.so"
print " ldutil -d /usr/lib -i /tmp/pickle -n"
#Search a lib starting from search_dir
#@param filename: the file to search on the search_dir
#@param search_dir: the directory to use as root
#@param link_enable_flag: if true returns the link otherwise the linked file
#@return the file if found, the linked file if flag is False, an empty string if not found
def findout(filename, search_dir, link_enable_flag=False):
#print "finding " + filename + "..."
find_list = []
for root, dirs, files in os.walk(search_dir):
for basename in fnmatch.filter(files, filename):
found_lib = os.path.join(root, basename)
mode = os.lstat(found_lib).st_mode
if (link_enable_flag == False):
if S_ISLNK(mode):
refound = os.path.dirname(found_lib) +"/"+ os.readlink(found_lib)
#print "found " + filename + " -> " + refound
return refound
#print "found " + found_lib
return found_lib
return ""
#Function that analyzes a binary and its dependencies recursively
#@param current: file to be analyzed, uses readelf -d to check dependencies, current is the full path
#@param father: needed for the recursive step, it is the parent binary. father is "nobody" for the first step
#@return the list of dependencies of the current binary
def analyze(current, father):
#print "analyze " + current
lib_basename = os.path.basename(current)
if lib_basename in lib_list:
return []
lib_list[lib_basename] = []
#readelf regExp
re_readelf = re.compile('\(NEEDED\)[\s]+[A-Za-z\s]+: \[(.+)\]')
try:
readelf_output = subprocess.check_output("readelf -d "+current,stderr=subprocess.STDOUT,shell=True)
except subprocess.CalledProcessError, err:
#print "readelf error " + current
lib_list.pop(lib_basename)
return []
#print "analyzed " + current
#parse the output of the readelf command
sub_lib_list = re_readelf.findall(readelf_output)
#print str(len(sub_lib_list))
    #analyze the used libraries
for sub_lib in sub_lib_list:
        #The lib has already been analyzed; we add it and its dependencies
if sub_lib in lib_list:
lib_list[lib_basename].append(sub_lib)
lib_list[lib_basename] += lib_list[sub_lib]
else:
#Search if the dependency is satisfied
found_lib = findout(sub_lib, search_dir);
#if yes add the lib and all dependencies calling a recursive step
if (found_lib != ""):
lib_list[lib_basename].append(os.path.basename(found_lib))
lib_list[lib_basename] += analyze(found_lib, current)
#otherwise write that the dependency is unsatisfied ("miss" + name of the lib)
else:
#print sub_lib + " miss for " + lib_basename
lib_list[lib_basename].append("miss " + sub_lib)
#print lib_list[lib_basename]
#this is useful to remove duplicates
lib_list[lib_basename] = list(set(lib_list[lib_basename]))
return lib_list[lib_basename]
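# Hedged sketch of analyze()'s side effect on the module-level lib_list dict
# (the binary and library names are invented):
#   analyze("/usr/bin/foo", "nobody")
#   lib_list["foo"] -> ["libc.so.6", "libm.so.6", "miss libbar.so.1"]
# Dependencies that cannot be located under search_dir are recorded with a
# "miss " prefix instead of a resolved basename.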
#function that walks the directory and calls the recursive function analyze for each binary
#prints progress for each file analyzed
#@param research_dir: directory to use as start point
def create_dependency_tree(research_dir):
#print subprocess.check_output("find "+search_dir, shell=True)
print "going.. " + research_dir
total_file_num = 0
file_num = 0
for root, dirs, files in os.walk(research_dir):
for new_file in files:
total_file_num = total_file_num + 1
for root, dirs, files in os.walk(research_dir):
#Analyze only files, not links
for new_file in files:
file_num = file_num + 1
print ("Analyze " + str(file_num) +"/"+ str(total_file_num) )
sys.stdout.write("\033[F")
pathname = os.path.join(root, new_file)
mode = os.lstat(pathname).st_mode
#links are skipped
if S_ISLNK(mode):
#print "link " + pathname + " " + str(mode)
pass
elif S_ISREG(mode):
# It's a file, call the recursive function to analyze it
#print "analyze " + pathname
analyze(pathname, "nobody")
else:
# Unknown file type, print a message
print 'Skipping %s' % pathname
pass
#Function to calculate the reverse tree starting from the dependency list
def reverse_analysis():
for lib in lib_list.keys():
#Add the lib to reverse_lib_list if it's not present
if lib not in reverse_lib_list:
reverse_lib_list[lib] = []
for father_lib in lib_list.keys():
if lib in lib_list[father_lib]:
reverse_lib_list[lib].append(father_lib)
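# Hedged sketch for reverse_analysis() (hypothetical contents): with
#   lib_list == {"foo": ["libc.so.6"], "libc.so.6": []}
# a call to reverse_analysis() leaves
#   reverse_lib_list == {"foo": [], "libc.so.6": ["foo"]}
# i.e. each key maps to the binaries/libraries that depend on it.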
def link_managment():
print "Duplicate libs"
for lib in lib_list.keys():
lib_found = findout(lib, search_dir, True)
if re.match('.+\.so.+',lib):#if is a lib
#filename = os.path.splitext(lib)
lib_no_version = lib.split(".so.")[0] + ".so"
for num_version in lib.split(".so.")[1].split("."):
fullname_lib_no_version = os.path.join(os.path.dirname(lib_found), lib_no_version)
#lib_no_version_found = findout(lib_no_version, search_dir, True)
print "Tring... " + fullname_lib_no_version
if not os.path.exists(fullname_lib_no_version) or not S_ISLNK(os.lstat(fullname_lib_no_version).st_mode):
print lib_no_version + " -> " + lib_found + " ?"
if os.path.exists(fullname_lib_no_version):
print fullname_lib_no_version + " exist, do you want replace it with the symlink?"
else:
print fullname_lib_no_version + " not exist, do you want create a new the symlink?"
response = raw_input()
if response == "y" :
print "create: " + fullname_lib_no_version
os.symlink(lib, fullname_lib_no_version)
else:
print "pass..."
else:
print fullname_lib_no_version + " link exist!"
lib_no_version += "." + num_version
#Main (this is the point where the program start)
try:
opts, args = getopt.getopt(sys.argv[1:], "hi:vc:nd:s:o:p:ml", ["input="])
except getopt.GetoptError, err:
print "error"
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
usage()
sys.exit(2)
#variable initialization
#input file prepared with the dependency tree
input_file = ""
#if enabled prints a lot of debug
verbose_flag = False
#if enabled print libs that nobody uses
nobody_flag = False
#if enabled print libs that have unsatisfied dependency
miss_flag = False
#if enabled start the interactive management of symbolic links
link_flag = False
#print the list of libs that need check_file
check_file = ""
#Print lib_list and reverse_lib_list
print_all=False
#default fs directory to scan
search_dir="/tmp/asd"
#used to print all occurrences of the lib
search_file=""
#file output with the filter output
output_file=""
#file output with the prescanned dependency tree
pickle_file=""
try:
#option parsing
for o, a in opts:
if o == "-v":
verbose_flag = True
elif o in ("-h", "--help"):
usage()
sys.exit(0)
elif o in ("-i", "--input"):
input_file = a
elif o in ("-o", "--output"):
output_file = a
elif o in ("-c"):
check_file = a
elif o in ("-n"):
nobody_flag = True
elif o in ("-d"):
search_dir = a
elif o in ("-s"):
search_file = a
elif o in ("-p"):
pickle_file = a
elif o in ("-m"):
miss_flag = True
elif o in ("-l"):
link_flag = True
else:
assert False, "unhandled option"
#Contains all libs on the system and their dependencies
lib_list = dict()
#Contains all libs on the system and the bins/libs that depend on it
reverse_lib_list=dict()
#If the output file is set, create the dependency tree
if input_file == "":
create_dependency_tree(search_dir)
else:
#otherwise open the pickle file and load the lib_list
input_fd = open(input_file,"rb");
lib_list = pickle.load(input_fd);
input_fd.close()
#Open the pickle file and dump the list on it then exit
if pickle_file != "":
pickle_fd = open(pickle_file,"wb");
pickle.dump(lib_list,pickle_fd);
pickle_fd.close()
print ""
sys.exit(0)
#Perform the reverse analysis after the scan of the folder
reverse_analysis()
#Output file is used to save the output of the request feature
if output_file != "" :
output_fd = open(output_file, "w")
#MAIN SWITCH over the implemented features
#link management is EXPERIMENTAL
if (link_flag == True):
link_managment()
elif( check_file != ""):
    #Prints the binaries that use the check_file
if (check_file in reverse_lib_list):
#Print the checkfile full name path
print "This is the list of binaries that are using " + findout(check_file, search_dir)
#Print every lib in its reverse list
for lib in reverse_lib_list[check_file]:
if output_file != "" :
                output_fd.write(findout(lib, search_dir) + "\n")
print " " + findout(lib, search_dir)
else:
print "not found"
elif (nobody_flag):
        #Prints the libraries that nobody is using and that theoretically could be deleted
print "This is the list of libraries (.so) that nobody uses:"
for k, v in reverse_lib_list.iteritems():
if len(reverse_lib_list[k]) == 0 :
#print only the .so files
if re.match('.+\.so*',k):
if output_file != "" :
output_fd.write(findout(k, search_dir) + "\n")
lib_found = findout(k, search_dir)
if lib_found == "":
print k + " not found!"
else:
print lib_found
elif (search_file):
        #Prints each occurrence of the searched file
for lib_filter in fnmatch.filter(lib_list, "*"+search_file+"*"):
search_file_found = findout(lib_filter, search_dir)
print "###################################################################"
if S_ISLNK(os.lstat(search_file_found).st_mode):
print search_file_found + " is a link"
else:
print search_file_found + " is not a link"
print lib_filter + " is used by:"
print reverse_lib_list[lib_filter]
print lib_filter + " uses:"
print lib_list[lib_filter]
elif (miss_flag):
        #Print each missing dependency
for k, v in lib_list.iteritems():
for basename in fnmatch.filter(lib_list[k], "miss*"):
print k + " " + basename
elif (print_all):
#Print the list and reverse list
for k, v in lib_list.iteritems():
print k
print v
print "###################"
for k, v in reverse_lib_list.iteritems():
print k
print v
except KeyboardInterrupt:
print "Byee!"
#Close the file
if output_file != "" :
output_fd.close()
| gpl-2.0 | 2,598,428,152,028,018,000 | 34.723577 | 144 | 0.616826 | false |
nlehuby/OSM_snippets | navitia-to-OSM (bus routes)/route_to_html.py | 1 | 6357 | #-------------------------------------------------------------------------------
# Author: nlehuby
#
# Created: 28/01/2015
# Copyright: (c) nlehuby 2015
# Licence: MIT
#-------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import datetime
import csv
def prepare_osm_routes():
"""
    Rebuild the required OSM info
"""
source_csv = csv.reader(open("collecte/relations_routes.csv", "rb"))
result_list = []
for an_osm_route in source_csv :
if len(an_osm_route) < 6:
print ("il faut appeler Overpass pour récupérer les infos manquantes : TODO")
else :
result_list.append(an_osm_route)
    #sort
result_int = []
result_other= []
for a_route in result_list:
try:
int(a_route[1])
result_int.append(a_route)
except ValueError :
result_other.append(a_route)
result_int.sort(key=lambda osm_route: int(osm_route[1]))
result_other.sort(key=lambda osm_route: osm_route[1])
result_list = result_int + result_other
with open("rendu/sources/osm_parcours.csv", "wb") as csv_file:
writer = csv.writer(csv_file, delimiter=',')
for line in result_list:
writer.writerow(line)
def prepare_navitia_routes():
"""
    Rebuild the required navitia info
"""
source_csv = csv.reader(open("rapprochement/osm_navitia.csv", "rb"))
result_list = []
for a_nav_route in source_csv :
if len(a_nav_route) < 5:
print ("il faut appeler navitia pour récupérer les infos manquantes : TODO")
else :
result_list.append(a_nav_route)
with open("rendu/sources/navitia_parcours.csv", "wb") as csv_file:
writer = csv.writer(csv_file, delimiter=',')
for line in result_list:
writer.writerow(line)
def to_html():
"""
    Build the index page referencing the pages of each OSM route
"""
prepare_osm_routes()
prepare_navitia_routes()
osm_csv = csv.reader(open("rendu/sources/osm_parcours.csv", "rb"))
navitia_csv = list(csv.reader(open("rendu/sources/navitia_parcours.csv", "rb")))
autocomplete = {"parcours_osm":[]}
template_table = ''
for osm_route in osm_csv:
print osm_route[2]
        #build the object for autocompletion
parcours = {}
parcours['value'] = osm_route [0]
parcours['label'] = "[{}] {} > {}".format(osm_route[5], osm_route[1], osm_route[3])
        rapp = [route for route in navitia_csv if route[0] == osm_route[0]] #match the OSM route with the navitia route
print (rapp)
if rapp != []:
print ('ok')
parcours['url'] = "bus_route.htm?osm={}&navitia={}".format(osm_route[0], rapp[0][1] )
#current_osm_route = {'id' : osm_route[0], 'name': osm_route[2], 'ref': osm_route[1], 'nb_stops': osm_route[4]}
#current_nav_route = {'id' : rapp[0][1], 'name' : rapp[0][2], 'nb_stops': rapp[0][3]}
            #add to the index
liste_template = """
<tr>
<td> %%network%%
</td>
<td> %%route_code%%
</td>
<td>
<a href="bus_route.htm?osm=%%relation_id%%&navitia=%%navitia_id%%">%%relation_name%%</a>
</td>
<td>
%%OSM_nb_stops%%/%%navitia_nb_stops%%
</td>
<td>
<progress value="%%OSM_nb_stops%%" max="%%navitia_nb_stops%%">état de la carto de la route</progress>
</td>
<tr>
"""
liste_template = liste_template.replace("%%route_code%%", osm_route[1] )
liste_template = liste_template.replace("%%relation_id%%", osm_route[0] )
liste_template = liste_template.replace("%%relation_name%%", osm_route[2] )
liste_template = liste_template.replace("%%network%%", osm_route[5] )
liste_template = liste_template.replace("%%OSM_nb_stops%%", osm_route[4] )
liste_template = liste_template.replace("%%navitia_nb_stops%%", rapp[0][3] )
liste_template = liste_template.replace("%%navitia_id%%", rapp[0][1] )
else:
print ('ko')
parcours['url'] = "bus_route.htm?osm={}".format(osm_route[0])
liste_template = """
<tr>
<td> %%network%%
</td>
<td> %%route_code%%
</td>
<td>
<a href="bus_route.htm?osm=%%relation_id%%">%%relation_name%%</a>
</td>
<td colspan=2>
%%OSM_nb_stops%%
</td>
<tr>
"""
liste_template = liste_template.replace("%%route_code%%", osm_route[1] )
liste_template = liste_template.replace("%%relation_id%%", osm_route[0] )
liste_template = liste_template.replace("%%relation_name%%", osm_route[2] )
liste_template = liste_template.replace("%%network%%", osm_route[5] )
liste_template = liste_template.replace("%%OSM_nb_stops%%", osm_route[4] )
        #persist autocomplete data
autocomplete['parcours_osm'].append(parcours)
template_table += liste_template
    #persist the index page
now = datetime.datetime.now()
mon_fichier = open("rendu/assets/template_liste.html", "r")
template = mon_fichier.read()
mon_fichier.close()
template = template.replace("%%tableau_des_routes%%", template_table )
template = template.replace("%%date_du_jour%%", now.strftime("%d/%m/%Y %H:%M") )
mon_fichier = open("rendu/index.html", "wb")
mon_fichier.write(template)
mon_fichier.close()
    #persist the autocomplete file
json.dump(autocomplete, open('rendu/osm_parcours.json', "w"), indent=4)
if __name__ == '__main__':
to_html()
| mit | -1,326,381,942,221,232,000 | 34.436782 | 125 | 0.504732 | false |
spacy-io/spaCy | spacy/tests/test_misc.py | 1 | 11269 | import pytest
import os
import ctypes
from pathlib import Path
from spacy.about import __version__ as spacy_version
from spacy import util
from spacy import prefer_gpu, require_gpu, require_cpu
from spacy.ml._precomputable_affine import PrecomputableAffine
from spacy.ml._precomputable_affine import _backprop_precomputable_affine_padding
from spacy.util import dot_to_object, SimpleFrozenList
from thinc.api import Config, Optimizer, ConfigValidationError
from spacy.training.batchers import minibatch_by_words
from spacy.lang.en import English
from spacy.lang.nl import Dutch
from spacy.language import DEFAULT_CONFIG_PATH
from spacy.schemas import ConfigSchemaTraining
from thinc.api import get_current_ops, NumpyOps, CupyOps
from .util import get_random_doc
@pytest.fixture
def is_admin():
"""Determine if the tests are run as admin or not."""
try:
admin = os.getuid() == 0
except AttributeError:
admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
return admin
@pytest.mark.parametrize("text", ["hello/world", "hello world"])
def test_util_ensure_path_succeeds(text):
path = util.ensure_path(text)
assert isinstance(path, Path)
@pytest.mark.parametrize(
"package,result", [("numpy", True), ("sfkodskfosdkfpsdpofkspdof", False)]
)
def test_util_is_package(package, result):
"""Test that an installed package via pip is recognised by util.is_package."""
assert util.is_package(package) is result
@pytest.mark.parametrize("package", ["thinc"])
def test_util_get_package_path(package):
"""Test that a Path object is returned for a package name."""
path = util.get_package_path(package)
assert isinstance(path, Path)
def test_PrecomputableAffine(nO=4, nI=5, nF=3, nP=2):
model = PrecomputableAffine(nO=nO, nI=nI, nF=nF, nP=nP).initialize()
assert model.get_param("W").shape == (nF, nO, nP, nI)
tensor = model.ops.alloc((10, nI))
Y, get_dX = model.begin_update(tensor)
assert Y.shape == (tensor.shape[0] + 1, nF, nO, nP)
dY = model.ops.alloc((15, nO, nP))
ids = model.ops.alloc((15, nF))
ids[1, 2] = -1
dY[1] = 1
assert not model.has_grad("pad")
d_pad = _backprop_precomputable_affine_padding(model, dY, ids)
assert d_pad[0, 2, 0, 0] == 1.0
ids.fill(0.0)
dY.fill(0.0)
dY[0] = 0
ids[1, 2] = 0
ids[1, 1] = -1
ids[1, 0] = -1
dY[1] = 1
ids[2, 0] = -1
dY[2] = 5
d_pad = _backprop_precomputable_affine_padding(model, dY, ids)
assert d_pad[0, 0, 0, 0] == 6
assert d_pad[0, 1, 0, 0] == 1
assert d_pad[0, 2, 0, 0] == 0
def test_prefer_gpu():
try:
import cupy # noqa: F401
prefer_gpu()
assert isinstance(get_current_ops(), CupyOps)
except ImportError:
assert not prefer_gpu()
def test_require_gpu():
try:
import cupy # noqa: F401
require_gpu()
assert isinstance(get_current_ops(), CupyOps)
except ImportError:
with pytest.raises(ValueError):
require_gpu()
def test_require_cpu():
require_cpu()
assert isinstance(get_current_ops(), NumpyOps)
try:
import cupy # noqa: F401
require_gpu()
assert isinstance(get_current_ops(), CupyOps)
except ImportError:
pass
require_cpu()
assert isinstance(get_current_ops(), NumpyOps)
def test_ascii_filenames():
"""Test that all filenames in the project are ASCII.
See: https://twitter.com/_inesmontani/status/1177941471632211968
"""
root = Path(__file__).parent.parent
for path in root.glob("**/*"):
assert all(ord(c) < 128 for c in path.name), path.name
def test_load_model_blank_shortcut():
"""Test that using a model name like "blank:en" works as a shortcut for
spacy.blank("en").
"""
nlp = util.load_model("blank:en")
assert nlp.lang == "en"
assert nlp.pipeline == []
with pytest.raises(ImportError):
util.load_model("blank:fjsfijsdof")
@pytest.mark.parametrize(
"version,constraint,compatible",
[
(spacy_version, spacy_version, True),
(spacy_version, f">={spacy_version}", True),
("3.0.0", "2.0.0", False),
("3.2.1", ">=2.0.0", True),
("2.2.10a1", ">=1.0.0,<2.1.1", False),
("3.0.0.dev3", ">=1.2.3,<4.5.6", True),
("n/a", ">=1.2.3,<4.5.6", None),
("1.2.3", "n/a", None),
("n/a", "n/a", None),
],
)
def test_is_compatible_version(version, constraint, compatible):
assert util.is_compatible_version(version, constraint) is compatible
@pytest.mark.parametrize(
"constraint,expected",
[
("3.0.0", False),
("==3.0.0", False),
(">=2.3.0", True),
(">2.0.0", True),
("<=2.0.0", True),
(">2.0.0,<3.0.0", False),
(">=2.0.0,<3.0.0", False),
("!=1.1,>=1.0,~=1.0", True),
("n/a", None),
],
)
def test_is_unconstrained_version(constraint, expected):
assert util.is_unconstrained_version(constraint) is expected
@pytest.mark.parametrize(
"a1,a2,b1,b2,is_match",
[
("3.0.0", "3.0", "3.0.1", "3.0", True),
("3.1.0", "3.1", "3.2.1", "3.2", False),
("xxx", None, "1.2.3.dev0", "1.2", False),
],
)
def test_minor_version(a1, a2, b1, b2, is_match):
assert util.get_minor_version(a1) == a2
assert util.get_minor_version(b1) == b2
assert util.is_minor_version_match(a1, b1) is is_match
assert util.is_minor_version_match(a2, b2) is is_match
@pytest.mark.parametrize(
"dot_notation,expected",
[
(
{"token.pos": True, "token._.xyz": True},
{"token": {"pos": True, "_": {"xyz": True}}},
),
(
{"training.batch_size": 128, "training.optimizer.learn_rate": 0.01},
{"training": {"batch_size": 128, "optimizer": {"learn_rate": 0.01}}},
),
],
)
def test_dot_to_dict(dot_notation, expected):
result = util.dot_to_dict(dot_notation)
assert result == expected
assert util.dict_to_dot(result) == dot_notation
def test_set_dot_to_object():
config = {"foo": {"bar": 1, "baz": {"x": "y"}}, "test": {"a": {"b": "c"}}}
with pytest.raises(KeyError):
util.set_dot_to_object(config, "foo.bar.baz", 100)
with pytest.raises(KeyError):
util.set_dot_to_object(config, "hello.world", 100)
with pytest.raises(KeyError):
util.set_dot_to_object(config, "test.a.b.c", 100)
util.set_dot_to_object(config, "foo.bar", 100)
assert config["foo"]["bar"] == 100
util.set_dot_to_object(config, "foo.baz.x", {"hello": "world"})
assert config["foo"]["baz"]["x"]["hello"] == "world"
assert config["test"]["a"]["b"] == "c"
util.set_dot_to_object(config, "foo", 123)
assert config["foo"] == 123
util.set_dot_to_object(config, "test", "hello")
assert dict(config) == {"foo": 123, "test": "hello"}
@pytest.mark.parametrize(
"doc_sizes, expected_batches",
[
([400, 400, 199], [3]),
([400, 400, 199, 3], [4]),
([400, 400, 199, 3, 200], [3, 2]),
([400, 400, 199, 3, 1], [5]),
([400, 400, 199, 3, 1, 1500], [5]), # 1500 will be discarded
([400, 400, 199, 3, 1, 200], [3, 3]),
([400, 400, 199, 3, 1, 999], [3, 3]),
([400, 400, 199, 3, 1, 999, 999], [3, 2, 1, 1]),
([1, 2, 999], [3]),
([1, 2, 999, 1], [4]),
([1, 200, 999, 1], [2, 2]),
([1, 999, 200, 1], [2, 2]),
],
)
def test_util_minibatch(doc_sizes, expected_batches):
docs = [get_random_doc(doc_size) for doc_size in doc_sizes]
tol = 0.2
batch_size = 1000
batches = list(
minibatch_by_words(docs, size=batch_size, tolerance=tol, discard_oversize=True)
)
assert [len(batch) for batch in batches] == expected_batches
max_size = batch_size + batch_size * tol
for batch in batches:
assert sum([len(doc) for doc in batch]) < max_size
@pytest.mark.parametrize(
"doc_sizes, expected_batches",
[
([400, 4000, 199], [1, 2]),
([400, 400, 199, 3000, 200], [1, 4]),
([400, 400, 199, 3, 1, 1500], [1, 5]),
([400, 400, 199, 3000, 2000, 200, 200], [1, 1, 3, 2]),
([1, 2, 9999], [1, 2]),
([2000, 1, 2000, 1, 1, 1, 2000], [1, 1, 1, 4]),
],
)
def test_util_minibatch_oversize(doc_sizes, expected_batches):
""" Test that oversized documents are returned in their own batch"""
docs = [get_random_doc(doc_size) for doc_size in doc_sizes]
tol = 0.2
batch_size = 1000
batches = list(
minibatch_by_words(docs, size=batch_size, tolerance=tol, discard_oversize=False)
)
assert [len(batch) for batch in batches] == expected_batches
def test_util_dot_section():
cfg_string = """
[nlp]
lang = "en"
pipeline = ["textcat"]
[components]
[components.textcat]
factory = "textcat"
[components.textcat.model]
@architectures = "spacy.TextCatBOW.v1"
exclusive_classes = true
ngram_size = 1
no_output_layer = false
"""
nlp_config = Config().from_str(cfg_string)
en_nlp = util.load_model_from_config(nlp_config, auto_fill=True)
default_config = Config().from_disk(DEFAULT_CONFIG_PATH)
default_config["nlp"]["lang"] = "nl"
nl_nlp = util.load_model_from_config(default_config, auto_fill=True)
# Test that creation went OK
assert isinstance(en_nlp, English)
assert isinstance(nl_nlp, Dutch)
assert nl_nlp.pipe_names == []
assert en_nlp.pipe_names == ["textcat"]
# not exclusive_classes
assert en_nlp.get_pipe("textcat").model.attrs["multi_label"] is False
# Test that default values got overwritten
assert en_nlp.config["nlp"]["pipeline"] == ["textcat"]
assert nl_nlp.config["nlp"]["pipeline"] == [] # default value []
# Test proper functioning of 'dot_to_object'
with pytest.raises(KeyError):
dot_to_object(en_nlp.config, "nlp.pipeline.tagger")
with pytest.raises(KeyError):
dot_to_object(en_nlp.config, "nlp.unknownattribute")
T = util.registry.resolve(nl_nlp.config["training"], schema=ConfigSchemaTraining)
assert isinstance(dot_to_object({"training": T}, "training.optimizer"), Optimizer)
def test_simple_frozen_list():
t = SimpleFrozenList(["foo", "bar"])
assert t == ["foo", "bar"]
assert t.index("bar") == 1 # okay method
with pytest.raises(NotImplementedError):
t.append("baz")
with pytest.raises(NotImplementedError):
t.sort()
with pytest.raises(NotImplementedError):
t.extend(["baz"])
with pytest.raises(NotImplementedError):
t.pop()
t = SimpleFrozenList(["foo", "bar"], error="Error!")
with pytest.raises(NotImplementedError):
t.append("baz")
def test_resolve_dot_names():
config = {
"training": {"optimizer": {"@optimizers": "Adam.v1"}},
"foo": {"bar": "training.optimizer", "baz": "training.xyz"},
}
result = util.resolve_dot_names(config, ["training.optimizer"])
assert isinstance(result[0], Optimizer)
with pytest.raises(ConfigValidationError) as e:
util.resolve_dot_names(config, ["training.xyz", "training.optimizer"])
errors = e.value.errors
assert len(errors) == 1
assert errors[0]["loc"] == ["training", "xyz"]
| mit | 5,117,938,492,506,105,000 | 31.289398 | 88 | 0.59961 | false |
NicoVarg99/daf-recipes | ckan/ckan/ckan/ckan/logic/validators.py | 1 | 27447 | # encoding: utf-8
import collections
import datetime
from itertools import count
import re
import mimetypes
import ckan.lib.navl.dictization_functions as df
import ckan.logic as logic
import ckan.lib.helpers as h
from ckan.model import (MAX_TAG_LENGTH, MIN_TAG_LENGTH,
PACKAGE_NAME_MIN_LENGTH, PACKAGE_NAME_MAX_LENGTH,
PACKAGE_VERSION_MAX_LENGTH,
VOCABULARY_NAME_MAX_LENGTH,
VOCABULARY_NAME_MIN_LENGTH)
import ckan.authz as authz
from ckan.common import _
Invalid = df.Invalid
StopOnError = df.StopOnError
Missing = df.Missing
missing = df.missing
def owner_org_validator(key, data, errors, context):
value = data.get(key)
if value is missing or value is None:
if not authz.check_config_permission('create_unowned_dataset'):
raise Invalid(_('An organization must be provided'))
data.pop(key, None)
raise df.StopOnError
model = context['model']
user = context['user']
user = model.User.get(user)
if value == '':
if not authz.check_config_permission('create_unowned_dataset'):
raise Invalid(_('An organization must be provided'))
return
group = model.Group.get(value)
if not group:
raise Invalid(_('Organization does not exist'))
group_id = group.id
if not(user.sysadmin or
authz.has_user_permission_for_group_or_org(
group_id, user.name, 'create_dataset')):
raise Invalid(_('You cannot add a dataset to this organization'))
data[key] = group_id
def package_id_not_changed(value, context):
package = context.get('package')
if package and value != package.id:
raise Invalid('Cannot change value of key from %s to %s. '
'This key is read-only' % (package.id, value))
return value
def int_validator(value, context):
'''
Return an integer for value, which may be a string in base 10 or
a numeric type (e.g. int, long, float, Decimal, Fraction). Return
None for None or empty/all-whitespace string values.
:raises: ckan.lib.navl.dictization_functions.Invalid for other
inputs or non-whole values
'''
if value is None:
return None
if hasattr(value, 'strip') and not value.strip():
return None
try:
whole, part = divmod(value, 1)
except TypeError:
try:
return int(value)
except ValueError:
pass
else:
if not part:
try:
return int(whole)
except TypeError:
pass # complex number: fail like int(complex) does
raise Invalid(_('Invalid integer'))
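# Hedged usage sketch for int_validator (not part of the original module; the
# sample values are assumptions and {} stands in for the unused context):
#   int_validator('10', {})   -> 10
#   int_validator(3.0, {})    -> 3
#   int_validator('   ', {})  -> None
#   int_validator('3.5', {})  -> raises Invalid('Invalid integer')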
def natural_number_validator(value, context):
value = int_validator(value, context)
if value < 0:
raise Invalid(_('Must be a natural number'))
return value
def is_positive_integer(value, context):
value = int_validator(value, context)
if value < 1:
        raise Invalid(_('Must be a positive integer'))
return value
def boolean_validator(value, context):
'''
Return a boolean for value.
Return value when value is a python bool type.
Return True for strings 'true', 'yes', 't', 'y', and '1'.
Return False in all other cases, including when value is an empty string or
None
'''
if value is missing or value is None:
return False
if isinstance(value, bool):
return value
if value.lower() in ['true', 'yes', 't', 'y', '1']:
return True
return False
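# Hedged usage sketch for boolean_validator (sample values are assumptions;
# the context argument is unused here):
#   boolean_validator('yes', {})  -> True
#   boolean_validator('0', {})    -> False
#   boolean_validator(None, {})   -> False
#   boolean_validator(True, {})   -> True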
def isodate(value, context):
if isinstance(value, datetime.datetime):
return value
if value == '':
return None
try:
date = h.date_str_to_datetime(value)
except (TypeError, ValueError), e:
raise Invalid(_('Date format incorrect'))
return date
def no_http(value, context):
model = context['model']
session = context['session']
if 'http:' in value:
raise Invalid(_('No links are allowed in the log_message.'))
return value
def package_id_exists(value, context):
model = context['model']
session = context['session']
result = session.query(model.Package).get(value)
if not result:
raise Invalid('%s: %s' % (_('Not found'), _('Dataset')))
return value
def package_id_does_not_exist(value, context):
model = context['model']
session = context['session']
result = session.query(model.Package).get(value)
if result:
raise Invalid(_('Dataset id already exists'))
return value
def package_name_exists(value, context):
model = context['model']
session = context['session']
result = session.query(model.Package).filter_by(name=value).first()
if not result:
raise Invalid(_('Not found') + ': %s' % value)
return value
def package_id_or_name_exists(package_id_or_name, context):
'''Return the given package_id_or_name if such a package exists.
:raises: ckan.lib.navl.dictization_functions.Invalid if there is no
package with the given id or name
'''
model = context['model']
session = context['session']
result = session.query(model.Package).get(package_id_or_name)
if result:
return package_id_or_name
result = session.query(model.Package).filter_by(
name=package_id_or_name).first()
if not result:
raise Invalid('%s: %s' % (_('Not found'), _('Dataset')))
return package_id_or_name
def resource_id_exists(value, context):
model = context['model']
session = context['session']
if not session.query(model.Resource).get(value):
raise Invalid('%s: %s' % (_('Not found'), _('Resource')))
return value
def user_id_exists(user_id, context):
'''Raises Invalid if the given user_id does not exist in the model given
in the context, otherwise returns the given user_id.
'''
model = context['model']
session = context['session']
result = session.query(model.User).get(user_id)
if not result:
raise Invalid('%s: %s' % (_('Not found'), _('User')))
return user_id
def user_id_or_name_exists(user_id_or_name, context):
'''Return the given user_id_or_name if such a user exists.
:raises: ckan.lib.navl.dictization_functions.Invalid if no user can be
found with the given id or user name
'''
model = context['model']
session = context['session']
result = session.query(model.User).get(user_id_or_name)
if result:
return user_id_or_name
result = session.query(model.User).filter_by(name=user_id_or_name).first()
if not result:
raise Invalid('%s: %s' % (_('Not found'), _('User')))
return user_id_or_name
def group_id_exists(group_id, context):
'''Raises Invalid if the given group_id does not exist in the model given
in the context, otherwise returns the given group_id.
'''
model = context['model']
session = context['session']
result = session.query(model.Group).get(group_id)
if not result:
raise Invalid('%s: %s' % (_('Not found'), _('Group')))
return group_id
def group_id_or_name_exists(reference, context):
'''
Raises Invalid if a group identified by the name or id cannot be found.
'''
model = context['model']
result = model.Group.get(reference)
if not result:
raise Invalid(_('That group name or ID does not exist.'))
return reference
def activity_type_exists(activity_type):
'''Raises Invalid if there is no registered activity renderer for the
given activity_type. Otherwise returns the given activity_type.
    This just uses object_id_validators as a lookup, so it is very safe.
'''
if activity_type in object_id_validators:
return activity_type
else:
raise Invalid('%s: %s' % (_('Not found'), _('Activity type')))
# A dictionary mapping activity_type values from activity dicts to functions
# for validating the object_id values from those same activity dicts.
object_id_validators = {
'new package' : package_id_exists,
'changed package' : package_id_exists,
'deleted package' : package_id_exists,
'follow dataset' : package_id_exists,
'new user' : user_id_exists,
'changed user' : user_id_exists,
'follow user' : user_id_exists,
'new group' : group_id_exists,
'changed group' : group_id_exists,
'deleted group' : group_id_exists,
'new organization' : group_id_exists,
'changed organization' : group_id_exists,
'deleted organization' : group_id_exists,
'follow group' : group_id_exists,
}
def object_id_validator(key, activity_dict, errors, context):
'''Validate the 'object_id' value of an activity_dict.
Uses the object_id_validators dict (above) to find and call an 'object_id'
validator function for the given activity_dict's 'activity_type' value.
Raises Invalid if the model given in context contains no object of the
correct type (according to the 'activity_type' value of the activity_dict)
with the given ID.
Raises Invalid if there is no object_id_validator for the activity_dict's
'activity_type' value.
'''
activity_type = activity_dict[('activity_type',)]
if object_id_validators.has_key(activity_type):
object_id = activity_dict[('object_id',)]
return object_id_validators[activity_type](object_id, context)
else:
raise Invalid('There is no object_id validator for '
'activity type "%s"' % activity_type)
name_match = re.compile('[a-z0-9_\-]*$')
def name_validator(value, context):
'''Return the given value if it's a valid name, otherwise raise Invalid.
If it's a valid name, the given value will be returned unmodified.
This function applies general validation rules for names of packages,
groups, users, etc.
Most schemas also have their own custom name validator function to apply
custom validation rules after this function, for example a
``package_name_validator()`` to check that no package with the given name
already exists.
:raises ckan.lib.navl.dictization_functions.Invalid: if ``value`` is not
a valid name
'''
if not isinstance(value, basestring):
raise Invalid(_('Names must be strings'))
# check basic textual rules
if value in ['new', 'edit', 'search']:
raise Invalid(_('That name cannot be used'))
if len(value) < 2:
raise Invalid(_('Must be at least %s characters long') % 2)
if len(value) > PACKAGE_NAME_MAX_LENGTH:
raise Invalid(_('Name must be a maximum of %i characters long') % \
PACKAGE_NAME_MAX_LENGTH)
if not name_match.match(value):
raise Invalid(_('Must be purely lowercase alphanumeric '
'(ascii) characters and these symbols: -_'))
return value
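# Hedged usage sketch for name_validator (the names below are invented; the
# context argument is unused here):
#   name_validator('my-dataset_01', {})  -> 'my-dataset_01'
#   name_validator('Bad Name!', {})      -> raises Invalid (uppercase/space/punctuation)
#   name_validator('new', {})            -> raises Invalid ('new' is reserved)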
def package_name_validator(key, data, errors, context):
model = context['model']
session = context['session']
package = context.get('package')
query = session.query(model.Package.name).filter_by(name=data[key])
if package:
package_id = package.id
else:
package_id = data.get(key[:-1] + ('id',))
if package_id and package_id is not missing:
query = query.filter(model.Package.id <> package_id)
result = query.first()
if result:
errors[key].append(_('That URL is already in use.'))
value = data[key]
if len(value) < PACKAGE_NAME_MIN_LENGTH:
raise Invalid(
_('Name "%s" length is less than minimum %s') % (value, PACKAGE_NAME_MIN_LENGTH)
)
if len(value) > PACKAGE_NAME_MAX_LENGTH:
raise Invalid(
_('Name "%s" length is more than maximum %s') % (value, PACKAGE_NAME_MAX_LENGTH)
)
def package_version_validator(value, context):
if len(value) > PACKAGE_VERSION_MAX_LENGTH:
raise Invalid(_('Version must be a maximum of %i characters long') % \
PACKAGE_VERSION_MAX_LENGTH)
return value
def duplicate_extras_key(key, data, errors, context):
unflattened = df.unflatten(data)
extras = unflattened.get('extras', [])
extras_keys = []
for extra in extras:
if not extra.get('deleted'):
extras_keys.append(extra['key'])
for extra_key in set(extras_keys):
extras_keys.remove(extra_key)
if extras_keys:
key_ = ('extras_validation',)
assert key_ not in errors
errors[key_] = [_('Duplicate key "%s"') % extras_keys[0]]
def group_name_validator(key, data, errors, context):
model = context['model']
session = context['session']
group = context.get('group')
query = session.query(model.Group.name).filter_by(name=data[key])
if group:
group_id = group.id
else:
group_id = data.get(key[:-1] + ('id',))
if group_id and group_id is not missing:
query = query.filter(model.Group.id <> group_id)
result = query.first()
if result:
errors[key].append(_('Group name already exists in database'))
def tag_length_validator(value, context):
if len(value) < MIN_TAG_LENGTH:
raise Invalid(
_('Tag "%s" length is less than minimum %s') % (value, MIN_TAG_LENGTH)
)
if len(value) > MAX_TAG_LENGTH:
raise Invalid(
_('Tag "%s" length is more than maximum %i') % (value, MAX_TAG_LENGTH)
)
return value
def tag_name_validator(value, context):
tagname_match = re.compile('[\w \-.]*$', re.UNICODE)
if not tagname_match.match(value):
raise Invalid(_('Tag "%s" must be alphanumeric '
'characters or symbols: -_.') % (value))
return value
def tag_not_uppercase(value, context):
tagname_uppercase = re.compile('[A-Z]')
if tagname_uppercase.search(value):
raise Invalid(_('Tag "%s" must not be uppercase' % (value)))
return value
def tag_string_convert(key, data, errors, context):
'''Takes a list of tags that is a comma-separated string (in data[key])
and parses tag names. These are added to the data dict, enumerated. They
are also validated.'''
if isinstance(data[key], basestring):
tags = [tag.strip() \
for tag in data[key].split(',') \
if tag.strip()]
else:
tags = data[key]
current_index = max( [int(k[1]) for k in data.keys() if len(k) == 3 and k[0] == 'tags'] + [-1] )
for num, tag in zip(count(current_index+1), tags):
data[('tags', num, 'name')] = tag
for tag in tags:
tag_length_validator(tag, context)
tag_name_validator(tag, context)
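# Hedged usage sketch for tag_string_convert (flattened-key layout as used by
# the dictization code; the tag names are invented):
#   data = {('tag_string',): 'science, open data'}
#   tag_string_convert(('tag_string',), data, {}, {})
#   # data now also holds ('tags', 0, 'name'): 'science'
#   #                 and ('tags', 1, 'name'): 'open data'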
def ignore_not_admin(key, data, errors, context):
# Deprecated in favour of ignore_not_package_admin
return ignore_not_package_admin(key, data, errors, context)
def ignore_not_package_admin(key, data, errors, context):
'''Ignore if the user is not allowed to administer the package specified.'''
model = context['model']
user = context.get('user')
if 'ignore_auth' in context:
return
if user and authz.is_sysadmin(user):
return
authorized = False
pkg = context.get('package')
if pkg:
try:
logic.check_access('package_change_state',context)
authorized = True
except logic.NotAuthorized:
authorized = False
if (user and pkg and authorized):
return
# allow_state_change in the context will allow the state to be changed
    # FIXME is this the best way to check for state only?
if key == ('state',) and context.get('allow_state_change'):
return
data.pop(key)
def ignore_not_sysadmin(key, data, errors, context):
'''Ignore the field if user not sysadmin or ignore_auth in context.'''
user = context.get('user')
ignore_auth = context.get('ignore_auth')
if ignore_auth or (user and authz.is_sysadmin(user)):
return
data.pop(key)
def ignore_not_group_admin(key, data, errors, context):
'''Ignore if the user is not allowed to administer for the group specified.'''
model = context['model']
user = context.get('user')
if user and authz.is_sysadmin(user):
return
authorized = False
group = context.get('group')
if group:
try:
logic.check_access('group_change_state',context)
authorized = True
except logic.NotAuthorized:
authorized = False
if (user and group and authorized):
return
data.pop(key)
def user_name_validator(key, data, errors, context):
'''Validate a new user name.
Append an error message to ``errors[key]`` if a user named ``data[key]``
already exists. Otherwise, do nothing.
:raises ckan.lib.navl.dictization_functions.Invalid: if ``data[key]`` is
not a string
:rtype: None
'''
model = context['model']
new_user_name = data[key]
if not isinstance(new_user_name, basestring):
raise Invalid(_('User names must be strings'))
user = model.User.get(new_user_name)
if user is not None:
# A user with new_user_name already exists in the database.
user_obj_from_context = context.get('user_obj')
if user_obj_from_context and user_obj_from_context.id == user.id:
# If there's a user_obj in context with the same id as the user
# found in the db, then we must be doing a user_update and not
# updating the user name, so don't return an error.
return
else:
            # Otherwise return an error: there's already another user with that
            # name, so you cannot create a new user with that name or update an
            # existing user's name to that name.
errors[key].append(_('That login name is not available.'))
def user_both_passwords_entered(key, data, errors, context):
password1 = data.get(('password1',),None)
password2 = data.get(('password2',),None)
if password1 is None or password1 == '' or \
password2 is None or password2 == '':
errors[('password',)].append(_('Please enter both passwords'))
def user_password_validator(key, data, errors, context):
value = data[key]
if isinstance(value, Missing):
pass
elif not isinstance(value, basestring):
errors[('password',)].append(_('Passwords must be strings'))
elif value == '':
pass
elif len(value) < 4:
errors[('password',)].append(_('Your password must be 4 characters or longer'))
def user_passwords_match(key, data, errors, context):
password1 = data.get(('password1',),None)
password2 = data.get(('password2',),None)
if not password1 == password2:
errors[key].append(_('The passwords you entered do not match'))
else:
#Set correct password
data[('password',)] = password1
def user_password_not_empty(key, data, errors, context):
'''Only check if password is present if the user is created via action API.
If not, user_both_passwords_entered will handle the validation'''
# sysadmin may provide password_hash directly for importing users
if (data.get(('password_hash',), missing) is not missing and
authz.is_sysadmin(context.get('user'))):
return
if not ('password1',) in data and not ('password2',) in data:
password = data.get(('password',),None)
if not password:
errors[key].append(_('Missing value'))
def user_about_validator(value,context):
if 'http://' in value or 'https://' in value:
raise Invalid(_('Edit not allowed as it looks like spam. Please avoid links in your description.'))
return value
def vocabulary_name_validator(name, context):
model = context['model']
session = context['session']
if len(name) < VOCABULARY_NAME_MIN_LENGTH:
raise Invalid(_('Name must be at least %s characters long') %
VOCABULARY_NAME_MIN_LENGTH)
if len(name) > VOCABULARY_NAME_MAX_LENGTH:
raise Invalid(_('Name must be a maximum of %i characters long') %
VOCABULARY_NAME_MAX_LENGTH)
query = session.query(model.Vocabulary.name).filter_by(name=name)
result = query.first()
if result:
raise Invalid(_('That vocabulary name is already in use.'))
return name
def vocabulary_id_not_changed(value, context):
vocabulary = context.get('vocabulary')
if vocabulary and value != vocabulary.id:
raise Invalid(_('Cannot change value of key from %s to %s. '
'This key is read-only') % (vocabulary.id, value))
return value
def vocabulary_id_exists(value, context):
model = context['model']
session = context['session']
result = session.query(model.Vocabulary).get(value)
if not result:
raise Invalid(_('Tag vocabulary was not found.'))
return value
def tag_in_vocabulary_validator(value, context):
model = context['model']
session = context['session']
vocabulary = context.get('vocabulary')
if vocabulary:
query = session.query(model.Tag)\
.filter(model.Tag.vocabulary_id==vocabulary.id)\
.filter(model.Tag.name==value)\
.count()
if not query:
raise Invalid(_('Tag %s does not belong to vocabulary %s') % (value, vocabulary.name))
return value
def tag_not_in_vocabulary(key, tag_dict, errors, context):
tag_name = tag_dict[('name',)]
if not tag_name:
raise Invalid(_('No tag name'))
if tag_dict.has_key(('vocabulary_id',)):
vocabulary_id = tag_dict[('vocabulary_id',)]
else:
vocabulary_id = None
model = context['model']
session = context['session']
query = session.query(model.Tag)
query = query.filter(model.Tag.vocabulary_id==vocabulary_id)
query = query.filter(model.Tag.name==tag_name)
count = query.count()
if count > 0:
raise Invalid(_('Tag %s already belongs to vocabulary %s') %
(tag_name, vocabulary_id))
else:
return
def url_validator(key, data, errors, context):
''' Checks that the provided value (if it is present) is a valid URL '''
import urlparse
import string
model = context['model']
session = context['session']
url = data.get(key, None)
if not url:
return
pieces = urlparse.urlparse(url)
if all([pieces.scheme, pieces.netloc]) and \
set(pieces.netloc) <= set(string.letters + string.digits + '-.') and \
pieces.scheme in ['http', 'https']:
return
errors[key].append(_('Please provide a valid URL'))
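# Hedged usage sketch for url_validator (values are invented; context must
# carry the usual 'model'/'session' entries, which are looked up but unused):
#   data, errors = {('url',): 'https://example.com'}, {('url',): []}
#   url_validator(('url',), data, errors, context)  # errors stays empty
#   data[('url',)] = 'not a url'
#   url_validator(('url',), data, errors, context)  # appends 'Please provide a valid URL'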
def user_name_exists(user_name, context):
model = context['model']
session = context['session']
result = session.query(model.User).filter_by(name=user_name).first()
if not result:
raise Invalid('%s: %s' % (_('Not found'), _('User')))
return result.name
def role_exists(role, context):
if role not in authz.ROLE_PERMISSIONS:
raise Invalid(_('role does not exist.'))
return role
def datasets_with_no_organization_cannot_be_private(key, data, errors,
context):
dataset_id = data.get(('id',))
owner_org = data.get(('owner_org',))
private = data[key] is True
check_passed = True
if not dataset_id and private and not owner_org:
# When creating a dataset, enforce it directly
check_passed = False
elif dataset_id and private and not owner_org:
# Check if the dataset actually has an owner_org, even if not provided
try:
dataset_dict = logic.get_action('package_show')({},
{'id': dataset_id})
if not dataset_dict.get('owner_org'):
check_passed = False
except logic.NotFound:
check_passed = False
if not check_passed:
errors[key].append(
_("Datasets with no organization can't be private."))
def list_of_strings(key, data, errors, context):
value = data.get(key)
if not isinstance(value, list):
raise Invalid(_('Not a list'))
for x in value:
if not isinstance(x, basestring):
raise Invalid('%s: %s' % (_('Not a string'), x))
def if_empty_guess_format(key, data, errors, context):
value = data[key]
resource_id = data.get(key[:-1] + ('id',))
# if resource_id then an update
if (not value or value is Missing) and not resource_id:
url = data.get(key[:-1] + ('url',), '')
mimetype, encoding = mimetypes.guess_type(url)
if mimetype:
data[key] = mimetype
def clean_format(format):
return h.unified_resource_format(format)
def no_loops_in_hierarchy(key, data, errors, context):
'''Checks that the parent groups specified in the data would not cause
a loop in the group hierarchy, and therefore cause the recursion up/down
the hierarchy to get into an infinite loop.
'''
if not 'id' in data:
# Must be a new group - has no children, so no chance of loops
return
group = context['model'].Group.get(data['id'])
allowable_parents = group.\
groups_allowed_to_be_its_parent(type=group.type)
for parent in data['groups']:
parent_name = parent['name']
# a blank name signifies top level, which is always allowed
if parent_name and context['model'].Group.get(parent_name) \
not in allowable_parents:
raise Invalid(_('This parent would create a loop in the '
'hierarchy'))
def filter_fields_and_values_should_have_same_length(key, data, errors, context):
convert_to_list_if_string = logic.converters.convert_to_list_if_string
fields = convert_to_list_if_string(data.get(('filter_fields',), []))
values = convert_to_list_if_string(data.get(('filter_values',), []))
if len(fields) != len(values):
msg = _('"filter_fields" and "filter_values" should have the same length')
errors[('filter_fields',)].append(msg)
errors[('filter_values',)].append(msg)
def filter_fields_and_values_exist_and_are_valid(key, data, errors, context):
convert_to_list_if_string = logic.converters.convert_to_list_if_string
fields = convert_to_list_if_string(data.get(('filter_fields',)))
values = convert_to_list_if_string(data.get(('filter_values',)))
if not fields:
errors[('filter_fields',)].append(_('"filter_fields" is required when '
'"filter_values" is filled'))
if not values:
errors[('filter_values',)].append(_('"filter_values" is required when '
'"filter_fields" is filled'))
filters = collections.defaultdict(list)
for field, value in zip(fields, values):
filters[field].append(value)
data[('filters',)] = dict(filters)
def extra_key_not_in_root_schema(key, data, errors, context):
for schema_key in context.get('schema_keys', []):
if schema_key == data[key]:
raise Invalid(_('There is a schema field with the same name'))
def empty_if_not_sysadmin(key, data, errors, context):
'''Only sysadmins may pass this value'''
from ckan.lib.navl.validators import empty
user = context.get('user')
ignore_auth = context.get('ignore_auth')
if ignore_auth or (user and authz.is_sysadmin(user)):
return
empty(key, data, errors, context)
| gpl-3.0 | 4,499,609,140,008,467,000 | 31.989183 | 107 | 0.626407 | false |
endlessm/chromium-browser | third_party/chromite/lib/replication_lib.py | 1 | 5370 | # -*- coding: utf-8 -*-
# Copyright 2019 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""An implementation of the ReplicationConfig proto interface."""
from __future__ import print_function
import json
import os
import shutil
import sys
from chromite.api.gen.config import replication_config_pb2
from chromite.lib import constants
from chromite.lib import cros_logging as logging
from chromite.lib import osutils
from chromite.utils import field_mask_util
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
def _ValidateFileReplicationRule(rule):
"""Raises an error if a FileReplicationRule is invalid.
For example, checks that if REPLICATION_TYPE_FILTER, destination_fields
are specified.
Args:
rule: (FileReplicationRule) The rule to validate.
"""
if rule.file_type == replication_config_pb2.FILE_TYPE_JSON:
if rule.replication_type != replication_config_pb2.REPLICATION_TYPE_FILTER:
raise ValueError(
'Rule for JSON source %s must use REPLICATION_TYPE_FILTER.' %
rule.source_path)
elif rule.file_type == replication_config_pb2.FILE_TYPE_OTHER:
if rule.replication_type != replication_config_pb2.REPLICATION_TYPE_COPY:
raise ValueError('Rule for source %s must use REPLICATION_TYPE_COPY.' %
rule.source_path)
else:
raise NotImplementedError('Replicate not implemented for file type %s' %
rule.file_type)
if rule.replication_type == replication_config_pb2.REPLICATION_TYPE_COPY:
if rule.destination_fields.paths:
raise ValueError(
'Rule with REPLICATION_TYPE_COPY cannot use destination_fields.')
elif rule.replication_type == replication_config_pb2.REPLICATION_TYPE_FILTER:
if not rule.destination_fields.paths:
raise ValueError(
'Rule with REPLICATION_TYPE_FILTER must use destination_fields.')
else:
raise NotImplementedError(
'Replicate not implemented for replication type %s' %
rule.replication_type)
if os.path.isabs(rule.source_path) or os.path.isabs(rule.destination_path):
raise ValueError(
'Only paths relative to the source root are allowed. In rule: %s' %
rule)
def _ApplyStringReplacementRules(destination_path, rules):
"""Read the file at destination path, apply rules, and write a new file.
Args:
destination_path: (str) Path to the destination file to read. The new file
will also be written at this path.
rules: (list[StringReplacementRule]) Rules to apply. Must not be empty.
"""
assert rules
with open(destination_path, 'r') as f:
dst_data = f.read()
for string_replacement_rule in rules:
dst_data = dst_data.replace(string_replacement_rule.before,
string_replacement_rule.after)
with open(destination_path, 'w') as f:
f.write(dst_data)
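# Hedged sketch for _ApplyStringReplacementRules (file contents and rule values
# are invented): if the file at destination_path contains "board: octopus" and
# rules holds a single rule with before='octopus' and after='octopus-public',
# the file is rewritten in place to contain "board: octopus-public".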
def Replicate(replication_config):
"""Run the replication described in replication_config.
Args:
replication_config: (ReplicationConfig) Describes the replication to run.
"""
# Validate all rules before any of them are run, to decrease chance of ending
# with a partial replication.
for rule in replication_config.file_replication_rules:
_ValidateFileReplicationRule(rule)
for rule in replication_config.file_replication_rules:
logging.info('Processing FileReplicationRule: %s', rule)
src = os.path.join(constants.SOURCE_ROOT, rule.source_path)
dst = os.path.join(constants.SOURCE_ROOT, rule.destination_path)
osutils.SafeMakedirs(os.path.dirname(dst))
if rule.file_type == replication_config_pb2.FILE_TYPE_JSON:
assert (rule.replication_type ==
replication_config_pb2.REPLICATION_TYPE_FILTER)
assert rule.destination_fields.paths
with open(src, 'r') as f:
source_json = json.load(f)
try:
source_device_configs = source_json['chromeos']['configs']
except KeyError:
raise NotImplementedError(
('Currently only ChromeOS Configs are supported (expected file %s '
'to have a list at "$.chromeos.configs")') % src)
destination_device_configs = []
for source_device_config in source_device_configs:
destination_device_configs.append(
field_mask_util.CreateFilteredDict(rule.destination_fields,
source_device_config))
destination_json = {'chromeos': {'configs': destination_device_configs}}
logging.info('Writing filtered JSON source to %s', dst)
with open(dst, 'w') as f:
# Use the print function, so the file ends in a newline.
print(
json.dumps(
destination_json,
sort_keys=True,
indent=2,
separators=(',', ': ')),
file=f)
else:
assert rule.file_type == replication_config_pb2.FILE_TYPE_OTHER
assert (
rule.replication_type == replication_config_pb2.REPLICATION_TYPE_COPY)
assert not rule.destination_fields.paths
logging.info('Copying full file from %s to %s', src, dst)
shutil.copy2(src, dst)
if rule.string_replacement_rules:
_ApplyStringReplacementRules(dst, rule.string_replacement_rules)
| bsd-3-clause | 684,216,379,399,522,800 | 34.8 | 80 | 0.679143 | false |
narunask/silly_chatbot | rtmbot/app/plugins/chatbot.py | 1 | 1552 | #!/usr/bin/env python3
# coding: utf-8
from rtmbot.core import Plugin
from chatterbot import ChatBot
from plugins.console import Command
# Sessions
SESS = {}
# Init ChatBots
BOTS = ['HAL 9000', 'Wall-E', 'Agent Smith']
TRAINER='chatterbot.trainers.ChatterBotCorpusTrainer'
BOT_DICT = {B: ChatBot(B, trainer=TRAINER) for B in BOTS}
# Train based on the english corpus
#for B in BOT_DICT.values():
# B.train("chatterbot.corpus.english")
class Reply(Plugin):
def process_message(self, data):
print(data)
channel = data['channel']
if not channel.startswith("D"):
return
user = data['user']
team = data['team']
# User ID
uid = '_'.join([user,team])
bot = SESS.get(uid, None)
cmd = Command(bot=bot, bot_dict=BOT_DICT)
question = data['text'].strip()
if bot:
print(bot.name, 'is processing question:', question)
else:
print('Processing question:', question)
bot_response = cmd.run(q=question)
if cmd.error:
self.outputs.append([channel, '<BOT> {answer}'.format(answer=cmd.error)])
elif cmd.bot:
bot = cmd.bot
SESS[uid] = bot
answ_dict = dict(bot=bot.name, answer=bot_response)
self.outputs.append([channel, '<BOT@{bot}> {answer}'.format(**answ_dict)])
elif not cmd.bot:
if uid in SESS:
del SESS[uid]
self.outputs.append([channel, '<BOT> {answer}'.format(answer=bot_response)])
| mit | 2,409,432,908,618,128,000 | 27.740741 | 88 | 0.586985 | false |
jds2001/ocp-checkbox | plugins/jobs_info.py | 1 | 13008 | #
# This file is part of Checkbox.
#
# Copyright 2010 Canonical Ltd.
#
# Checkbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Checkbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Checkbox. If not, see <http://www.gnu.org/licenses/>.
#
import os
import re
import sys
import difflib
import gettext
import logging
from collections import defaultdict
from gettext import gettext as _
from checkbox.lib.resolver import Resolver
from checkbox.arguments import coerce_arguments
from checkbox.plugin import Plugin
from checkbox.properties import (
Float,
Int,
List,
Map,
Path,
String,
)
job_schema = Map({
"plugin": String(),
"name": String(),
"type": String(required=False),
"status": String(required=False),
"suite": String(required=False),
"description": String(required=False),
"purpose": String(required=False),
"steps": String(required=False),
"info": String(required=False),
"verification": String(required=False),
"command": String(required=False),
"depends": List(String(), required=False),
"duration": Float(required=False),
"environ": List(String(), required=False),
"requires": List(String(), separator=r"\n", required=False),
"resources": List(String(), required=False),
"estimated_duration": Float(required=False),
"timeout": Int(required=False),
"user": String(required=False),
"data": String(required=False)})
class JobsInfo(Plugin):
# Domain for internationalization
domain = String(default="checkbox")
# Space separated list of directories where job files are stored.
directories = List(Path(),
default_factory=lambda: "%(checkbox_share)s/jobs")
# List of jobs to blacklist
blacklist = List(String(), default_factory=lambda: "")
# Path to blacklist file
blacklist_file = Path(required=False)
# List of jobs to whitelist
whitelist = List(String(), default_factory=lambda: "")
# Path to whitelist file
whitelist_file = Path(required=False)
def register(self, manager):
super(JobsInfo, self).register(manager)
self.whitelist_patterns = self.get_patterns(
self.whitelist, self.whitelist_file)
self.blacklist_patterns = self.get_patterns(
self.blacklist, self.blacklist_file)
self.selected_jobs = defaultdict(list)
self._missing_dependencies_report = ""
self._manager.reactor.call_on("prompt-begin", self.prompt_begin)
self._manager.reactor.call_on("gather", self.gather)
if logging.getLogger().getEffectiveLevel() <= logging.DEBUG:
self._manager.reactor.call_on(
"prompt-gather", self.post_gather, 90)
self._manager.reactor.call_on("report-job", self.report_job, -100)
def prompt_begin(self, interface):
"""
Capture interface object to use it later
to display errors
"""
self.interface = interface
self.unused_patterns = (
self.whitelist_patterns + self.blacklist_patterns)
def check_ordered_messages(self, messages):
"""Return whether the list of messages are ordered or not.
Also populates a _missing_dependencies_report string variable
with a report of any jobs that are required but not present
in the whitelist."""
names_so_far = set()
all_names = set([message['name'] for message in messages])
messages_ordered = True
missing_dependencies = defaultdict(set)
for message in messages:
name = message["name"]
for dependency in message.get("depends", []):
if dependency not in names_so_far:
messages_ordered = False
#Two separate checks :) we *could* save a negligible
#bit of time by putting this inside the previous "if"
#but we're not in *that* big a hurry.
if dependency not in all_names:
missing_dependencies[name].add(dependency)
names_so_far.add(name)
#Now assemble the list of missing deps into a nice report
jobs_and_missing_deps = ["{} required by {}".format(job_name,
", ".join(missing_dependencies[job_name]))
for job_name in missing_dependencies]
self._missing_dependencies_report = "\n".join(jobs_and_missing_deps)
return messages_ordered
def get_patterns(self, strings, filename=None):
"""Return the list of strings as compiled regular expressions."""
if filename:
try:
file = open(filename)
except IOError as e:
error_message = (_("Failed to open file '%s': %s")
% (filename, e.strerror))
logging.critical(error_message)
sys.stderr.write("%s\n" % error_message)
sys.exit(os.EX_NOINPUT)
else:
strings.extend([l.strip() for l in file.readlines()])
return [re.compile(r"^%s$" % s) for s in strings
if s and not s.startswith("#")]
def get_unique_messages(self, messages):
"""Return the list of messages without any duplicates, giving
precedence to messages that are the longest.
"""
unique_messages = []
unique_indexes = {}
for message in messages:
name = message["name"]
index = unique_indexes.get(name)
if index is None:
unique_indexes[name] = len(unique_messages)
unique_messages.append(message)
elif len(message) > len(unique_messages[index]):
unique_messages[index] = message
return unique_messages
def gather(self):
# Register temporary handler for report-message events
messages = []
def report_message(message):
if self.whitelist_patterns:
name = message["name"]
names = [name for p in self.whitelist_patterns
if p.match(name)]
if not names:
return
messages.append(message)
# Set domain and message event handler
old_domain = gettext.textdomain()
gettext.textdomain(self.domain)
event_id = self._manager.reactor.call_on(
"report-message", report_message, 100)
for directory in self.directories:
self._manager.reactor.fire("message-directory", directory)
for message in messages:
self._manager.reactor.fire("report-job", message)
# Unset domain and event handler
self._manager.reactor.cancel_call(event_id)
gettext.textdomain(old_domain)
# Get unique messages from the now complete list
messages = self.get_unique_messages(messages)
# Apply whitelist ordering
if self.whitelist_patterns:
def key_function(obj):
name = obj["name"]
for pattern in self.whitelist_patterns:
if pattern.match(name):
return self.whitelist_patterns.index(pattern)
messages = sorted(messages, key=key_function)
if not self.check_ordered_messages(messages):
#One of two things may have happened if we enter this code path.
#Either the jobs are not in topological ordering,
#Or they are in topological ordering but a dependency is
#missing.
old_message_names = [
message["name"] + "\n" for message in messages]
resolver = Resolver(key_func=lambda m: m["name"])
for message in messages:
resolver.add(
message, *message.get("depends", []))
messages = resolver.get_dependents()
if (self.whitelist_patterns and
logging.getLogger().getEffectiveLevel() <= logging.DEBUG):
new_message_names = [
message["name"] + "\n" for message in messages]
#This will contain a report of out-of-order jobs.
detailed_text = "".join(
difflib.unified_diff(
old_message_names,
new_message_names,
"old whitelist",
"new whitelist"))
#First, we report missing dependencies, if any.
if self._missing_dependencies_report:
primary = _("Dependencies are missing so some jobs "
"will not run.")
secondary = _("To fix this, close checkbox and add "
"the missing dependencies to the "
"whitelist.")
self._manager.reactor.fire("prompt-warning",
self.interface,
primary,
secondary,
self._missing_dependencies_report)
#If detailed_text is empty, it means the problem
#was missing dependencies, which we already reported.
#Otherwise, we also need to report reordered jobs here.
if detailed_text:
primary = _("Whitelist not topologically ordered")
secondary = _("Jobs will be reordered to fix broken "
"dependencies")
self._manager.reactor.fire("prompt-warning",
self.interface,
primary,
secondary,
detailed_text)
self._manager.reactor.fire("report-jobs", messages)
def post_gather(self, interface):
"""
Verify that all patterns were used
"""
if logging.getLogger().getEffectiveLevel() > logging.DEBUG:
return
orphan_test_cases = []
for name, jobs in self.selected_jobs.items():
is_test = any(job.get('type') == 'test' for job in jobs)
has_suite = any(job.get('suite') for job in jobs)
if is_test and not has_suite:
orphan_test_cases.append(name)
if orphan_test_cases:
detailed_error = \
('Test cases not included in any test suite:\n'
'{0}\n\n'
'This might cause problems '
'when uploading test cases results.\n'
'Please make sure that the patterns you used are up-to-date\n'
.format('\n'.join(['- {0}'.format(tc)
for tc in orphan_test_cases])))
self._manager.reactor.fire('prompt-warning', self.interface,
'Orphan test cases detected',
"Some test cases aren't included "
'in any test suite',
detailed_error)
if self.unused_patterns:
detailed_error = \
('Unused patterns:\n'
'{0}\n\n'
"Please make sure that the patterns you used are up-to-date\n"
.format('\n'.join(['- {0}'.format(p.pattern[1:-1])
for p in self.unused_patterns])))
self._manager.reactor.fire('prompt-warning', self.interface,
'Unused patterns',
'Please make sure that the patterns '
'you used are up-to-date',
detailed_error)
@coerce_arguments(job=job_schema)
def report_job(self, job):
name = job["name"]
patterns = self.whitelist_patterns or self.blacklist_patterns
if patterns:
match = next((p for p in patterns if p.match(name)), None)
if match:
# Keep track of which patterns didn't match any job
if match in self.unused_patterns:
self.unused_patterns.remove(match)
self.selected_jobs[name].append(job)
else:
# Stop if job not in whitelist or in blacklist
self._manager.reactor.stop()
factory = JobsInfo
| gpl-3.0 | 8,009,267,436,187,903,000 | 38.180723 | 79 | 0.555889 | false |
oniwan/GCI | sele.py | 1 | 1328 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
import time
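# Headless check of the lzone.daiwa.co.jp login flow: fill in the form with
# PhantomJS and save a screenshot after each step so the result can be
# inspected offline.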
driver = webdriver.PhantomJS()
url = "https://lzone.daiwa.co.jp/lzone/common/authorize"
username='[email protected]'
password = 'gcigci'
driver.get(url)
driver.save_screenshot('search_result2.png')
print driver.current_url
driver.find_element_by_css_selector('input[name="memberId"]').send_keys(username)
driver.find_element_by_css_selector('input[name="passWord"]').send_keys(password)
driver.save_screenshot('search_result3.png')
#driver.find_element_by_class_name('button-login').send_keys(Keys.ENTER)
driver.find_element_by_id('image-btn_ok').send_keys(Keys.ENTER)
print driver.current_url
driver.save_screenshot('search_result410.png')
'''
wait = WebDriverWait(driver, 10)
driver.get(url)
print driver.current_url
driver.save_screenshot('search_result1.png')
driver.find_element_by_css_selector('input[name="account"]').send_keys(username)
driver.find_element_by_css_selector('input[name="password"]').send_keys(password)
driver.find_element_by_class_name('button-login').send_keys(Keys.ENTER)
time.sleep(10)
print driver.current_url
driver.save_screenshot('search_result2.png')
print "end"
'''
| mit | 2,879,929,457,836,766,700 | 29.883721 | 81 | 0.779367 | false |
ujiro99/auto_logger | logger/merge.py | 1 | 2373 | import fileinput
import os
import re
from datetime import datetime as dt
from logger import log
class Merge:
"""
Merge multiple log files.
"""
class Parsed:
"""
Parsed datetime and log line.
"""
def __init__(self, time=None, line=None):
self.time = time # type: datetime
self.line = line # type: bytes
TIME_FMT = '%H:%M:%S.%f' # format of time stamps
TIME_FMT_LEN = 12 # time stamps length
PATTERN = re.compile(r'(.+?)/.*') # pattern to extract timestamp
FILE_SUFFIX = '.merged.log' # Merged file's name suffix
FILE_ENCODE = 'utf8' # log file's encode
def exec(self, dir_path):
"""
Exec merge files and sort by timestamp.
:param str dir_path: Directory path which contains log files.
:return: Merge result.
:rtype bool
"""
dir_path = dir_path.rstrip('/')
log.i("- start merge: [%s]" % dir_path)
lines = [self.Parsed(dt.min, b'')]
files = list(self.__files(dir_path))
if len(files) == 0:
return False
for l in list(fileinput.input(files, mode="rb")):
p = self.__parse(l)
if p.time is None:
lines[-1].line = lines[-1].line + p.line
continue
lines.append(p)
log.i("- write merged file: [%s%s]" % (dir_path, Merge.FILE_SUFFIX))
lines = sorted(lines, key=lambda x: x.time)
with open(dir_path + Merge.FILE_SUFFIX, "wb") as fd:
for l in lines: fd.write(l.line)
return True
def __files(self, dir_path):
"""
Find files.
:return: Iterator[str]
"""
for root, dirs, files in os.walk(dir_path):
for file in files:
ret = os.path.join(root, file)
log.d(ret)
yield ret
return
def __parse(self, byte):
"""
Parse log line.
:param bytes byte:
:return: Parse result. If failed to decode, returns None time.
:rtype Merger.Parsed
"""
try:
s = byte[0:Merge.TIME_FMT_LEN].decode(Merge.FILE_ENCODE)
t = dt.strptime(s, Merge.TIME_FMT)
except Exception as e:
log.d(e)
return self.Parsed(line=byte)
return self.Parsed(t, byte)
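# Example usage (illustrative; the directory path is hypothetical):
#   if Merge().exec('/tmp/collected_logs'):
#       log.i('merged log written to /tmp/collected_logs' + Merge.FILE_SUFFIX)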
| mit | 8,957,099,488,283,862,000 | 26.917647 | 76 | 0.52381 | false |
wmde/jenkins-job-builder | jenkins_jobs/modules/publishers.py | 1 | 140186 | # Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright 2012 Varnish Software AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Publishers define actions that the Jenkins job should perform after
the build is complete.
**Component**: publishers
:Macro: publisher
:Entry Point: jenkins_jobs.publishers
"""
import xml.etree.ElementTree as XML
import jenkins_jobs.modules.base
from jenkins_jobs.modules import hudson_model
from jenkins_jobs.errors import JenkinsJobsException
import logging
import sys
import random
def archive(parser, xml_parent, data):
"""yaml: archive
Archive build artifacts
:arg str artifacts: path specifier for artifacts to archive
:arg str excludes: path specifier for artifacts to exclude
:arg bool latest-only: only keep the artifacts from the latest
successful build
:arg bool allow-empty: pass the build if no artifacts are
found (default false)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/archive001.yaml
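
    A minimal inline sketch for quick reference (values are illustrative;
    the fixture referenced above is the canonical example)::

      publishers:
        - archive:
            artifacts: 'build/*.tar.gz'
            allow-empty: true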
"""
logger = logging.getLogger("%s:archive" % __name__)
archiver = XML.SubElement(xml_parent, 'hudson.tasks.ArtifactArchiver')
artifacts = XML.SubElement(archiver, 'artifacts')
artifacts.text = data['artifacts']
if 'excludes' in data:
excludes = XML.SubElement(archiver, 'excludes')
excludes.text = data['excludes']
latest = XML.SubElement(archiver, 'latestOnly')
# backward compatibility
latest_only = data.get('latest_only', False)
if 'latest_only' in data:
logger.warn('latest_only is deprecated please use latest-only')
if 'latest-only' in data:
latest_only = data['latest-only']
if latest_only:
latest.text = 'true'
else:
latest.text = 'false'
if 'allow-empty' in data:
empty = XML.SubElement(archiver, 'allowEmptyArchive')
# Default behavior is to fail the build.
empty.text = str(data.get('allow-empty', False)).lower()
def blame_upstream(parser, xml_parent, data):
"""yaml: blame-upstream
    Notify upstream committers when the build fails.
    Requires the Jenkins `Blame Upstream Committers Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/
Blame+Upstream+Committers+Plugin>`_
Example:
.. literalinclude:: /../../tests/publishers/fixtures/blame001.yaml
"""
XML.SubElement(xml_parent,
'hudson.plugins.blame__upstream__commiters.'
'BlameUpstreamCommitersPublisher')
def campfire(parser, xml_parent, data):
"""yaml: campfire
Send build notifications to Campfire rooms.
Requires the Jenkins `Campfire Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Campfire+Plugin>`_
    Global default values for Campfire notifications must be configured on
    the Jenkins instance. The defaults are used whenever a job does not
    specify its own values, so all config params are optional.
:arg str subdomain: override the default campfire subdomain
:arg str token: override the default API token
:arg bool ssl: override the default 'use SSL'
:arg str room: override the default room name
Example:
.. literalinclude:: /../../tests/publishers/fixtures/campfire001.yaml
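
    An illustrative sketch that only overrides the room (the value is a
    placeholder; global defaults cover everything omitted)::

      publishers:
        - campfire:
            room: 'builds'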
"""
root = XML.SubElement(xml_parent,
'hudson.plugins.campfire.'
'CampfireNotifier')
campfire = XML.SubElement(root, 'campfire')
if ('subdomain' in data and data['subdomain']):
subdomain = XML.SubElement(campfire, 'subdomain')
subdomain.text = data['subdomain']
if ('token' in data and data['token']):
token = XML.SubElement(campfire, 'token')
token.text = data['token']
if ('ssl' in data):
ssl = XML.SubElement(campfire, 'ssl')
ssl.text = str(data['ssl']).lower()
if ('room' in data and data['room']):
room = XML.SubElement(root, 'room')
name = XML.SubElement(room, 'name')
name.text = data['room']
XML.SubElement(room, 'campfire reference="../../campfire"')
def emotional_jenkins(parser, xml_parent, data):
"""yaml: emotional-jenkins
Emotional Jenkins.
Requires the Jenkins `Emotional Jenkins Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Emotional+Jenkins+Plugin>`_
Example:
.. literalinclude:: /../../tests/publishers/fixtures/emotional-jenkins.yaml
"""
XML.SubElement(xml_parent,
'org.jenkinsci.plugins.emotional__jenkins.'
'EmotionalJenkinsPublisher')
def trigger_parameterized_builds(parser, xml_parent, data):
"""yaml: trigger-parameterized-builds
Trigger parameterized builds of other jobs.
Requires the Jenkins `Parameterized Trigger Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/
Parameterized+Trigger+Plugin>`_
:arg str project: name of the job to trigger
:arg str predefined-parameters: parameters to pass to the other
job (optional)
:arg bool current-parameters: Whether to include the parameters passed
to the current build to the triggered job (optional)
:arg bool svn-revision: Pass svn revision to the triggered job (optional)
:arg bool git-revision: Pass git revision to the other job (optional)
:arg str condition: when to trigger the other job (default 'ALWAYS')
:arg str property-file: Use properties from file (optional)
:arg bool fail-on-missing: Blocks the triggering of the downstream jobs
if any of the files are not found in the workspace (default 'False')
:arg str restrict-matrix-project: Filter that restricts the subset
of the combinations that the downstream project will run (optional)
Example::
publishers:
- trigger-parameterized-builds:
- project: other_job, foo, bar
predefined-parameters: foo=bar
- project: other_job1, other_job2
predefined-parameters: BUILD_NUM=${BUILD_NUMBER}
property-file: version.prop
fail-on-missing: true
- project: yet_another_job
predefined-parameters: foo=bar
git-revision: true
restrict-matrix-project: label=="x86"
"""
tbuilder = XML.SubElement(xml_parent,
'hudson.plugins.parameterizedtrigger.'
'BuildTrigger')
configs = XML.SubElement(tbuilder, 'configs')
for project_def in data:
tconfig = XML.SubElement(configs,
'hudson.plugins.parameterizedtrigger.'
'BuildTriggerConfig')
tconfigs = XML.SubElement(tconfig, 'configs')
if ('predefined-parameters' in project_def
or 'git-revision' in project_def
or 'property-file' in project_def
or 'current-parameters' in project_def
or 'svn-revision' in project_def
or 'restrict-matrix-project' in project_def):
if 'predefined-parameters' in project_def:
params = XML.SubElement(tconfigs,
'hudson.plugins.parameterizedtrigger.'
'PredefinedBuildParameters')
properties = XML.SubElement(params, 'properties')
properties.text = project_def['predefined-parameters']
if 'git-revision' in project_def and project_def['git-revision']:
params = XML.SubElement(tconfigs,
'hudson.plugins.git.'
'GitRevisionBuildParameters')
properties = XML.SubElement(params, 'combineQueuedCommits')
properties.text = 'false'
if 'property-file' in project_def and project_def['property-file']:
params = XML.SubElement(tconfigs,
'hudson.plugins.parameterizedtrigger.'
'FileBuildParameters')
properties = XML.SubElement(params, 'propertiesFile')
properties.text = project_def['property-file']
failOnMissing = XML.SubElement(params, 'failTriggerOnMissing')
failOnMissing.text = str(project_def.get('fail-on-missing',
False)).lower()
if ('current-parameters' in project_def
and project_def['current-parameters']):
XML.SubElement(tconfigs,
'hudson.plugins.parameterizedtrigger.'
'CurrentBuildParameters')
if 'svn-revision' in project_def and project_def['svn-revision']:
XML.SubElement(tconfigs,
'hudson.plugins.parameterizedtrigger.'
'SubversionRevisionBuildParameters')
if ('restrict-matrix-project' in project_def
and project_def['restrict-matrix-project']):
subset = XML.SubElement(tconfigs,
'hudson.plugins.parameterizedtrigger.'
'matrix.MatrixSubsetBuildParameters')
XML.SubElement(subset, 'filter').text = \
project_def['restrict-matrix-project']
else:
tconfigs.set('class', 'java.util.Collections$EmptyList')
projects = XML.SubElement(tconfig, 'projects')
projects.text = project_def['project']
condition = XML.SubElement(tconfig, 'condition')
condition.text = project_def.get('condition', 'ALWAYS')
trigger_with_no_params = XML.SubElement(tconfig,
'triggerWithNoParameters')
trigger_with_no_params.text = 'false'
def trigger(parser, xml_parent, data):
"""yaml: trigger
Trigger non-parametrised builds of other jobs.
:arg str project: name of the job to trigger
:arg str threshold: when to trigger the other job (default 'SUCCESS'),
alternatives: SUCCESS, UNSTABLE, FAILURE
Example:
.. literalinclude:: /../../tests/publishers/fixtures/trigger_success.yaml
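
    An illustrative sketch (the project name is a placeholder)::

      publishers:
        - trigger:
            project: downstream-job
            threshold: UNSTABLE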
"""
tconfig = XML.SubElement(xml_parent, 'hudson.tasks.BuildTrigger')
childProjects = XML.SubElement(tconfig, 'childProjects')
childProjects.text = data['project']
tthreshold = XML.SubElement(tconfig, 'threshold')
threshold = data.get('threshold', 'SUCCESS')
supported_thresholds = ['SUCCESS', 'UNSTABLE', 'FAILURE']
if threshold not in supported_thresholds:
raise JenkinsJobsException("threshold must be one of %s" %
", ".join(supported_thresholds))
tname = XML.SubElement(tthreshold, 'name')
tname.text = hudson_model.THRESHOLDS[threshold]['name']
tordinal = XML.SubElement(tthreshold, 'ordinal')
tordinal.text = hudson_model.THRESHOLDS[threshold]['ordinal']
tcolor = XML.SubElement(tthreshold, 'color')
tcolor.text = hudson_model.THRESHOLDS[threshold]['color']
def clone_workspace(parser, xml_parent, data):
"""yaml: clone-workspace
Archive the workspace from builds of one project and reuse them as the SCM
source for another project.
Requires the Jenkins `Clone Workspace SCM Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Clone+Workspace+SCM+Plugin>`_
:arg str workspace-glob: Files to include in cloned workspace
:arg str workspace-exclude-glob: Files to exclude from cloned workspace
:arg str criteria: Criteria for build to be archived. Can be 'any',
        'not failed', or 'successful'. (default: any)
:arg str archive-method: Choose the method to use for archiving the
workspace. Can be 'tar' or 'zip'. (default: tar)
:arg bool override-default-excludes: Override default ant excludes.
(default: false)
Minimal example:
.. literalinclude::
/../../tests/publishers/fixtures/clone-workspace001.yaml
Full example:
.. literalinclude::
/../../tests/publishers/fixtures/clone-workspace002.yaml
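
    An illustrative sketch combining the options above (the glob is a
    placeholder)::

      publishers:
        - clone-workspace:
            workspace-glob: '**/*.zip'
            criteria: 'successful'
            archive-method: 'zip'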
"""
cloneworkspace = XML.SubElement(
xml_parent,
'hudson.plugins.cloneworkspace.CloneWorkspacePublisher',
{'plugin': 'clone-workspace-scm'})
XML.SubElement(
cloneworkspace,
'workspaceGlob').text = data.get('workspace-glob', None)
if 'workspace-exclude-glob' in data:
XML.SubElement(
cloneworkspace,
'workspaceExcludeGlob').text = data['workspace-exclude-glob']
criteria_list = ['Any', 'Not Failed', 'Successful']
criteria = data.get('criteria', 'Any').title()
if 'criteria' in data and criteria not in criteria_list:
raise JenkinsJobsException(
'clone-workspace criteria must be one of: '
+ ', '.join(criteria_list))
else:
XML.SubElement(cloneworkspace, 'criteria').text = criteria
archive_list = ['TAR', 'ZIP']
archive_method = data.get('archive-method', 'TAR').upper()
if 'archive-method' in data and archive_method not in archive_list:
raise JenkinsJobsException(
'clone-workspace archive-method must be one of: '
+ ', '.join(archive_list))
else:
XML.SubElement(cloneworkspace, 'archiveMethod').text = archive_method
XML.SubElement(
cloneworkspace,
'overrideDefaultExcludes').text = str(data.get(
'override-default-excludes',
False)).lower()
def cloverphp(parser, xml_parent, data):
"""yaml: cloverphp
Capture code coverage reports from PHPUnit
Requires the Jenkins `Clover PHP Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Clover+PHP+Plugin>`_
Your job definition should pass to PHPUnit the --coverage-clover option
pointing to a file in the workspace (ex: clover-coverage.xml). The filename
has to be filled in the `xml-location` field.
:arg str xml-location: Path to the coverage XML file generated by PHPUnit
using --coverage-clover. Relative to workspace. (required)
    :arg dict html: When present, whether the plugin should generate an HTML
      report. Note that PHPUnit already provides an HTML report via its
      --coverage-html option which can be set in your builder (optional):
* **dir** (str): Directory where HTML report will be generated relative
to workspace. (required in `html` dict).
* **archive** (bool): Whether to archive HTML reports (default True).
:arg list metric-targets: List of metric targets to reach, must be one of
      **healthy**, **unhealthy** and **failing**. Each metric target can take
two parameters:
* **method** Target for method coverage
* **statement** Target for statements coverage
Whenever a metric target is not filled in, the Jenkins plugin can fill in
defaults for you (as of v0.3.3 of the plugin the healthy target will have
method: 70 and statement: 80 if both are left empty). Jenkins Job Builder
will mimic that feature to ensure clean configuration diff.
Minimal example:
.. literalinclude:: /../../tests/publishers/fixtures/cloverphp001.yaml
Full example:
.. literalinclude:: /../../tests/publishers/fixtures/cloverphp002.yaml
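
    An illustrative sketch with an archived HTML report (paths are
    placeholders)::

      publishers:
        - cloverphp:
            xml-location: 'build/clover.xml'
            html:
              dir: 'build/coverage'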
"""
cloverphp = XML.SubElement(
xml_parent,
'org.jenkinsci.plugins.cloverphp.CloverPHPPublisher')
# The plugin requires clover XML file to parse
if 'xml-location' not in data:
raise JenkinsJobsException('xml-location must be set')
# Whether HTML publishing has been checked
html_publish = False
# By default, disableArchiving = false. Note that we use
# reversed logic.
html_archive = True
if 'html' in data:
html_publish = True
html_dir = data['html'].get('dir', None)
html_archive = data['html'].get('archive', html_archive)
if html_dir is None:
# No point in going further, the plugin would not work
raise JenkinsJobsException('htmldir is required in a html block')
XML.SubElement(cloverphp, 'publishHtmlReport').text = \
str(html_publish).lower()
if html_publish:
XML.SubElement(cloverphp, 'reportDir').text = html_dir
XML.SubElement(cloverphp, 'xmlLocation').text = data.get('xml-location')
XML.SubElement(cloverphp, 'disableArchiving').text = \
str(not html_archive).lower()
# Handle targets
    # Plugin v0.3.3 will fill in defaults for us whenever the healthy targets
    # are both blank.
default_metrics = {
'healthy': {'method': 70, 'statement': 80}
}
allowed_metrics = ['healthy', 'unhealthy', 'failing']
metrics = data.get('metric-targets', [])
# list of dicts to dict
metrics = dict(kv for m in metrics for kv in m.iteritems())
# Populate defaults whenever nothing has been filled by user.
for default in default_metrics.keys():
if metrics.get(default, None) is None:
metrics[default] = default_metrics[default]
# The plugin would at least define empty targets so make sure
# we output them all in the XML regardless of what the user
# has or has not entered.
for target in allowed_metrics:
cur_target = XML.SubElement(cloverphp, target + 'Target')
for t_type in ['method', 'statement']:
val = metrics.get(target, {}).get(t_type)
if val is None or type(val) != int:
continue
if val < 0 or val > 100:
raise JenkinsJobsException(
"Publisher cloverphp metric target %s:%s = %s "
"is not in valid range 0-100." % (target, t_type, val))
XML.SubElement(cur_target, t_type + 'Coverage').text = str(val)
def coverage(parser, xml_parent, data):
"""yaml: coverage
WARNING: The coverage function is deprecated. Instead, use the
cobertura function to generate a cobertura coverage report.
Requires the Jenkins `Cobertura Coverage Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Cobertura+Plugin>`_
Example::
publishers:
- coverage
"""
logger = logging.getLogger(__name__)
logger.warn("Coverage function is deprecated. Switch to cobertura.")
cobertura = XML.SubElement(xml_parent,
'hudson.plugins.cobertura.CoberturaPublisher')
XML.SubElement(cobertura, 'coberturaReportFile').text = '**/coverage.xml'
XML.SubElement(cobertura, 'onlyStable').text = 'false'
healthy = XML.SubElement(cobertura, 'healthyTarget')
targets = XML.SubElement(healthy, 'targets', {
'class': 'enum-map',
'enum-type': 'hudson.plugins.cobertura.targets.CoverageMetric'})
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.CoverageMetric'
).text = 'CONDITIONAL'
XML.SubElement(entry, 'int').text = '70'
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.CoverageMetric'
).text = 'LINE'
XML.SubElement(entry, 'int').text = '80'
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.CoverageMetric'
).text = 'METHOD'
XML.SubElement(entry, 'int').text = '80'
unhealthy = XML.SubElement(cobertura, 'unhealthyTarget')
targets = XML.SubElement(unhealthy, 'targets', {
'class': 'enum-map',
'enum-type': 'hudson.plugins.cobertura.targets.CoverageMetric'})
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.CoverageMetric'
).text = 'CONDITIONAL'
XML.SubElement(entry, 'int').text = '0'
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.CoverageMetric'
).text = 'LINE'
XML.SubElement(entry, 'int').text = '0'
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.CoverageMetric'
).text = 'METHOD'
XML.SubElement(entry, 'int').text = '0'
failing = XML.SubElement(cobertura, 'failingTarget')
targets = XML.SubElement(failing, 'targets', {
'class': 'enum-map',
'enum-type': 'hudson.plugins.cobertura.targets.CoverageMetric'})
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.CoverageMetric'
).text = 'CONDITIONAL'
XML.SubElement(entry, 'int').text = '0'
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.CoverageMetric'
).text = 'LINE'
XML.SubElement(entry, 'int').text = '0'
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.CoverageMetric'
).text = 'METHOD'
XML.SubElement(entry, 'int').text = '0'
XML.SubElement(cobertura, 'sourceEncoding').text = 'ASCII'
def cobertura(parser, xml_parent, data):
"""yaml: cobertura
Generate a cobertura coverage report.
Requires the Jenkins `Cobertura Coverage Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Cobertura+Plugin>`_
:arg str report-file: This is a file name pattern that can be used
to locate the cobertura xml report files (optional)
:arg bool only-stable: Include only stable builds (default false)
:arg bool fail-no-reports: fail builds if no coverage reports are found
(default false)
:arg bool fail-unhealthy: Unhealthy projects will be failed
(default false)
:arg bool fail-unstable: Unstable projects will be failed (default false)
:arg bool health-auto-update: Auto update threshold for health on
successful build (default false)
:arg bool stability-auto-update: Auto update threshold for stability on
successful build (default false)
:arg bool zoom-coverage-chart: Zoom the coverage chart and crop area below
the minimum and above the maximum coverage
of the past reports (default false)
:arg str source-encoding: Override the source encoding (default ASCII)
:arg dict targets:
:targets: (packages, files, classes, method, line, conditional)
* **healthy** (`int`): Healthy threshold (default 0)
* **unhealthy** (`int`): Unhealthy threshold (default 0)
* **failing** (`int`): Failing threshold (default 0)
Example::
publishers:
- cobertura:
report-file: "/reports/cobertura/coverage.xml"
only-stable: "true"
fail-no-reports: "true"
fail-unhealthy: "true"
fail-unstable: "true"
health-auto-update: "true"
stability-auto-update: "true"
zoom-coverage-chart: "true"
source-encoding: "Big5"
targets:
- files:
healthy: 10
unhealthy: 20
failing: 30
- method:
healthy: 50
unhealthy: 40
failing: 30
"""
cobertura = XML.SubElement(xml_parent,
'hudson.plugins.cobertura.CoberturaPublisher')
XML.SubElement(cobertura, 'coberturaReportFile').text = data.get(
'report-file', '**/coverage.xml')
XML.SubElement(cobertura, 'onlyStable').text = str(
data.get('only-stable', False)).lower()
XML.SubElement(cobertura, 'failUnhealthy').text = str(
data.get('fail-unhealthy', False)).lower()
XML.SubElement(cobertura, 'failUnstable').text = str(
data.get('fail-unstable', False)).lower()
XML.SubElement(cobertura, 'autoUpdateHealth').text = str(
data.get('health-auto-update', False)).lower()
XML.SubElement(cobertura, 'autoUpdateStability').text = str(
data.get('stability-auto-update', False)).lower()
XML.SubElement(cobertura, 'zoomCoverageChart').text = str(
data.get('zoom-coverage-chart', False)).lower()
XML.SubElement(cobertura, 'failNoReports').text = str(
data.get('fail-no-reports', False)).lower()
healthy = XML.SubElement(cobertura, 'healthyTarget')
targets = XML.SubElement(healthy, 'targets', {
'class': 'enum-map',
'enum-type': 'hudson.plugins.cobertura.targets.CoverageMetric'})
for item in data['targets']:
item_name = item.keys()[0]
item_values = item.get(item_name, 0)
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry,
'hudson.plugins.cobertura.targets.'
'CoverageMetric').text = str(item_name).upper()
XML.SubElement(entry, 'int').text = str(item_values.get('healthy', 0))
unhealthy = XML.SubElement(cobertura, 'unhealthyTarget')
targets = XML.SubElement(unhealthy, 'targets', {
'class': 'enum-map',
'enum-type': 'hudson.plugins.cobertura.targets.CoverageMetric'})
for item in data['targets']:
item_name = item.keys()[0]
item_values = item.get(item_name, 0)
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.'
'CoverageMetric').text = str(item_name).upper()
XML.SubElement(entry, 'int').text = str(item_values.get('unhealthy',
0))
failing = XML.SubElement(cobertura, 'failingTarget')
targets = XML.SubElement(failing, 'targets', {
'class': 'enum-map',
'enum-type': 'hudson.plugins.cobertura.targets.CoverageMetric'})
for item in data['targets']:
item_name = item.keys()[0]
item_values = item.get(item_name, 0)
entry = XML.SubElement(targets, 'entry')
XML.SubElement(entry, 'hudson.plugins.cobertura.targets.'
'CoverageMetric').text = str(item_name).upper()
XML.SubElement(entry, 'int').text = str(item_values.get('failing', 0))
XML.SubElement(cobertura, 'sourceEncoding').text = data.get(
'source-encoding', 'ASCII')
def jacoco(parser, xml_parent, data):
"""yaml: jacoco
Generate a JaCoCo coverage report.
Requires the Jenkins `JaCoCo Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/JaCoCo+Plugin>`_
:arg str exec-pattern: This is a file name pattern that can be used to
locate the jacoco report files (default
``**/**.exec``)
:arg str class-pattern: This is a file name pattern that can be used
to locate class files (default ``**/classes``)
:arg str source-pattern: This is a file name pattern that can be used
to locate source files (default ``**/src/main/java``)
:arg bool update-build-status: Update the build according to the results
(default False)
:arg str inclusion-pattern: This is a file name pattern that can be used
to include certain class files (optional)
:arg str exclusion-pattern: This is a file name pattern that can be used
to exclude certain class files (optional)
:arg dict targets:
:targets: (instruction, branch, complexity, line, method, class)
* **healthy** (`int`): Healthy threshold (default 0)
* **unhealthy** (`int`): Unhealthy threshold (default 0)
Example::
publishers:
- jacoco:
exec-pattern: "**/**.exec"
class-pattern: "**/classes"
source-pattern: "**/src/main/java"
status-update: true
targets:
- branch:
healthy: 10
unhealthy: 20
- method:
healthy: 50
unhealthy: 40
"""
jacoco = XML.SubElement(xml_parent,
'hudson.plugins.jacoco.JacocoPublisher')
XML.SubElement(jacoco, 'execPattern').text = data.get(
'exec-pattern', '**/**.exec')
XML.SubElement(jacoco, 'classPattern').text = data.get(
'class-pattern', '**/classes')
XML.SubElement(jacoco, 'sourcePattern').text = data.get(
'source-pattern', '**/src/main/java')
XML.SubElement(jacoco, 'changeBuildStatus').text = data.get(
'update-build-status', False)
XML.SubElement(jacoco, 'inclusionPattern').text = data.get(
'inclusion-pattern', '')
XML.SubElement(jacoco, 'exclusionPattern').text = data.get(
'exclusion-pattern', '')
itemsList = ['instruction',
'branch',
'complexity',
'line',
'method',
'class']
for item in data['targets']:
item_name = item.keys()[0]
if item_name not in itemsList:
raise JenkinsJobsException("item entered is not valid must be "
"one of: %s" % ",".join(itemsList))
item_values = item.get(item_name, 0)
XML.SubElement(jacoco,
'maximum' +
item_name.capitalize() +
'Coverage').text = str(item_values.get('healthy', 0))
XML.SubElement(jacoco,
'minimum' +
item_name.capitalize() +
'Coverage').text = str(item_values.get('unhealthy', 0))
def ftp(parser, xml_parent, data):
"""yaml: ftp
Upload files via FTP.
Requires the Jenkins `Publish over FTP Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Publish+Over+FTP+Plugin>`_
:arg str site: name of the ftp site
:arg str target: destination directory
:arg bool target-is-date-format: whether target is a date format. If true,
raw text should be quoted (defaults to False)
:arg bool clean-remote: should the remote directory be deleted before
transferring files (defaults to False)
:arg str source: source path specifier
:arg str excludes: excluded file pattern (optional)
:arg str remove-prefix: prefix to remove from uploaded file paths
(optional)
:arg bool fail-on-error: fail the build if an error occurs (defaults to
False).
Example::
publishers:
- ftp:
site: 'ftp.example.com'
target: 'dest/dir'
source: 'base/source/dir/**'
remove-prefix: 'base/source/dir'
excludes: '**/*.excludedfiletype'
"""
console_prefix = 'FTP: '
plugin_tag = 'jenkins.plugins.publish__over__ftp.BapFtpPublisherPlugin'
publisher_tag = 'jenkins.plugins.publish__over__ftp.BapFtpPublisher'
transfer_tag = 'jenkins.plugins.publish__over__ftp.BapFtpTransfer'
plugin_reference_tag = 'jenkins.plugins.publish_over_ftp.' \
'BapFtpPublisherPlugin'
(_, transfer_node) = base_publish_over(xml_parent,
data,
console_prefix,
plugin_tag,
publisher_tag,
transfer_tag,
plugin_reference_tag)
XML.SubElement(transfer_node, 'asciiMode').text = 'false'
def junit(parser, xml_parent, data):
"""yaml: junit
Publish JUnit test results.
:arg str results: results filename
:arg bool keep-long-stdio: Retain long standard output/error in test
results (default true).
:arg bool test-stability: Add historical information about test
results stability (default false).
Requires the Jenkins `Test stability Plugin
<https://wiki.jenkins-ci.org/display/JENKINS/Test+stability+plugin>`_.
Minimal example using defaults:
.. literalinclude:: /../../tests/publishers/fixtures/junit001.yaml
Full example:
.. literalinclude:: /../../tests/publishers/fixtures/junit002.yaml
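
    An illustrative sketch (the results pattern is a placeholder)::

      publishers:
        - junit:
            results: nosetests.xml
            keep-long-stdio: false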
"""
junitresult = XML.SubElement(xml_parent,
'hudson.tasks.junit.JUnitResultArchiver')
XML.SubElement(junitresult, 'testResults').text = data['results']
XML.SubElement(junitresult, 'keepLongStdio').text = str(
data.get('keep-long-stdio', True)).lower()
datapublisher = XML.SubElement(junitresult, 'testDataPublishers')
if str(data.get('test-stability', False)).lower() == 'true':
XML.SubElement(datapublisher,
'de.esailors.jenkins.teststability'
'.StabilityTestDataPublisher')
def xunit(parser, xml_parent, data):
"""yaml: xunit
Publish tests results. Requires the Jenkins `xUnit Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/xUnit+Plugin>`_
:arg str thresholdmode: whether thresholds represents an absolute \
number of tests or a percentage. Either 'number' or 'percent', will \
default to 'number' if omitted.
:arg dict thresholds: list containing the thresholds for both \
'failed' and 'skipped' tests. Each entry should in turn have a \
list of "threshold name: values". The threshold names are \
'unstable', 'unstablenew', 'failure', 'failurenew'. Omitting a \
value will resort on xUnit default value (should be 0).
:arg dict types: per framework configuration. The key should be \
one of the internal types we support:\
'aunit', 'boosttest', 'checktype', 'cpptest', 'cppunit', 'fpcunit', \
'junit', 'mstest', 'nunit', 'phpunit', 'tusar', 'unittest', 'valgrind'. \
The 'custom' type is not supported.
Each framework type can be configured using the following parameters:
:arg str pattern: An Ant pattern to look for Junit result files, \
relative to the workspace root.
:arg bool requireupdate: fail the build whenever fresh tests \
results have not been found (default: true).
:arg bool deleteoutput: delete temporary JUnit files (default: true)
:arg bool stoponerror: Fail the build whenever an error occur during \
a result file processing (default: true).
Example::
publishers:
- xunit:
thresholdmode: 'percent'
thresholds:
- failed:
unstable: 0
unstablenew: 0
failure: 0
failurenew: 0
- skipped:
unstable: 0
unstablenew: 0
failure: 0
failurenew: 0
types:
- phpunit:
pattern: junit.log
- cppUnit:
pattern: cppunit.log
"""
logger = logging.getLogger(__name__)
xunit = XML.SubElement(xml_parent, 'xunit')
# Map our internal types to the XML element names used by Jenkins plugin
types_to_plugin_types = {
'aunit': 'AUnitJunitHudsonTestType',
'boosttest': 'BoostTestJunitHudsonTestType',
'checktype': 'CheckType',
'cpptest': 'CppTestJunitHudsonTestType',
'cppunit': 'CppUnitJunitHudsonTestType',
'fpcunit': 'FPCUnitJunitHudsonTestType',
'junit': 'JUnitType',
'mstest': 'MSTestJunitHudsonTestType',
'nunit': 'NUnitJunitHudsonTestType',
'phpunit': 'PHPUnitJunitHudsonTestType',
'tusar': 'TUSARJunitHudsonTestType',
'unittest': 'UnitTestJunitHudsonTestType',
'valgrind': 'ValgrindJunitHudsonTestType',
# FIXME should implement the 'custom' type
}
implemented_types = types_to_plugin_types.keys() # shortcut
# Unit framework we are going to generate xml for
supported_types = []
for configured_type in data['types']:
type_name = configured_type.keys()[0]
if type_name not in implemented_types:
logger.warn("Requested xUnit type '%s' is not yet supported" %
type_name)
else:
# Append for generation
supported_types.append(configured_type)
# Generate XML for each of the supported framework types
xmltypes = XML.SubElement(xunit, 'types')
for supported_type in supported_types:
framework_name = supported_type.keys()[0]
xmlframework = XML.SubElement(xmltypes,
types_to_plugin_types[framework_name])
XML.SubElement(xmlframework, 'pattern').text = \
supported_type[framework_name].get('pattern', '')
XML.SubElement(xmlframework, 'failIfNotNew').text = \
str(supported_type[framework_name].get(
'requireupdate', True)).lower()
XML.SubElement(xmlframework, 'deleteOutputFiles').text = \
str(supported_type[framework_name].get(
'deleteoutput', True)).lower()
XML.SubElement(xmlframework, 'stopProcessingIfError').text = \
str(supported_type[framework_name].get(
'stoponerror', True)).lower()
xmlthresholds = XML.SubElement(xunit, 'thresholds')
if 'thresholds' in data:
for t in data['thresholds']:
if not ('failed' in t or 'skipped' in t):
logger.warn(
"Unrecognized threshold, should be 'failed' or 'skipped'")
continue
elname = "org.jenkinsci.plugins.xunit.threshold.%sThreshold" \
% t.keys()[0].title()
el = XML.SubElement(xmlthresholds, elname)
for threshold_name, threshold_value in t.values()[0].items():
# Normalize and craft the element name for this threshold
elname = "%sThreshold" % threshold_name.lower().replace(
'new', 'New')
XML.SubElement(el, elname).text = threshold_value
# Whether to use percent of exact number of tests.
# Thresholdmode is either:
# - 1 : absolute (number of tests), default.
# - 2 : relative (percentage of tests)
thresholdmode = '1'
if 'percent' == data.get('thresholdmode', 'number'):
thresholdmode = '2'
XML.SubElement(xunit, 'thresholdMode').text = \
thresholdmode
def _violations_add_entry(xml_parent, name, data):
vmin = data.get('min', 10)
vmax = data.get('max', 999)
vunstable = data.get('unstable', 999)
pattern = data.get('pattern', None)
entry = XML.SubElement(xml_parent, 'entry')
XML.SubElement(entry, 'string').text = name
tconfig = XML.SubElement(entry, 'hudson.plugins.violations.TypeConfig')
XML.SubElement(tconfig, 'type').text = name
XML.SubElement(tconfig, 'min').text = str(vmin)
XML.SubElement(tconfig, 'max').text = str(vmax)
XML.SubElement(tconfig, 'unstable').text = str(vunstable)
XML.SubElement(tconfig, 'usePattern').text = 'false'
if pattern:
XML.SubElement(tconfig, 'pattern').text = pattern
else:
XML.SubElement(tconfig, 'pattern')
def violations(parser, xml_parent, data):
"""yaml: violations
Publish code style violations.
Requires the Jenkins `Violations Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Violations>`_
The violations component accepts any number of dictionaries keyed
by the name of the violations system. The dictionary has the
following values:
:arg int min: sunny threshold
:arg int max: stormy threshold
:arg int unstable: unstable threshold
:arg str pattern: report filename pattern
Any system without a dictionary provided will use default values.
Valid systems are:
checkstyle, codenarc, cpd, cpplint, csslint, findbugs, fxcop,
gendarme, jcreport, jslint, pep8, pmd, pylint, simian, stylecop
Example::
publishers:
- violations:
pep8:
min: 0
max: 1
unstable: 1
pattern: '**/pep8.txt'
"""
violations = XML.SubElement(xml_parent,
'hudson.plugins.violations.'
'ViolationsPublisher')
config = XML.SubElement(violations, 'config')
suppressions = XML.SubElement(config, 'suppressions',
{'class': 'tree-set'})
XML.SubElement(suppressions, 'no-comparator')
configs = XML.SubElement(config, 'typeConfigs')
XML.SubElement(configs, 'no-comparator')
for name in [
'checkstyle',
'codenarc',
'cpd',
'cpplint',
'csslint',
'findbugs',
'fxcop',
'gendarme',
'jcreport',
'jslint',
'pep8',
'pmd',
'pylint',
'simian',
'stylecop']:
_violations_add_entry(configs, name, data.get(name, {}))
XML.SubElement(config, 'limit').text = '100'
XML.SubElement(config, 'sourcePathPattern')
XML.SubElement(config, 'fauxProjectPath')
XML.SubElement(config, 'encoding').text = 'default'
def checkstyle(parser, xml_parent, data):
"""yaml: checkstyle
Publish trend reports with Checkstyle.
Requires the Jenkins `Checkstyle Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Checkstyle+Plugin>`_
The checkstyle component accepts a dictionary with the
following values:
:arg str pattern: report filename pattern
:arg bool canRunOnFailed: also runs for failed builds
(instead of just stable or unstable builds)
    :arg bool shouldDetectModules: detect Ant or Maven modules for the
        files that contain warnings (default false)
:arg int healthy: sunny threshold
:arg int unHealthy: stormy threshold
:arg str healthThreshold: threshold priority for health status
(high: only high, normal: high and normal, low: all)
:arg dict thresholds:
:thresholds:
* **unstable** (`dict`)
:unstable: * **totalAll** (`int`)
* **totalHigh** (`int`)
* **totalNormal** (`int`)
* **totalLow** (`int`)
* **failed** (`dict`)
:failed: * **totalAll** (`int`)
* **totalHigh** (`int`)
* **totalNormal** (`int`)
* **totalLow** (`int`)
:arg str defaultEncoding: encoding for parsing or showing files
(empty will use platform default)
Example::
publishers:
- checkstyle:
pattern: '**/checkstyle-result.xml'
healthy: 0
unHealthy: 100
healthThreshold: 'high'
thresholds:
unstable:
totalHigh: 10
failed:
totalHigh: 1
"""
checkstyle = XML.SubElement(xml_parent,
'hudson.plugins.checkstyle.'
'CheckStylePublisher')
dval = data.get('healthy', None)
if dval:
XML.SubElement(checkstyle, 'healthy').text = str(dval)
else:
XML.SubElement(checkstyle, 'healthy')
dval = data.get('unHealthy', None)
if dval:
XML.SubElement(checkstyle, 'unHealthy').text = str(dval)
else:
XML.SubElement(checkstyle, 'unHealthy')
XML.SubElement(checkstyle, 'thresholdLimit').text = \
data.get('healthThreshold', 'low')
XML.SubElement(checkstyle, 'pluginName').text = '[CHECKSTYLE] '
XML.SubElement(checkstyle, 'defaultEncoding').text = \
data.get('defaultEncoding', '')
if data.get('canRunOnFailed', False):
XML.SubElement(checkstyle, 'canRunOnFailed').text = 'true'
else:
XML.SubElement(checkstyle, 'canRunOnFailed').text = 'false'
XML.SubElement(checkstyle, 'useStableBuildAsReference').text = 'false'
XML.SubElement(checkstyle, 'useDeltaValues').text = 'false'
dthresholds = data.get('thresholds', {})
dunstable = dthresholds.get('unstable', {})
dfailed = dthresholds.get('failed', {})
thresholds = XML.SubElement(checkstyle, 'thresholds')
dval = dunstable.get('totalAll', None)
if dval:
XML.SubElement(thresholds, 'unstableTotalAll').text = str(dval)
else:
XML.SubElement(thresholds, 'unstableTotalAll')
dval = dunstable.get('totalHigh', None)
if dval:
XML.SubElement(thresholds, 'unstableTotalHigh').text = str(dval)
else:
XML.SubElement(thresholds, 'unstableTotalHigh')
dval = dunstable.get('totalNormal', None)
if dval:
XML.SubElement(thresholds, 'unstableTotalNormal').text = str(dval)
else:
XML.SubElement(thresholds, 'unstableTotalNormal')
dval = dunstable.get('totalLow', None)
if dval:
XML.SubElement(thresholds, 'unstableTotalLow').text = str(dval)
else:
XML.SubElement(thresholds, 'unstableTotalLow')
dval = dfailed.get('totalAll', None)
if dval:
XML.SubElement(thresholds, 'failedTotalAll').text = str(dval)
else:
XML.SubElement(thresholds, 'failedTotalAll')
dval = dfailed.get('totalHigh', None)
if dval:
XML.SubElement(thresholds, 'failedTotalHigh').text = str(dval)
else:
XML.SubElement(thresholds, 'failedTotalHigh')
dval = dfailed.get('totalNormal', None)
if dval:
XML.SubElement(thresholds, 'failedTotalNormal').text = str(dval)
else:
XML.SubElement(thresholds, 'failedTotalNormal')
dval = dfailed.get('totalLow', None)
if dval:
XML.SubElement(thresholds, 'failedTotalLow').text = str(dval)
else:
XML.SubElement(thresholds, 'failedTotalLow')
if data.get('shouldDetectModules', False):
XML.SubElement(checkstyle, 'shouldDetectModules').text = 'true'
else:
XML.SubElement(checkstyle, 'shouldDetectModules').text = 'false'
XML.SubElement(checkstyle, 'dontComputeNew').text = 'true'
XML.SubElement(checkstyle, 'doNotResolveRelativePaths').text = 'false'
XML.SubElement(checkstyle, 'pattern').text = data.get('pattern', '')
def scp(parser, xml_parent, data):
"""yaml: scp
Upload files via SCP
Requires the Jenkins `SCP Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/SCP+plugin>`_
    :arg str site: name of the scp site
    :arg list files: list of files to upload; each entry accepts the
        settings below
:arg str target: destination directory
:arg str source: source path specifier
:arg bool keep-hierarchy: keep the file hierarchy when uploading
(default false)
:arg bool copy-after-failure: copy files even if the job fails
(default false)
:arg bool copy-console: copy the console log (default false); if
specified, omit 'target'
Example:
.. literalinclude:: /../../tests/publishers/fixtures/scp001.yaml
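
    An illustrative sketch (site name and paths are placeholders)::

      publishers:
        - scp:
            site: 'example.com'
            files:
              - target: 'dest/dir'
                source: 'base/source/dir/**'
                keep-hierarchy: true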
"""
site = data['site']
scp = XML.SubElement(xml_parent,
'be.certipost.hudson.plugin.SCPRepositoryPublisher')
XML.SubElement(scp, 'siteName').text = site
entries = XML.SubElement(scp, 'entries')
for entry in data['files']:
entry_e = XML.SubElement(entries, 'be.certipost.hudson.plugin.Entry')
XML.SubElement(entry_e, 'filePath').text = entry['target']
XML.SubElement(entry_e, 'sourceFile').text = entry.get('source', '')
if entry.get('keep-hierarchy', False):
XML.SubElement(entry_e, 'keepHierarchy').text = 'true'
else:
XML.SubElement(entry_e, 'keepHierarchy').text = 'false'
if entry.get('copy-console', False):
XML.SubElement(entry_e, 'copyConsoleLog').text = 'true'
else:
XML.SubElement(entry_e, 'copyConsoleLog').text = 'false'
if entry.get('copy-after-failure', False):
XML.SubElement(entry_e, 'copyAfterFailure').text = 'true'
else:
XML.SubElement(entry_e, 'copyAfterFailure').text = 'false'
def ssh(parser, xml_parent, data):
"""yaml: ssh
Upload files via SCP.
Requires the Jenkins `Publish over SSH Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Publish+Over+SSH+Plugin>`_
:arg str site: name of the ssh site
:arg str target: destination directory
:arg bool target-is-date-format: whether target is a date format. If true,
raw text should be quoted (defaults to False)
:arg bool clean-remote: should the remote directory be deleted before
transferring files (defaults to False)
:arg str source: source path specifier
:arg str command: a command to execute on the remote server (optional)
:arg int timeout: timeout in milliseconds for the Exec command (optional)
:arg bool use-pty: run the exec command in pseudo TTY (defaults to False)
:arg str excludes: excluded file pattern (optional)
:arg str remove-prefix: prefix to remove from uploaded file paths
(optional)
:arg bool fail-on-error: fail the build if an error occurs (defaults to
False).
Example::
publishers:
- ssh:
site: 'server.example.com'
target: 'dest/dir'
source: 'base/source/dir/**'
remove-prefix: 'base/source/dir'
excludes: '**/*.excludedfiletype'
use-pty: true
command: 'rm -r jenkins_$BUILD_NUMBER'
timeout: 1800000
"""
console_prefix = 'SSH: '
plugin_tag = 'jenkins.plugins.publish__over__ssh.BapSshPublisherPlugin'
publisher_tag = 'jenkins.plugins.publish__over__ssh.BapSshPublisher'
transfer_tag = 'jenkins.plugins.publish__over__ssh.BapSshTransfer'
plugin_reference_tag = 'jenkins.plugins.publish_over_ssh.' \
'BapSshPublisherPlugin'
base_publish_over(xml_parent,
data,
console_prefix,
plugin_tag,
publisher_tag,
transfer_tag,
plugin_reference_tag)
def pipeline(parser, xml_parent, data):
"""yaml: pipeline
Specify a downstream project in a pipeline.
Requires the Jenkins `Build Pipeline Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Build+Pipeline+Plugin>`_
:arg str project: the name of the downstream project
:arg str predefined-parameters: parameters to pass to the other
job (optional)
:arg bool current-parameters: Whether to include the parameters passed
to the current build to the triggered job (optional)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/pipeline002.yaml
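
    An illustrative sketch (project name and parameters are placeholders)::

      publishers:
        - pipeline:
            project: deploy-job
            predefined-parameters: BRANCH=master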
You can build pipeline jobs that are re-usable in different pipelines by
using a :ref:`job-template` to define the pipeline jobs,
and variable substitution to specify the name of
the downstream job in the pipeline.
Job-specific substitutions are useful here (see :ref:`project`).
See 'samples/pipeline.yaml' for an example pipeline implementation.
"""
if 'project' in data and data['project'] != '':
pippub = XML.SubElement(xml_parent,
'au.com.centrumsystems.hudson.plugin.'
'buildpipeline.trigger.BuildPipelineTrigger')
configs = XML.SubElement(pippub, 'configs')
if 'predefined-parameters' in data:
params = XML.SubElement(configs,
'hudson.plugins.parameterizedtrigger.'
'PredefinedBuildParameters')
properties = XML.SubElement(params, 'properties')
properties.text = data['predefined-parameters']
if ('current-parameters' in data
and data['current-parameters']):
XML.SubElement(configs,
'hudson.plugins.parameterizedtrigger.'
'CurrentBuildParameters')
XML.SubElement(pippub, 'downstreamProjectNames').text = data['project']
def email(parser, xml_parent, data):
"""yaml: email
Email notifications on build failure.
:arg str recipients: Recipient email addresses
:arg bool notify-every-unstable-build: Send an email for every
unstable build (default true)
:arg bool send-to-individuals: Send an email to the individual
who broke the build (default false)
Example::
publishers:
- email:
recipients: [email protected]
"""
# TODO: raise exception if this is applied to a maven job
mailer = XML.SubElement(xml_parent,
'hudson.tasks.Mailer')
XML.SubElement(mailer, 'recipients').text = data['recipients']
    # Note the logic reversal (included here to match the GUI)
if data.get('notify-every-unstable-build', True):
XML.SubElement(mailer, 'dontNotifyEveryUnstableBuild').text = 'false'
else:
XML.SubElement(mailer, 'dontNotifyEveryUnstableBuild').text = 'true'
XML.SubElement(mailer, 'sendToIndividuals').text = str(
data.get('send-to-individuals', False)).lower()
def claim_build(parser, xml_parent, data):
"""yaml: claim-build
Claim build failures
Requires the Jenkins `Claim Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Claim+plugin>`_
Example::
publishers:
- claim-build
"""
XML.SubElement(xml_parent, 'hudson.plugins.claim.ClaimPublisher')
def base_email_ext(parser, xml_parent, data, ttype):
trigger = XML.SubElement(xml_parent,
'hudson.plugins.emailext.plugins.trigger.'
+ ttype)
email = XML.SubElement(trigger, 'email')
XML.SubElement(email, 'recipientList').text = ''
XML.SubElement(email, 'subject').text = '$PROJECT_DEFAULT_SUBJECT'
XML.SubElement(email, 'body').text = '$PROJECT_DEFAULT_CONTENT'
XML.SubElement(email, 'sendToDevelopers').text = 'false'
XML.SubElement(email, 'sendToRequester').text = 'false'
XML.SubElement(email, 'includeCulprits').text = 'false'
XML.SubElement(email, 'sendToRecipientList').text = 'true'
def email_ext(parser, xml_parent, data):
"""yaml: email-ext
Extend Jenkin's built in email notification
Requires the Jenkins `Email-ext Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Email-ext+plugin>`_
:arg str recipients: Comma separated list of emails
:arg str reply-to: Comma separated list of emails that should be in
the Reply-To header for this project (default is $DEFAULT_RECIPIENTS)
:arg str content-type: The content type of the emails sent. If not set, the
Jenkins plugin uses the value set on the main configuration page.
Possible values: 'html', 'text' or 'default' (default 'default')
:arg str subject: Subject for the email, can include variables like
${BUILD_NUMBER} or even groovy or javascript code
:arg str body: Content for the body of the email, can include variables
like ${BUILD_NUMBER}, but the real magic is using groovy or
javascript to hook into the Jenkins API itself
:arg bool attach-build-log: Include build log in the email (default false)
:arg str attachments: pattern of files to include as attachment (optional)
:arg bool unstable: Send an email for an unstable result (default false)
:arg bool first-failure: Send an email for just the first failure
(default false)
:arg bool not-built: Send an email if not built (default false)
:arg bool aborted: Send an email if the build is aborted (default false)
:arg bool regression: Send an email if there is a regression
(default false)
:arg bool failure: Send an email if the build fails (default true)
:arg bool improvement: Send an email if the build improves (default false)
:arg bool still-failing: Send an email if the build is still failing
(default false)
:arg bool success: Send an email for a successful build (default false)
:arg bool fixed: Send an email if the build is fixed (default false)
:arg bool still-unstable: Send an email if the build is still unstable
(default false)
:arg bool pre-build: Send an email before the build (default false)
:arg str matrix-trigger: If using matrix projects, when to trigger
:matrix-trigger values:
* **both**
* **only-parent**
* **only-configurations**
Example:
.. literalinclude:: /../../tests/publishers/fixtures/email-ext001.yaml
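    A minimal inline configuration, with illustrative values, could be::
        publishers:
          - email-ext:
              recipients: [email protected]
              subject: 'Build ${BUILD_NUMBER} finished'
              failure: true
              fixed: true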
"""
emailext = XML.SubElement(xml_parent,
'hudson.plugins.emailext.ExtendedEmailPublisher')
if 'recipients' in data:
XML.SubElement(emailext, 'recipientList').text = data['recipients']
else:
XML.SubElement(emailext, 'recipientList').text = '$DEFAULT_RECIPIENTS'
ctrigger = XML.SubElement(emailext, 'configuredTriggers')
if data.get('unstable', False):
base_email_ext(parser, ctrigger, data, 'UnstableTrigger')
if data.get('first-failure', False):
base_email_ext(parser, ctrigger, data, 'FirstFailureTrigger')
if data.get('not-built', False):
base_email_ext(parser, ctrigger, data, 'NotBuiltTrigger')
if data.get('aborted', False):
base_email_ext(parser, ctrigger, data, 'AbortedTrigger')
if data.get('regression', False):
base_email_ext(parser, ctrigger, data, 'RegressionTrigger')
if data.get('failure', True):
base_email_ext(parser, ctrigger, data, 'FailureTrigger')
if data.get('improvement', False):
base_email_ext(parser, ctrigger, data, 'ImprovementTrigger')
if data.get('still-failing', False):
base_email_ext(parser, ctrigger, data, 'StillFailingTrigger')
if data.get('success', False):
base_email_ext(parser, ctrigger, data, 'SuccessTrigger')
if data.get('fixed', False):
base_email_ext(parser, ctrigger, data, 'FixedTrigger')
if data.get('still-unstable', False):
base_email_ext(parser, ctrigger, data, 'StillUnstableTrigger')
if data.get('pre-build', False):
base_email_ext(parser, ctrigger, data, 'PreBuildTrigger')
content_type_mime = {
'text': 'text/plain',
'html': 'text/html',
'default': 'default',
}
ctype = data.get('content-type', 'default')
if ctype not in content_type_mime:
raise JenkinsJobsException('email-ext content type must be one of: %s'
% ', '.join(content_type_mime.keys()))
XML.SubElement(emailext, 'contentType').text = content_type_mime[ctype]
XML.SubElement(emailext, 'defaultSubject').text = data.get(
'subject', '$DEFAULT_SUBJECT')
XML.SubElement(emailext, 'defaultContent').text = data.get(
'body', '$DEFAULT_CONTENT')
XML.SubElement(emailext, 'attachmentsPattern').text = data.get(
'attachments', '')
XML.SubElement(emailext, 'presendScript').text = ''
XML.SubElement(emailext, 'attachBuildLog').text = \
str(data.get('attach-build-log', False)).lower()
XML.SubElement(emailext, 'replyTo').text = data.get('reply-to',
'$DEFAULT_RECIPIENTS')
matrix_dict = {'both': 'BOTH',
'only-configurations': 'ONLY_CONFIGURATIONS',
'only-parent': 'ONLY_PARENT'}
matrix_trigger = data.get('matrix-trigger', None)
    # If none defined, then do not create entry
if matrix_trigger is not None:
if matrix_trigger not in matrix_dict:
raise JenkinsJobsException("matrix-trigger entered is not valid, "
"must be one of: %s" %
", ".join(matrix_dict.keys()))
XML.SubElement(emailext, 'matrixTriggerMode').text = \
matrix_dict.get(matrix_trigger)
def fingerprint(parser, xml_parent, data):
"""yaml: fingerprint
Fingerprint files to track them across builds
:arg str files: files to fingerprint, follows the @includes of Ant fileset
(default is blank)
:arg bool record-artifacts: fingerprint all archived artifacts
(default false)
Example::
publishers:
- fingerprint:
files: builddir/test*.xml
record-artifacts: false
"""
finger = XML.SubElement(xml_parent, 'hudson.tasks.Fingerprinter')
XML.SubElement(finger, 'targets').text = data.get('files', '')
XML.SubElement(finger, 'recordBuildArtifacts').text = str(data.get(
'record-artifacts', False)).lower()
def aggregate_tests(parser, xml_parent, data):
"""yaml: aggregate-tests
Aggregate downstream test results
    :arg bool include-failed-builds: whether to include failed builds
        (default false)
Example::
publishers:
- aggregate-tests:
include-failed-builds: true
"""
agg = XML.SubElement(xml_parent,
'hudson.tasks.test.AggregatedTestResultPublisher')
XML.SubElement(agg, 'includeFailedBuilds').text = str(data.get(
'include-failed-builds', False)).lower()
def cppcheck(parser, xml_parent, data):
"""yaml: cppcheck
Cppcheck result publisher
Requires the Jenkins `Cppcheck Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Cppcheck+Plugin>`_
:arg str pattern: file pattern for cppcheck xml report
for more optional parameters see the example
Example::
publishers:
- cppcheck:
pattern: "**/cppcheck.xml"
# the rest is optional
# build status (new) error count thresholds
thresholds:
unstable: 5
new-unstable: 5
failure: 7
new-failure: 3
# severities which count towards the threshold, default all true
severity:
error: true
warning: true
information: false
graph:
xysize: [500, 200]
# which errors to display, default only sum
display:
sum: false
error: true
"""
cppextbase = XML.SubElement(xml_parent,
'org.jenkinsci.plugins.cppcheck.'
'CppcheckPublisher')
cppext = XML.SubElement(cppextbase, 'cppcheckConfig')
XML.SubElement(cppext, 'pattern').text = data['pattern']
XML.SubElement(cppext, 'ignoreBlankFiles').text = \
str(data.get('ignoreblankfiles', False)).lower()
csev = XML.SubElement(cppext, 'configSeverityEvaluation')
thrsh = data.get('thresholds', {})
XML.SubElement(csev, 'threshold').text = str(thrsh.get('unstable', ''))
XML.SubElement(csev, 'newThreshold').text = \
str(thrsh.get('new-unstable', ''))
XML.SubElement(csev, 'failureThreshold').text = \
str(thrsh.get('failure', ''))
XML.SubElement(csev, 'newFailureThreshold').text = \
str(thrsh.get('new-failure', ''))
XML.SubElement(csev, 'healthy').text = str(thrsh.get('healthy', ''))
XML.SubElement(csev, 'unHealthy').text = str(thrsh.get('unhealthy', ''))
sev = thrsh.get('severity', {})
XML.SubElement(csev, 'severityError').text = \
str(sev.get('error', True)).lower()
XML.SubElement(csev, 'severityWarning').text = \
str(sev.get('warning', True)).lower()
XML.SubElement(csev, 'severityStyle').text = \
str(sev.get('style', True)).lower()
XML.SubElement(csev, 'severityPerformance').text = \
str(sev.get('performance', True)).lower()
XML.SubElement(csev, 'severityInformation').text = \
str(sev.get('information', True)).lower()
graph = data.get('graph', {})
cgraph = XML.SubElement(cppext, 'configGraph')
x, y = graph.get('xysize', [500, 200])
XML.SubElement(cgraph, 'xSize').text = str(x)
XML.SubElement(cgraph, 'ySize').text = str(y)
gdisplay = graph.get('display', {})
XML.SubElement(cgraph, 'displayAllErrors').text = \
str(gdisplay.get('sum', True)).lower()
XML.SubElement(cgraph, 'displayErrorSeverity').text = \
str(gdisplay.get('error', False)).lower()
XML.SubElement(cgraph, 'displayWarningSeverity').text = \
str(gdisplay.get('warning', False)).lower()
XML.SubElement(cgraph, 'displayStyleSeverity').text = \
str(gdisplay.get('style', False)).lower()
XML.SubElement(cgraph, 'displayPerformanceSeverity').text = \
str(gdisplay.get('performance', False)).lower()
XML.SubElement(cgraph, 'displayInformationSeverity').text = \
str(gdisplay.get('information', False)).lower()
def logparser(parser, xml_parent, data):
"""yaml: logparser
    Parse the console log for errors and warnings using user-defined rules.
    Requires the Jenkins `Log Parser Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Log+Parser+Plugin>`_
    :arg str parse-rules: full path to parse rules
    :arg bool unstable-on-warning: mark build unstable on warning
        (default false)
    :arg bool fail-on-error: mark build failed on error (default false)
Example::
publishers:
- logparser:
parse-rules: "/path/to/parserules"
unstable-on-warning: true
fail-on-error: true
"""
clog = XML.SubElement(xml_parent,
'hudson.plugins.logparser.LogParserPublisher')
XML.SubElement(clog, 'unstableOnWarning').text = \
str(data.get('unstable-on-warning', False)).lower()
XML.SubElement(clog, 'failBuildOnError').text = \
str(data.get('fail-on-error', False)).lower()
# v1.08: this must be the full path, the name of the rules is not enough
XML.SubElement(clog, 'parsingRulesPath').text = data.get('parse-rules', '')
def copy_to_master(parser, xml_parent, data):
"""yaml: copy-to-master
Copy files to master from slave
Requires the Jenkins `Copy To Slave Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Copy+To+Slave+Plugin>`_
:arg list includes: list of file patterns to copy
:arg list excludes: list of file patterns to exclude
:arg string destination: absolute path into which the files will be copied.
If left blank they will be copied into the
workspace of the current job
Example::
publishers:
- copy-to-master:
includes:
- file1
- file2*.txt
excludes:
- file2bad.txt
"""
p = 'com.michelin.cio.hudson.plugins.copytoslave.CopyToMasterNotifier'
cm = XML.SubElement(xml_parent, p)
XML.SubElement(cm, 'includes').text = ','.join(data.get('includes', ['']))
XML.SubElement(cm, 'excludes').text = ','.join(data.get('excludes', ['']))
XML.SubElement(cm, 'destinationFolder').text = \
data.get('destination', '')
if data.get('destination', ''):
XML.SubElement(cm, 'overrideDestinationFolder').text = 'true'
def jira(parser, xml_parent, data):
"""yaml: jira
Update relevant JIRA issues
Requires the Jenkins `JIRA Plugin
<https://wiki.jenkins-ci.org/display/JENKINS/JIRA+Plugin>`_
Example::
publishers:
- jira
"""
XML.SubElement(xml_parent, 'hudson.plugins.jira.JiraIssueUpdater')
def groovy_postbuild(parser, xml_parent, data):
"""yaml: groovy-postbuild
Execute a groovy script.
Requires the Jenkins `Groovy Postbuild Plugin
<https://wiki.jenkins-ci.org/display/JENKINS/Groovy+Postbuild+Plugin>`_
:Parameter: the groovy script to execute
Example::
publishers:
- groovy-postbuild: "manager.buildFailure()"
"""
root_tag = 'org.jvnet.hudson.plugins.groovypostbuild.'\
'GroovyPostbuildRecorder'
groovy = XML.SubElement(xml_parent, root_tag)
XML.SubElement(groovy, 'groovyScript').text = data
def base_publish_over(xml_parent, data, console_prefix,
plugin_tag, publisher_tag,
transferset_tag, reference_plugin_tag):
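    """Build the XML structure shared by the publish-over-* plugins.
    Returns a tuple of (plugin element, transfer-set element) so callers can
    attach plugin-specific settings.
    """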
outer = XML.SubElement(xml_parent, plugin_tag)
XML.SubElement(outer, 'consolePrefix').text = console_prefix
delegate = XML.SubElement(outer, 'delegate')
publishers = XML.SubElement(delegate, 'publishers')
inner = XML.SubElement(publishers, publisher_tag)
XML.SubElement(inner, 'configName').text = data['site']
XML.SubElement(inner, 'verbose').text = 'true'
transfers = XML.SubElement(inner, 'transfers')
transfersset = XML.SubElement(transfers, transferset_tag)
XML.SubElement(transfersset, 'remoteDirectory').text = data['target']
XML.SubElement(transfersset, 'sourceFiles').text = data['source']
if 'command' in data:
XML.SubElement(transfersset, 'execCommand').text = data['command']
if 'timeout' in data:
XML.SubElement(transfersset, 'execTimeout').text = str(data['timeout'])
if 'use-pty' in data:
XML.SubElement(transfersset, 'usePty').text = \
str(data.get('use-pty', False)).lower()
XML.SubElement(transfersset, 'excludes').text = data.get('excludes', '')
XML.SubElement(transfersset, 'removePrefix').text = \
data.get('remove-prefix', '')
XML.SubElement(transfersset, 'remoteDirectorySDF').text = \
str(data.get('target-is-date-format', False)).lower()
XML.SubElement(transfersset, 'flatten').text = 'false'
XML.SubElement(transfersset, 'cleanRemote').text = \
str(data.get('clean-remote', False)).lower()
XML.SubElement(inner, 'useWorkspaceInPromotion').text = 'false'
XML.SubElement(inner, 'usePromotionTimestamp').text = 'false'
XML.SubElement(delegate, 'continueOnError').text = 'false'
XML.SubElement(delegate, 'failOnError').text = \
str(data.get('fail-on-error', False)).lower()
XML.SubElement(delegate, 'alwaysPublishFromMaster').text = 'false'
XML.SubElement(delegate, 'hostConfigurationAccess',
{'class': reference_plugin_tag,
'reference': '../..'})
return (outer, transfersset)
def cifs(parser, xml_parent, data):
"""yaml: cifs
Upload files via CIFS.
Requires the Jenkins `Publish over CIFS Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Publish+Over+CIFS+Plugin>`_
:arg str site: name of the cifs site/share
:arg str target: destination directory
:arg bool target-is-date-format: whether target is a date format. If true,
raw text should be quoted (defaults to False)
:arg bool clean-remote: should the remote directory be deleted before
transferring files (defaults to False)
:arg str source: source path specifier
:arg str excludes: excluded file pattern (optional)
:arg str remove-prefix: prefix to remove from uploaded file paths
(optional)
:arg bool fail-on-error: fail the build if an error occurs (defaults to
False).
Example::
publishers:
- cifs:
site: 'cifs.share'
target: 'dest/dir'
source: 'base/source/dir/**'
remove-prefix: 'base/source/dir'
excludes: '**/*.excludedfiletype'
"""
console_prefix = 'CIFS: '
plugin_tag = 'jenkins.plugins.publish__over__cifs.CifsPublisherPlugin'
publisher_tag = 'jenkins.plugins.publish__over__cifs.CifsPublisher'
transfer_tag = 'jenkins.plugins.publish__over__cifs.CifsTransfer'
plugin_reference_tag = 'jenkins.plugins.publish_over_cifs.' \
'CifsPublisherPlugin'
base_publish_over(xml_parent,
data,
console_prefix,
plugin_tag,
publisher_tag,
transfer_tag,
plugin_reference_tag)
def sonar(parser, xml_parent, data):
"""yaml: sonar
Sonar plugin support.
Requires the Jenkins `Sonar Plugin.
<http://docs.codehaus.org/pages/viewpage.action?pageId=116359341>`_
:arg str jdk: JDK to use (inherited from the job if omitted). (optional)
:arg str branch: branch onto which the analysis will be posted (optional)
:arg str language: source code language (optional)
:arg str maven-opts: options given to maven (optional)
:arg str additional-properties: sonar analysis parameters (optional)
:arg dict skip-global-triggers:
:Triggers: * **skip-when-scm-change** (`bool`): skip analysis when
build triggered by scm
* **skip-when-upstream-build** (`bool`): skip analysis when
build triggered by an upstream build
* **skip-when-envvar-defined** (`str`): skip analysis when
the specified environment variable is set to true
    This publisher supports the post-build action exposed by the Jenkins
    Sonar Plugin, which triggers a Sonar analysis with Maven.
Example::
publishers:
- sonar:
jdk: MyJdk
branch: myBranch
language: java
maven-opts: -DskipTests
additional-properties: -DsonarHostURL=http://example.com/
skip-global-triggers:
skip-when-scm-change: true
skip-when-upstream-build: true
skip-when-envvar-defined: SKIP_SONAR
"""
sonar = XML.SubElement(xml_parent, 'hudson.plugins.sonar.SonarPublisher')
if 'jdk' in data:
XML.SubElement(sonar, 'jdk').text = data['jdk']
XML.SubElement(sonar, 'branch').text = data.get('branch', '')
XML.SubElement(sonar, 'language').text = data.get('language', '')
XML.SubElement(sonar, 'mavenOpts').text = data.get('maven-opts', '')
XML.SubElement(sonar, 'jobAdditionalProperties').text = \
data.get('additional-properties', '')
if 'skip-global-triggers' in data:
data_triggers = data['skip-global-triggers']
triggers = XML.SubElement(sonar, 'triggers')
XML.SubElement(triggers, 'skipScmCause').text = \
str(data_triggers.get('skip-when-scm-change', False)).lower()
XML.SubElement(triggers, 'skipUpstreamCause').text = \
str(data_triggers.get('skip-when-upstream-build', False)).lower()
XML.SubElement(triggers, 'envVar').text = \
data_triggers.get('skip-when-envvar-defined', '')
def performance(parser, xml_parent, data):
"""yaml: performance
Publish performance test results from jmeter and junit.
Requires the Jenkins `Performance Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Performance+Plugin>`_
    :arg int failed-threshold: Specify the error percentage threshold that
                               marks the build as failed. A negative value
                               means don't use this threshold (default 0)
    :arg int unstable-threshold: Specify the error percentage threshold that
                                 marks the build as unstable. A negative value
                                 means don't use this threshold (default 0)
:arg dict report:
:(jmeter or junit): (`dict` or `str`): Specify a custom report file
         (optional; jmeter default \*\*/\*.jtl, junit default \*\*/TEST-\*.xml)
Examples::
publishers:
- performance:
failed-threshold: 85
unstable-threshold: -1
report:
- jmeter: "/special/file.jtl"
- junit: "/special/file.xml"
publishers:
- performance:
failed-threshold: 85
unstable-threshold: -1
report:
- jmeter
- junit
publishers:
- performance:
failed-threshold: 85
unstable-threshold: -1
report:
- jmeter: "/special/file.jtl"
- junit: "/special/file.xml"
- jmeter
- junit
"""
logger = logging.getLogger(__name__)
perf = XML.SubElement(xml_parent, 'hudson.plugins.performance.'
'PerformancePublisher')
XML.SubElement(perf, 'errorFailedThreshold').text = str(data.get(
'failed-threshold', 0))
XML.SubElement(perf, 'errorUnstableThreshold').text = str(data.get(
'unstable-threshold', 0))
parsers = XML.SubElement(perf, 'parsers')
for item in data['report']:
if isinstance(item, dict):
item_name = item.keys()[0]
item_values = item.get(item_name, None)
if item_name == 'jmeter':
jmhold = XML.SubElement(parsers, 'hudson.plugins.performance.'
'JMeterParser')
XML.SubElement(jmhold, 'glob').text = str(item_values)
elif item_name == 'junit':
juhold = XML.SubElement(parsers, 'hudson.plugins.performance.'
'JUnitParser')
XML.SubElement(juhold, 'glob').text = str(item_values)
else:
logger.fatal("You have not specified jmeter or junit, or "
"you have incorrectly assigned the key value.")
sys.exit(1)
elif isinstance(item, str):
if item == 'jmeter':
jmhold = XML.SubElement(parsers, 'hudson.plugins.performance.'
'JMeterParser')
XML.SubElement(jmhold, 'glob').text = '**/*.jtl'
elif item == 'junit':
juhold = XML.SubElement(parsers, 'hudson.plugins.performance.'
'JUnitParser')
XML.SubElement(juhold, 'glob').text = '**/TEST-*.xml'
else:
logger.fatal("You have not specified jmeter or junit, or "
"you have incorrectly assigned the key value.")
sys.exit(1)
def join_trigger(parser, xml_parent, data):
"""yaml: join-trigger
Trigger a job after all the immediate downstream jobs have completed
:arg list projects: list of projects to trigger
Example::
publishers:
- join-trigger:
projects:
- project-one
- project-two
"""
jointrigger = XML.SubElement(xml_parent, 'join.JoinTrigger')
# Simple Project List
joinProjectsText = ','.join(data.get('projects', ['']))
XML.SubElement(jointrigger, 'joinProjects').text = joinProjectsText
def jabber(parser, xml_parent, data):
"""yaml: jabber
Integrates Jenkins with the Jabber/XMPP instant messaging protocol
Requires the Jenkins `Jabber Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Jabber+Plugin>`_
:arg bool notify-on-build-start: Whether to send notifications
to channels when a build starts (default false)
:arg bool notify-scm-committers: Whether to send notifications
to the users that are suspected of having broken this build
(default false)
:arg bool notify-scm-culprits: Also send notifications to 'culprits'
from previous unstable/failed builds (default false)
:arg bool notify-upstream-committers: Whether to send notifications to
upstream committers if no committers were found for a broken build
(default false)
:arg bool notify-scm-fixers: Whether to send notifications to the users
that have fixed a broken build (default false)
:arg list group-targets: List of group targets to notify
:arg list individual-targets: List of individual targets to notify
:arg dict strategy: When to send notifications (default all)
:strategy values:
* **all** -- Always
* **failure** -- On any failure
* **failure-fixed** -- On failure and fixes
* **change** -- Only on state change
:arg dict message: Channel notification message (default summary-scm)
:message values:
* **summary-scm** -- Summary + SCM changes
* **summary** -- Just summary
* **summary-build** -- Summary and build parameters
* **summary-scm-fail** -- Summary, SCM changes, and failed tests
Example::
publishers:
- jabber:
notify-on-build-start: true
group-targets:
- "[email protected]"
individual-targets:
- "[email protected]"
strategy: all
message: summary-scm
"""
j = XML.SubElement(xml_parent, 'hudson.plugins.jabber.im.transport.'
'JabberPublisher')
t = XML.SubElement(j, 'targets')
if 'group-targets' in data:
for group in data['group-targets']:
gcimt = XML.SubElement(t, 'hudson.plugins.im.'
'GroupChatIMMessageTarget')
XML.SubElement(gcimt, 'name').text = group
XML.SubElement(gcimt, 'notificationOnly').text = 'false'
if 'individual-targets' in data:
for individual in data['individual-targets']:
dimt = XML.SubElement(t, 'hudson.plugins.im.'
'DefaultIMMessageTarget')
XML.SubElement(dimt, 'value').text = individual
strategy = data.get('strategy', 'all')
strategydict = {'all': 'ALL',
'failure': 'ANY_FAILURE',
'failure-fixed': 'FAILURE_AND_FIXED',
'change': 'STATECHANGE_ONLY'}
if strategy not in strategydict:
raise JenkinsJobsException("Strategy entered is not valid, must be " +
"one of: all, failure, failure-fixed, or "
"change")
XML.SubElement(j, 'strategy').text = strategydict[strategy]
XML.SubElement(j, 'notifyOnBuildStart').text = str(
data.get('notify-on-build-start', False)).lower()
XML.SubElement(j, 'notifySuspects').text = str(
data.get('notify-scm-committers', False)).lower()
XML.SubElement(j, 'notifyCulprits').text = str(
data.get('notify-scm-culprits', False)).lower()
XML.SubElement(j, 'notifyFixers').text = str(
data.get('notify-scm-fixers', False)).lower()
XML.SubElement(j, 'notifyUpstreamCommitters').text = str(
data.get('notify-upstream-committers', False)).lower()
message = data.get('message', 'summary-scm')
messagedict = {'summary-scm': 'DefaultBuildToChatNotifier',
'summary': 'SummaryOnlyBuildToChatNotifier',
'summary-build': 'BuildParametersBuildToChatNotifier',
'summary-scm-fail': 'PrintFailingTestsBuildToChatNotifier'}
if message not in messagedict:
raise JenkinsJobsException("Message entered is not valid, must be one "
"of: summary-scm, summary, summary-build "
"or summary-scm-fail")
XML.SubElement(j, 'buildToChatNotifier', {
'class': 'hudson.plugins.im.build_notify.' + messagedict[message]})
XML.SubElement(j, 'matrixMultiplier').text = 'ONLY_CONFIGURATIONS'
def workspace_cleanup(parser, xml_parent, data):
"""yaml: workspace-cleanup (post-build)
Requires the Jenkins `Workspace Cleanup Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Workspace+Cleanup+Plugin>`_
The pre-build workspace-cleanup is available as a wrapper.
:arg list include: list of files to be included
:arg list exclude: list of files to be excluded
:arg bool dirmatch: Apply pattern to directories too (default: false)
:arg list clean-if: clean depending on build status
:clean-if values:
* **success** (`bool`) (default: true)
* **unstable** (`bool`) (default: true)
* **failure** (`bool`) (default: true)
* **aborted** (`bool`) (default: true)
* **not-built** (`bool`) (default: true)
:arg bool fail-build: Fail the build if the cleanup fails (default: true)
:arg bool clean-parent: Cleanup matrix parent workspace (default: false)
Example::
publishers:
- workspace-cleanup:
include:
- "*.zip"
clean-if:
- success: true
- not-built: false
"""
p = XML.SubElement(xml_parent,
'hudson.plugins.ws__cleanup.WsCleanup')
p.set("plugin", "[email protected]")
if "include" in data or "exclude" in data:
patterns = XML.SubElement(p, 'patterns')
for inc in data.get("include", []):
ptrn = XML.SubElement(patterns, 'hudson.plugins.ws__cleanup.Pattern')
XML.SubElement(ptrn, 'pattern').text = inc
XML.SubElement(ptrn, 'type').text = "INCLUDE"
for exc in data.get("exclude", []):
ptrn = XML.SubElement(patterns, 'hudson.plugins.ws__cleanup.Pattern')
XML.SubElement(ptrn, 'pattern').text = exc
XML.SubElement(ptrn, 'type').text = "EXCLUDE"
XML.SubElement(p, 'deleteDirs').text = \
str(data.get("dirmatch", False)).lower()
XML.SubElement(p, 'cleanupMatrixParent').text = \
str(data.get("clean-parent", False)).lower()
mask = {'success': 'cleanWhenSuccess', 'unstable': 'cleanWhenUnstable',
'failure': 'cleanWhenFailure', 'not-built': 'cleanWhenNotBuilt',
'aborted': 'cleanWhenAborted'}
clean = data.get('clean-if', [])
cdict = dict()
for d in clean:
cdict.update(d)
for k, v in mask.iteritems():
XML.SubElement(p, v).text = str(cdict.pop(k, True)).lower()
if len(cdict) > 0:
raise ValueError('clean-if must be one of: %r' % list(mask.keys()))
if str(data.get("fail-build", False)).lower() == 'false':
XML.SubElement(p, 'notFailBuild').text = 'true'
else:
XML.SubElement(p, 'notFailBuild').text = 'false'
def maven_deploy(parser, xml_parent, data):
"""yaml: maven-deploy
Deploy artifacts to Maven repository.
:arg str id: Repository ID
:arg str url: Repository URL
:arg bool unique-version: Assign unique versions to snapshots
(default true)
:arg bool deploy-unstable: Deploy even if the build is unstable
(default false)
Example::
publishers:
- maven-deploy:
id: example
url: http://repo.example.com/maven2/
unique-version: true
deploy-unstable: false
"""
p = XML.SubElement(xml_parent, 'hudson.maven.RedeployPublisher')
if 'id' in data:
XML.SubElement(p, 'id').text = data['id']
XML.SubElement(p, 'url').text = data['url']
XML.SubElement(p, 'uniqueVersion').text = str(
data.get('unique-version', True)).lower()
XML.SubElement(p, 'evenIfUnstable').text = str(
data.get('deploy-unstable', False)).lower()
def text_finder(parser, xml_parent, data):
"""yaml: text-finder
    This plugin lets you search for keywords in the files you specify and
    set the build status based on the result
Requires the Jenkins `Text-finder Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Text-finder+Plugin>`_
:arg str regexp: Specify a regular expression
:arg str fileset: Specify the path to search
:arg bool also-check-console-output:
Search the console output (default False)
:arg bool succeed-if-found:
Force a build to succeed if a string was found (default False)
:arg bool unstable-if-found:
Set build unstable instead of failing the build (default False)
Example::
publishers:
- text-finder:
regexp: "some string"
fileset: "file.txt"
also-check-console-output: true
succeed-if-found: false
unstable-if-found: false
"""
finder = XML.SubElement(xml_parent,
'hudson.plugins.textfinder.TextFinderPublisher')
if ('fileset' in data):
XML.SubElement(finder, 'fileSet').text = data['fileset']
XML.SubElement(finder, 'regexp').text = data['regexp']
check_output = str(data.get('also-check-console-output', False)).lower()
XML.SubElement(finder, 'alsoCheckConsoleOutput').text = check_output
succeed_if_found = str(data.get('succeed-if-found', False)).lower()
XML.SubElement(finder, 'succeedIfFound').text = succeed_if_found
unstable_if_found = str(data.get('unstable-if-found', False)).lower()
XML.SubElement(finder, 'unstableIfFound').text = unstable_if_found
def html_publisher(parser, xml_parent, data):
"""yaml: html-publisher
This plugin publishes HTML reports.
Requires the Jenkins `HTML Publisher Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/HTML+Publisher+Plugin>`_
:arg str name: Report name
:arg str dir: HTML directory to archive
:arg str files: Specify the pages to display
:arg bool keep-all: keep HTML reports for each past build (Default False)
:arg bool allow-missing: Allow missing HTML reports (Default False)
Example::
publishers:
- html-publisher:
name: "some name"
dir: "path/"
files: "index.html"
keep-all: true
allow-missing: true
"""
reporter = XML.SubElement(xml_parent, 'htmlpublisher.HtmlPublisher')
targets = XML.SubElement(reporter, 'reportTargets')
ptarget = XML.SubElement(targets, 'htmlpublisher.HtmlPublisherTarget')
XML.SubElement(ptarget, 'reportName').text = data['name']
XML.SubElement(ptarget, 'reportDir').text = data['dir']
XML.SubElement(ptarget, 'reportFiles').text = data['files']
keep_all = str(data.get('keep-all', False)).lower()
XML.SubElement(ptarget, 'keepAll').text = keep_all
allow_missing = str(data.get('allow-missing', False)).lower()
XML.SubElement(ptarget, 'allowMissing').text = allow_missing
XML.SubElement(ptarget, 'wrapperName').text = "htmlpublisher-wrapper.html"
def tap(parser, xml_parent, data):
"""yaml: tap
    Adds support for TAP test result files
Requires the Jenkins `TAP Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/TAP+Plugin>`_
:arg str results: TAP test result files
:arg bool fail-if-no-results: Fail if no result (default False)
:arg bool failed-tests-mark-build-as-failure:
Mark build as failure if test fails (default False)
:arg bool output-tap-to-console: Output tap to console (default True)
:arg bool enable-subtests: Enable subtests (Default True)
:arg bool discard-old-reports: Discard old reports (Default False)
:arg bool todo-is-failure: Handle TODO's as failures (Default True)
Example::
publishers:
- tap:
results: puiparts.tap
todo-is-failure: false
"""
tap = XML.SubElement(xml_parent, 'org.tap4j.plugin.TapPublisher')
XML.SubElement(tap, 'testResults').text = data['results']
XML.SubElement(tap, 'failIfNoResults').text = str(
data.get('fail-if-no-results', False)).lower()
XML.SubElement(tap, 'failedTestsMarkBuildAsFailure').text = str(
data.get('failed-tests-mark-build-as-failure', False)).lower()
XML.SubElement(tap, 'outputTapToConsole').text = str(
data.get('output-tap-to-console', True)).lower()
XML.SubElement(tap, 'enableSubtests').text = str(
data.get('enable-subtests', True)).lower()
XML.SubElement(tap, 'discardOldReports').text = str(
data.get('discard-old-reports', False)).lower()
XML.SubElement(tap, 'todoIsFailure').text = str(
data.get('todo-is-failure', True)).lower()
def post_tasks(parser, xml_parent, data):
"""yaml: post-tasks
    Adds support for the Post Build Task plugin
Requires the Jenkins `Post Build Task plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Post+build+task>`_
:arg dict task: Post build task definition
:arg list task[matches]: list of matches when to run the task
:arg dict task[matches][*]: match definition
:arg str task[matches][*][log-text]: text to match against the log
:arg str task[matches][*][operator]: operator to apply with the next match
:task[matches][*][operator] values (default 'AND'):
* **AND**
* **OR**
:arg bool task[escalate-status]: Escalate the task status to the job
(default 'false')
:arg bool task[run-if-job-successful]: Run only if the job was successful
(default 'false')
:arg str task[script]: Shell script to run (default '')
Example::
publishers:
- post-tasks:
- matches:
- log-text: line to match
operator: AND
- log-text: line to match
operator: OR
- log-text: line to match
operator: AND
escalate-status: false
          run-if-job-successful: false
script: |
echo "Here goes the task script"
"""
pb_xml = XML.SubElement(xml_parent,
'hudson.plugins.postbuildtask.PostbuildTask')
tasks_xml = XML.SubElement(pb_xml, 'tasks')
for task in data:
task_xml = XML.SubElement(
tasks_xml,
'hudson.plugins.postbuildtask.TaskProperties')
matches_xml = XML.SubElement(task_xml, 'logTexts')
for match in task.get('matches', []):
lt_xml = XML.SubElement(
matches_xml,
'hudson.plugins.postbuildtask.LogProperties')
XML.SubElement(lt_xml, 'logText').text = str(
match.get('log-text', ''))
XML.SubElement(lt_xml, 'operator').text = str(
match.get('operator', 'AND')).upper()
XML.SubElement(task_xml, 'EscalateStatus').text = str(
task.get('escalate-status', False)).lower()
XML.SubElement(task_xml, 'RunIfJobSuccessful').text = str(
task.get('run-if-job-successful', False)).lower()
XML.SubElement(task_xml, 'script').text = str(
task.get('script', ''))
def xml_summary(parser, xml_parent, data):
"""yaml: xml-summary
Adds support for the Summary Display Plugin
Requires the Jenkins `Summary Display Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Summary+Display+Plugin>`_
:arg str files: Files to parse (default '')
Example::
publishers:
- xml-summary:
files: '*_summary_report.xml'
"""
summary = XML.SubElement(xml_parent,
'hudson.plugins.summary__report.'
'ACIPluginPublisher')
XML.SubElement(summary, 'name').text = data['files']
def robot(parser, xml_parent, data):
"""yaml: robot
Adds support for the Robot Framework Plugin
Requires the Jenkins `Robot Framework Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Robot+Framework+Plugin>`_
:arg str output-path: Path to directory containing robot xml and html files
relative to build workspace. (default '')
:arg str log-file-link: Name of log or report file to be linked on jobs
front page (default '')
:arg str report-html: Name of the html file containing robot test report
(default 'report.html')
:arg str log-html: Name of the html file containing detailed robot test log
(default 'log.html')
:arg str output-xml: Name of the xml file containing robot output
(default 'output.xml')
:arg str pass-threshold: Minimum percentage of passed tests to consider
the build successful (default 0.0)
:arg str unstable-threshold: Minimum percentage of passed test to
consider the build as not failed (default 0.0)
:arg bool only-critical: Take only critical tests into account when
checking the thresholds (default true)
:arg list other-files: list other files to archive (default '')
Example::
        publishers:
- robot:
output-path: reports/robot
log-file-link: report.html
report-html: report.html
log-html: log.html
output-xml: output.xml
pass-threshold: 80.0
unstable-threshold: 60.0
only-critical: false
other-files:
- extra-file1.html
- extra-file2.txt
"""
parent = XML.SubElement(xml_parent, 'hudson.plugins.robot.RobotPublisher')
XML.SubElement(parent, 'outputPath').text = data['output-path']
XML.SubElement(parent, 'logFileLink').text = str(
data.get('log-file-link', ''))
XML.SubElement(parent, 'reportFileName').text = str(
data.get('report-html', 'report.html'))
XML.SubElement(parent, 'logFileName').text = str(
data.get('log-html', 'log.html'))
XML.SubElement(parent, 'outputFileName').text = str(
data.get('output-xml', 'output.xml'))
XML.SubElement(parent, 'passThreshold').text = str(
data.get('pass-threshold', 0.0))
XML.SubElement(parent, 'unstableThreshold').text = str(
data.get('unstable-threshold', 0.0))
XML.SubElement(parent, 'onlyCritical').text = str(
data.get('only-critical', True)).lower()
other_files = XML.SubElement(parent, 'otherFiles')
for other_file in data['other-files']:
XML.SubElement(other_files, 'string').text = str(other_file)
def warnings(parser, xml_parent, data):
"""yaml: warnings
Generate trend report for compiler warnings in the console log or
in log files. Requires the Jenkins `Warnings Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Warnings+Plugin>`_
    :arg list console-log-parsers: The parsers to use to scan the console
log (default '')
:arg dict workspace-file-scanners:
:workspace-file-scanners:
* **file-pattern** (`str`) -- Fileset 'includes' setting that
specifies the files to scan for warnings
* **scanner** (`str`) -- The parser to use to scan the files
provided in workspace-file-pattern (default '')
:arg str files-to-include: Comma separated list of regular
expressions that specifies the files to include in the report
(based on their absolute filename). By default all files are
included
:arg str files-to-ignore: Comma separated list of regular expressions
that specifies the files to exclude from the report (based on their
absolute filename). (default '')
:arg bool run-always: By default, this plug-in runs only for stable or
unstable builds, but not for failed builds. Set to true if the
plug-in should run even for failed builds. (default false)
:arg bool detect-modules: Determines if Ant or Maven modules should be
detected for all files that contain warnings. Activating this
option may increase your build time since the detector scans
the whole workspace for 'build.xml' or 'pom.xml' files in order
to assign the correct module names. (default false)
:arg bool resolve-relative-paths: Determines if relative paths in
warnings should be resolved using a time expensive operation that
scans the whole workspace for matching files. Deactivate this
option if you encounter performance problems. (default false)
:arg int health-threshold-high: The upper threshold for the build
health. If left empty then no health report is created. If
the actual number of warnings is between the provided
thresholds then the build health is interpolated (default '')
:arg int health-threshold-low: The lower threshold for the build
health. See health-threshold-high. (default '')
:arg dict health-priorities: Determines which warning priorities
should be considered when evaluating the build health (default
all-priorities)
:health-priorities values:
* **priority-high** -- Only priority high
* **high-and-normal** -- Priorities high and normal
* **all-priorities** -- All priorities
:arg dict total-thresholds: If the number of total warnings is greater
than one of these thresholds then a build is considered as unstable
or failed, respectively. (default '')
:total-thresholds:
* **unstable** (`dict`)
:unstable: * **total-all** (`int`)
* **total-high** (`int`)
* **total-normal** (`int`)
* **total-low** (`int`)
* **failed** (`dict`)
:failed: * **total-all** (`int`)
* **total-high** (`int`)
* **total-normal** (`int`)
* **total-low** (`int`)
:arg dict new-thresholds: If the specified number of new warnings exceeds
one of these thresholds then a build is considered as unstable or
failed, respectively. (default '')
:new-thresholds:
* **unstable** (`dict`)
:unstable: * **new-all** (`int`)
* **new-high** (`int`)
* **new-normal** (`int`)
* **new-low** (`int`)
* **failed** (`dict`)
:failed: * **new-all** (`int`)
* **new-high** (`int`)
* **new-normal** (`int`)
                     * **new-low** (`int`)
:arg bool use-delta-for-new-warnings: If set then the number of new
warnings is calculated by subtracting the total number of warnings
of the current build from the reference build. This may lead to wrong
results if you have both fixed and new warnings in a build. If not set,
then the number of new warnings is calculated by an asymmetric set
difference of the warnings in the current and reference build. This
will find all new warnings even if the number of total warnings is
decreasing. However, sometimes false positives will be reported due
to minor changes in a warning (refactoring of variable of method
names, etc.) (default false)
:arg bool only-use-stable-builds-as-reference: The number of new warnings
will be calculated based on the last stable build, allowing reverts
of unstable builds where the number of warnings was decreased.
(default false)
:arg str default-encoding: Default encoding when parsing or showing files
Leave empty to use default encoding of platform (default '')
Example::
publishers:
- warnings:
console-log-parsers:
- FxCop
- CodeAnalysis
workspace-file-scanners:
- file-pattern: '**/*.out'
              scanner: 'AcuCobol Compiler'
- file-pattern: '**/*.warnings'
scanner: FxCop
files-to-include: '[a-zA-Z]\.java,[a-zA-Z]\.cpp'
files-to-ignore: '[a-zA-Z]\.html,[a-zA-Z]\.js'
run-always: true
detect-modules: true
resolve-relative-paths: true
health-threshold-high: 50
health-threshold-low: 25
health-priorities: high-and-normal
total-thresholds:
unstable:
total-all: 90
total-high: 90
total-normal: 40
total-low: 30
failed:
total-all: 100
total-high: 100
total-normal: 50
total-low: 40
new-thresholds:
unstable:
new-all: 100
new-high: 50
new-normal: 30
new-low: 10
failed:
new-all: 100
new-high: 60
new-normal: 50
new-low: 40
use-delta-for-new-warnings: true
only-use-stable-builds-as-reference: true
default-encoding: ISO-8859-9
"""
warnings = XML.SubElement(xml_parent,
'hudson.plugins.warnings.'
'WarningsPublisher')
console = XML.SubElement(warnings, 'consoleParsers')
for parser in data.get('console-log-parsers', []):
console_parser = XML.SubElement(console,
'hudson.plugins.warnings.'
'ConsoleParser')
XML.SubElement(console_parser, 'parserName').text = parser
workspace = XML.SubElement(warnings, 'parserConfigurations')
for wfs in data.get('workspace-file-scanners', []):
workspace_pattern = XML.SubElement(workspace,
'hudson.plugins.warnings.'
'ParserConfiguration')
XML.SubElement(workspace_pattern, 'pattern').text = \
wfs['file-pattern']
XML.SubElement(workspace_pattern, 'parserName').text = \
wfs['scanner']
warnings_to_include = data.get('files-to-include', '')
XML.SubElement(warnings, 'includePattern').text = warnings_to_include
warnings_to_ignore = data.get('files-to-ignore', '')
XML.SubElement(warnings, 'excludePattern').text = warnings_to_ignore
run_always = str(data.get('run-always', False)).lower()
XML.SubElement(warnings, 'canRunOnFailed').text = run_always
detect_modules = str(data.get('detect-modules', False)).lower()
XML.SubElement(warnings, 'shouldDetectModules').text = detect_modules
    # Note the logic reversal (included here to match the GUI)
XML.SubElement(warnings, 'doNotResolveRelativePaths').text = \
str(not data.get('resolve-relative-paths', False)).lower()
health_threshold_high = str(data.get('health-threshold-high', ''))
XML.SubElement(warnings, 'healthy').text = health_threshold_high
health_threshold_low = str(data.get('health-threshold-low', ''))
XML.SubElement(warnings, 'unHealthy').text = health_threshold_low
prioritiesDict = {'priority-high': 'high',
'high-and-normal': 'normal',
'all-priorities': 'low'}
priority = data.get('health-priorities', 'all-priorities')
if priority not in prioritiesDict:
raise JenkinsJobsException("Health-Priority entered is not valid must "
"be one of: %s" %
",".join(prioritiesDict.keys()))
XML.SubElement(warnings, 'thresholdLimit').text = prioritiesDict[priority]
td = XML.SubElement(warnings, 'thresholds')
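    # Generates element names like 'unstableTotalAll' or 'failedNewHigh'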
for base in ["total", "new"]:
thresholds = data.get("%s-thresholds" % base, {})
for status in ["unstable", "failed"]:
bystatus = thresholds.get(status, {})
for level in ["all", "high", "normal", "low"]:
val = str(bystatus.get("%s-%s" % (base, level), ''))
XML.SubElement(td, "%s%s%s" % (status,
base.capitalize(), level.capitalize())
).text = val
if data.get('new-thresholds'):
XML.SubElement(warnings, 'dontComputeNew').text = 'false'
delta = data.get('use-delta-for-new-warnings', False)
XML.SubElement(warnings, 'useDeltaValues').text = str(delta).lower()
use_stable_builds = data.get('only-use-stable-builds-as-reference',
False)
XML.SubElement(warnings, 'useStableBuildAsReference').text = str(
use_stable_builds).lower()
else:
XML.SubElement(warnings, 'dontComputeNew').text = 'true'
XML.SubElement(warnings, 'useStableBuildAsReference').text = 'false'
XML.SubElement(warnings, 'useDeltaValues').text = 'false'
encoding = data.get('default-encoding', '')
XML.SubElement(warnings, 'defaultEncoding').text = encoding
def sloccount(parser, xml_parent, data):
"""yaml: sloccount
Generates the trend report for SLOCCount
Requires the Jenkins `SLOCCount Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/SLOCCount+Plugin>`_
:arg str report-files: Setting that specifies the generated raw
SLOCCount report files.
Be sure not to include any non-report files into
this pattern. The report files must have been
generated by sloccount using the
"--wide --details" options.
(default: '\*\*/sloccount.sc')
:arg str charset: The character encoding to be used to read the SLOCCount
result files. (default: 'UTF-8')
Example::
publishers:
- sloccount:
report-files: sloccount.sc
charset: UTF-8
"""
top = XML.SubElement(xml_parent,
'hudson.plugins.sloccount.SloccountPublisher')
XML.SubElement(top, 'pattern').text = data.get('report-files',
'**/sloccount.sc')
XML.SubElement(top, 'encoding').text = data.get('charset', 'UTF-8')
def ircbot(parser, xml_parent, data):
"""yaml: ircbot
ircbot enables Jenkins to send build notifications via IRC and lets you
interact with Jenkins via an IRC bot.
Requires the Jenkins `IRC Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/IRC+Plugin>`_
:arg string strategy: When to send notifications
:strategy values:
* **all** always (default)
            * **any-failure** on any failure
* **failure-and-fixed** on failure and fixes
* **new-failure-and-fixed** on new failure and fixes
* **statechange-only** only on state change
:arg bool notify-start: Whether to send notifications to channels when a
build starts
(default: false)
:arg bool notify-committers: Whether to send notifications to the users
that are suspected of having broken this build
(default: false)
:arg bool notify-culprits: Also send notifications to 'culprits' from
previous unstable/failed builds
(default: false)
:arg bool notify-upstream: Whether to send notifications to upstream
committers if no committers were found for a
broken build
(default: false)
:arg bool notify-fixers: Whether to send notifications to the users that
have fixed a broken build
(default: false)
:arg string message-type: Channel Notification Message.
:message-type values:
* **summary-scm** for summary and SCM changes (default)
* **summary** for summary only
* **summary-params** for summary and build parameters
            * **summary-scm-fail** for summary, SCM changes and failures
:arg list channels: list channels definitions
If empty, it takes channel from Jenkins configuration.
(default: empty)
WARNING: the IRC plugin requires the channel to be
configured in the system wide configuration or the jobs
will fail to emit notifications to the channel
:Channel: * **name** (`str`) Channel name
* **password** (`str`) Channel password (optional)
* **notify-only** (`bool`) Set to true if you want to
disallow bot commands (default: false)
:arg string matrix-notifier: notify for matrix projects
instant-messaging-plugin injects an additional
field in the configuration form whenever the
project is a multi-configuration project
:matrix-notifier values:
* **all**
* **only-configurations** (default)
* **only-parent**
Example::
publishers:
- ircbot:
strategy: all
notify-start: false
notify-committers: false
notify-culprits: false
notify-upstream: false
notify-fixers: false
message-type: summary-scm
channels:
- name: '#jenkins-channel1'
              password: secret
notify-only: false
- name: '#jenkins-channel2'
notify-only: true
matrix-notifier: only-configurations
"""
top = XML.SubElement(xml_parent, 'hudson.plugins.ircbot.IrcPublisher')
message_dict = {'summary-scm': 'DefaultBuildToChatNotifier',
'summary': 'SummaryOnlyBuildToChatNotifier',
'summary-params': 'BuildParametersBuildToChatNotifier',
'summary-scm-fail': 'PrintFailingTestsBuildToChatNotifier'}
message = data.get('message-type', 'summary-scm')
if message not in message_dict:
raise JenkinsJobsException("message-type entered is not valid, must "
"be one of: %s" %
", ".join(message_dict.keys()))
message = "hudson.plugins.im.build_notify." + message_dict.get(message)
XML.SubElement(top, 'buildToChatNotifier', attrib={'class': message})
strategy_dict = {'all': 'ALL',
'any-failure': 'ANY_FAILURE',
'failure-and-fixed': 'FAILURE_AND_FIXED',
'new-failure-and-fixed': 'NEW_FAILURE_AND_FIXED',
'statechange-only': 'STATECHANGE_ONLY'}
strategy = data.get('strategy', 'all')
if strategy not in strategy_dict:
raise JenkinsJobsException("strategy entered is not valid, must be "
"one of: %s" %
", ".join(strategy_dict.keys()))
XML.SubElement(top, 'strategy').text = strategy_dict.get(strategy)
targets = XML.SubElement(top, 'targets')
channels = data.get('channels', [])
for channel in channels:
sub = XML.SubElement(targets,
'hudson.plugins.im.GroupChatIMMessageTarget')
XML.SubElement(sub, 'name').text = channel.get('name')
XML.SubElement(sub, 'password').text = channel.get('password')
XML.SubElement(sub, 'notificationOnly').text = str(
channel.get('notify-only', False)).lower()
XML.SubElement(top, 'notifyOnBuildStart').text = str(
data.get('notify-start', False)).lower()
XML.SubElement(top, 'notifySuspects').text = str(
data.get('notify-committers', False)).lower()
XML.SubElement(top, 'notifyCulprits').text = str(
data.get('notify-culprits', False)).lower()
XML.SubElement(top, 'notifyFixers').text = str(
data.get('notify-fixers', False)).lower()
XML.SubElement(top, 'notifyUpstreamCommitters').text = str(
data.get('notify-upstream', False)).lower()
matrix_dict = {'all': 'ALL',
'only-configurations': 'ONLY_CONFIGURATIONS',
'only-parent': 'ONLY_PARENT'}
    matrix = data.get('matrix-notifier', 'only-configurations')
if matrix not in matrix_dict:
raise JenkinsJobsException("matrix-notifier entered is not valid, "
"must be one of: %s" %
", ".join(matrix_dict.keys()))
XML.SubElement(top, 'matrixMultiplier').text = matrix_dict.get(matrix)
def plot(parser, xml_parent, data):
"""yaml: plot
Plot provides generic plotting (or graphing).
Requires the Jenkins `Plot Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Plot+Plugin>`_
:arg str title: title for the graph
(default: '')
:arg str yaxis: title of Y axis
:arg str group: name of the group to which the plot belongs
:arg int num-builds: number of builds to plot across
(default: plot all builds)
:arg str style: Specifies the graph style of the plot
Can be: area, bar, bar3d, line, line3d, stackedArea,
stackedbar, stackedbar3d, waterfall
(default: 'line')
:arg bool use-description: When false, the X-axis labels are formed
using build numbers and dates, and the
corresponding tooltips contain the build
descriptions. When enabled, the contents of
the labels and tooltips are swapped, with the
descriptions used as X-axis labels and the
build number and date used for tooltips.
(default: False)
    :arg str csv-file-name: Name of the file in which the data
                            will be persisted. If none is specified, a random
                            name is generated, as done by the Jenkins Plot
                            plugin.
                            (default: randomly generated .csv filename, same
                            behaviour as the Jenkins Plot plugin)
:arg list series: list data series definitions
:Serie: * **file** (`str`) : files to include
* **inclusion-flag** filtering mode for CSV files. Possible
values are:
* **off** (default)
* **include-by-string**
* **exclude-by-string**
* **include-by-column**
* **exclude-by-column**
* **exclude** (`str`) : exclude pattern for CSV file.
* **url** (`str`) : for 'csv' and 'xml' file types
used when you click on a point (default: empty)
* **display-table** (`bool`) : for 'csv' file type
if true, original CSV will be shown above plot (default: False)
* **label** (`str`) : used by 'properties' file type
Specifies the legend label for this data series.
(default: empty)
            * **format** (`str`) : Type of file from which the data is read.
Can be: properties, csv, xml
* **xpath-type** (`str`) : The result type of the expression must
be supplied due to limitations in the java.xml.xpath parsing.
The result can be: node, nodeset, boolean, string, or number.
Strings and numbers will be converted to double. Boolean will
be converted to 1 for true, and 0 for false. (default: 'node')
* **xpath** (`str`) : used by 'xml' file type
Xpath which selects the values that should be plotted.
Example::
publishers:
- plot:
- title: MyPlot
yaxis: Y
group: PlotGroup
num-builds: ''
style: line
use-description: false
series:
- file: graph-me-second.properties
label: MyLabel
format: properties
- file: graph-me-first.csv
url: 'http://srv1'
inclusion-flag: 'off'
display-table: true
format: csv
- title: MyPlot2
yaxis: Y
group: PlotGroup
style: line
use-description: false
series:
- file: graph-me-third.xml
url: 'http://srv2'
format: xml
xpath-type: 'node'
xpath: '/*'
"""
top = XML.SubElement(xml_parent, 'hudson.plugins.plot.PlotPublisher')
plots = XML.SubElement(top, 'plots')
format_dict = {'properties': 'hudson.plugins.plot.PropertiesSeries',
'csv': 'hudson.plugins.plot.CSVSeries',
'xml': 'hudson.plugins.plot.XMLSeries'}
xpath_dict = {'nodeset': 'NODESET', 'node': 'NODE', 'string': 'STRING',
'boolean': 'BOOLEAN', 'number': 'NUMBER'}
inclusion_dict = {'off': 'OFF',
'include-by-string': 'INCLUDE_BY_STRING',
'exclude-by-string': 'EXCLUDE_BY_STRING',
'include-by-column': 'INCLUDE_BY_COLUMN',
'exclude-by-column': 'EXCLUDE_BY_COLUMN'}
for plot in data:
plugin = XML.SubElement(plots, 'hudson.plugins.plot.Plot')
XML.SubElement(plugin, 'title').text = plot.get('title', '')
XML.SubElement(plugin, 'yaxis').text = plot['yaxis']
XML.SubElement(plugin, 'csvFileName').text = \
plot.get('csv-file-name', '%s.csv' % random.randrange(2 << 32))
topseries = XML.SubElement(plugin, 'series')
series = plot['series']
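        # Each series becomes a *Series element chosen by its 'format' value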
for serie in series:
format_data = serie.get('format')
if format_data not in format_dict:
raise JenkinsJobsException("format entered is not valid, must "
"be one of: %s" %
" , ".join(format_dict.keys()))
subserie = XML.SubElement(topseries, format_dict.get(format_data))
XML.SubElement(subserie, 'file').text = serie.get('file')
if format_data == 'properties':
XML.SubElement(subserie, 'label').text = serie.get('label', '')
if format_data == 'csv':
inclusion_flag = serie.get('inclusion-flag', 'off')
if inclusion_flag not in inclusion_dict:
raise JenkinsJobsException("Inclusion flag result entered "
"is not valid, must be one of: "
"%s"
% ", ".join(inclusion_dict))
XML.SubElement(subserie, 'inclusionFlag').text = \
inclusion_dict.get(inclusion_flag)
XML.SubElement(subserie, 'exclusionValues').text = \
serie.get('exclude', '')
XML.SubElement(subserie, 'url').text = serie.get('url', '')
XML.SubElement(subserie, 'displayTableFlag').text = \
                    str(serie.get('display-table', False)).lower()
if format_data == 'xml':
XML.SubElement(subserie, 'url').text = serie.get('url', '')
XML.SubElement(subserie, 'xpathString').text = \
serie.get('xpath')
xpathtype = serie.get('xpath-type', 'node')
if xpathtype not in xpath_dict:
raise JenkinsJobsException("XPath result entered is not "
"valid, must be one of: %s" %
", ".join(xpath_dict))
XML.SubElement(subserie, 'nodeTypeString').text = \
xpath_dict.get(xpathtype)
XML.SubElement(subserie, 'fileType').text = serie.get('format')
XML.SubElement(plugin, 'group').text = plot['group']
XML.SubElement(plugin, 'useDescr').text = \
str(plot.get('use-description', False)).lower()
XML.SubElement(plugin, 'numBuilds').text = plot.get('num-builds', '')
style_list = ['area', 'bar', 'bar3d', 'line', 'line3d', 'stackedArea',
'stackedbar', 'stackedbar3d', 'waterfall']
style = plot.get('style', 'line')
if style not in style_list:
raise JenkinsJobsException("style entered is not valid, must be "
"one of: %s" % ", ".join(style_list))
XML.SubElement(plugin, 'style').text = style
def git(parser, xml_parent, data):
"""yaml: git
This plugin will configure the Jenkins Git plugin to
push merge results, tags, and/or branches to
remote repositories after the job completes.
Requires the Jenkins `Git Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Git+Plugin>`_
:arg bool push-merge: push merges back to the origin specified in the
pre-build merge options (Default: False)
:arg bool push-only-if-success: Only push to remotes if the build succeeds
- otherwise, nothing will be pushed.
(Default: True)
:arg list tags: tags to push at the completion of the build
:tag: * **remote** (`str`) remote repo name to push to
(Default: 'origin')
* **name** (`str`) name of tag to push
* **message** (`str`) message content of the tag
* **create-tag** (`bool`) whether or not to create the tag
after the build, if this is False then the tag needs to
exist locally (Default: False)
* **update-tag** (`bool`) whether to overwrite a remote tag
or not (Default: False)
:arg list branches: branches to push at the completion of the build
:branch: * **remote** (`str`) remote repo name to push to
(Default: 'origin')
* **name** (`str`) name of remote branch to push to
:arg list notes: notes to push at the completion of the build
:note: * **remote** (`str`) remote repo name to push to
(Default: 'origin')
* **message** (`str`) content of the note
* **namespace** (`str`) namespace of the note
(Default: master)
* **replace-note** (`bool`) whether to overwrite a note or not
(Default: False)
Example::
publishers:
- git:
push-merge: true
push-only-if-success: false
tags:
- tag:
remote: tagremotename
name: tagname
message: "some tag message"
create-tag: true
update-tag: true
branches:
- branch:
remote: branchremotename
name: "some/branch"
notes:
- note:
remote: remotename
message: "some note to push"
namespace: commits
replace-note: true
"""
mappings = [('push-merge', 'pushMerge', False),
('push-only-if-success', 'pushOnlyIfSuccess', True)]
tag_mappings = [('remote', 'targetRepoName', 'origin'),
('name', 'tagName', None),
('message', 'tagMessage', ''),
('create-tag', 'createTag', False),
('update-tag', 'updateTag', False)]
branch_mappings = [('remote', 'targetRepoName', 'origin'),
('name', 'branchName', None)]
note_mappings = [('remote', 'targetRepoName', 'origin'),
('message', 'noteMsg', None),
('namespace', 'noteNamespace', 'master'),
('replace-note', 'noteReplace', False)]
def handle_entity_children(entity, entity_xml, child_mapping):
for prop in child_mapping:
opt, xmlopt, default_val = prop[:3]
val = entity.get(opt, default_val)
if val is None:
raise JenkinsJobsException('Required option missing: %s' % opt)
if type(val) == bool:
val = str(val).lower()
XML.SubElement(entity_xml, xmlopt).text = val
top = XML.SubElement(xml_parent, 'hudson.plugins.git.GitPublisher')
XML.SubElement(top, 'configVersion').text = '2'
handle_entity_children(data, top, mappings)
tags = data.get('tags', [])
if tags:
xml_tags = XML.SubElement(top, 'tagsToPush')
for tag in tags:
xml_tag = XML.SubElement(
xml_tags,
'hudson.plugins.git.GitPublisher_-TagToPush')
handle_entity_children(tag['tag'], xml_tag, tag_mappings)
branches = data.get('branches', [])
if branches:
xml_branches = XML.SubElement(top, 'branchesToPush')
for branch in branches:
xml_branch = XML.SubElement(
xml_branches,
'hudson.plugins.git.GitPublisher_-BranchToPush')
handle_entity_children(branch['branch'], xml_branch,
branch_mappings)
notes = data.get('notes', [])
if notes:
xml_notes = XML.SubElement(top, 'notesToPush')
for note in notes:
xml_note = XML.SubElement(
xml_notes,
'hudson.plugins.git.GitPublisher_-NoteToPush')
handle_entity_children(note['note'], xml_note, note_mappings)
def github_notifier(parser, xml_parent, data):
"""yaml: github-notifier
Set build status on Github commit.
Requires the Jenkins `Github Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/GitHub+Plugin>`_
Example:
.. literalinclude:: /../../tests/publishers/fixtures/github-notifier.yaml
"""
XML.SubElement(xml_parent,
'com.cloudbees.jenkins.GitHubCommitNotifier')
def build_publisher(parser, xml_parent, data):
"""yaml: build-publisher
This plugin allows records from one Jenkins to be published
on another Jenkins.
Requires the Jenkins `Build Publisher Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Build+Publisher+Plugin>`_
:arg str name: Name of the server to publish the build to
Example::
publishers:
- build-publisher:
name: servername
publish-unstable-builds: true
publish-failed-builds: true
days-to-keep: -1
num-to-keep: -1
artifact-days-to-keep: -1
artifact-num-to-keep: -1
"""
reporter = XML.SubElement(
xml_parent,
'hudson.plugins.build__publisher.BuildPublisher')
XML.SubElement(reporter, 'serverName').text = data['name']
XML.SubElement(reporter, 'publishUnstableBuilds').text = \
str(data.get('publish-unstable-builds', True)).lower()
XML.SubElement(reporter, 'publishFailedBuilds').text = \
str(data.get('publish-failed-builds', True)).lower()
logrotator = XML.SubElement(reporter, 'logRotator')
XML.SubElement(logrotator, 'daysToKeep').text = \
str(data.get('days-to-keep', -1))
XML.SubElement(logrotator, 'numToKeep').text = \
str(data.get('num-to-keep', -1))
XML.SubElement(logrotator, 'artifactDaysToKeep').text = \
str(data.get('artifact-days-to-keep', -1))
XML.SubElement(logrotator, 'artifactNumToKeep').text = \
str(data.get('artifact-num-to-keep', -1))
def stash(parser, xml_parent, data):
"""yaml: stash
This plugin will configure the Jenkins Stash Notifier plugin to
notify Atlassian Stash after the job completes.
Requires the Jenkins `StashNotifier Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/StashNotifier+Plugin>`_
:arg string url: Base url of Stash Server (Default: "")
:arg string username: Username of Stash Server (Default: "")
:arg string password: Password of Stash Server (Default: "")
:arg bool ignore-ssl: Ignore unverified SSL certificate (Default: False)
:arg string commit-sha1: Commit SHA1 to notify (Default: "")
:arg bool include-build-number: Include build number in key
(Default: False)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/stash001.yaml
"""
top = XML.SubElement(xml_parent,
'org.jenkinsci.plugins.stashNotifier.StashNotifier')
XML.SubElement(top, 'stashServerBaseUrl').text = data.get('url', '')
XML.SubElement(top, 'stashUserName').text = data.get('username', '')
XML.SubElement(top, 'stashUserPassword').text = data.get('password', '')
XML.SubElement(top, 'ignoreUnverifiedSSLPeer').text = str(
data.get('ignore-ssl', False)).lower()
XML.SubElement(top, 'commitSha1').text = data.get('commit-sha1', '')
XML.SubElement(top, 'includeBuildNumberInKey').text = str(
data.get('include-build-number', False)).lower()
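# Minimal sketch of the `data` dict that stash() consumes, assuming a parsed
# YAML entry similar to the referenced fixture (the concrete values below are
# placeholders, not taken from that fixture):
#
#   data = {'url': 'https://stash.example.com',
#           'username': 'builduser',
#           'password': 'secret',
#           'ignore-ssl': True,
#           'commit-sha1': '',
#           'include-build-number': False}
#   stash(parser, xml_parent, data)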
def description_setter(parser, xml_parent, data):
"""yaml: description-setter
This plugin sets the description for each build,
based upon a RegEx test of the build log file.
Requires the Jenkins `Description Setter Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Description+Setter+Plugin>`_
:arg str regexp: A RegEx which is used to scan the build log file
:arg str regexp-for-failed: A RegEx which is used for failed builds
(optional)
:arg str description: The description to set on the build (optional)
:arg str description-for-failed: The description to set on
the failed builds (optional)
:arg bool set-for-matrix: Also set the description on
a multi-configuration build (Default False)
Example:
.. literalinclude::
/../../tests/publishers/fixtures/description-setter001.yaml
"""
descriptionsetter = XML.SubElement(
xml_parent,
'hudson.plugins.descriptionsetter.DescriptionSetterPublisher')
XML.SubElement(descriptionsetter, 'regexp').text = data.get('regexp', '')
XML.SubElement(descriptionsetter, 'regexpForFailed').text = \
data.get('regexp-for-failed', '')
if 'description' in data:
XML.SubElement(descriptionsetter, 'description').text = \
data['description']
if 'description-for-failed' in data:
XML.SubElement(descriptionsetter, 'descriptionForFailed').text = \
data['description-for-failed']
for_matrix = str(data.get('set-for-matrix', False)).lower()
XML.SubElement(descriptionsetter, 'setForMatrix').text = for_matrix
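# Minimal sketch of a call to description_setter(), assuming `data` was parsed
# from a YAML entry like the referenced fixture (values are placeholders):
#
#   data = {'regexp': 'version: (.*)',
#           'description': 'some description',
#           'set-for-matrix': True}
#   description_setter(parser, xml_parent, data)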
def sitemonitor(parser, xml_parent, data):
"""yaml: sitemonitor
This plugin checks the availability of a URL.
It requires the `sitemonitor plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/SiteMonitor+Plugin>`_
:arg list sites: List of URLs to check
Example:
.. literalinclude:: /../../tests/publishers/fixtures/sitemonitor001.yaml
"""
mon = XML.SubElement(xml_parent,
'hudson.plugins.sitemonitor.SiteMonitorRecorder')
if data.get('sites'):
sites = XML.SubElement(mon, 'mSites')
for siteurl in data.get('sites'):
site = XML.SubElement(sites,
'hudson.plugins.sitemonitor.model.Site')
XML.SubElement(site, 'mUrl').text = siteurl['url']
def testng(parser, xml_parent, data):
"""yaml: testng
This plugin publishes TestNG test reports.
Requires the Jenkins `TestNG Results Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/testng-plugin>`_
:arg str pattern: filename pattern to locate the TestNG XML report files
:arg bool escape-test-description: escapes the description string
associated with the test method while displaying test method details
(Default True)
:arg bool escape-exception-msg: escapes the test method's exception
messages. (Default True)
Example:
.. literalinclude::
/../../tests/publishers/fixtures/testng001.yaml
"""
reporter = XML.SubElement(xml_parent, 'hudson.plugins.testng.Publisher')
if not data['pattern']:
raise JenkinsJobsException("A filename pattern must be specified.")
XML.SubElement(reporter, 'reportFilenamePattern').text = data['pattern']
XML.SubElement(reporter, 'escapeTestDescp').text = str(data.get(
'escape-test-description', True))
XML.SubElement(reporter, 'escapeExceptionMsg').text = str(data.get(
'escape-exception-msg', True))
def artifact_deployer(parser, xml_parent, data):
"""yaml: artifact-deployer
This plugin makes it possible to copy artifacts to remote locations.
Requires the Jenkins `ArtifactDeployer Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/ArtifactDeployer+Plugin>`_
:arg list entries:
:entries:
* **files** (`str`) - files to deploy
* **basedir** (`str`) - the directory from which the files are deployed
* **excludes** (`str`) - the mask to exclude files
* **remote** (`str`) - a remote output directory
* **flatten** (`bool`) - ignore the source directory structure
(Default: False)
* **delete-remote** (`bool`) - clean-up remote directory
before deployment (Default: False)
* **delete-remote-artifacts** (`bool`) - delete remote artifacts
when the build is deleted (Default: False)
* **fail-no-files** (`bool`) - fail build if there are no files
(Default: False)
* **groovy-script** (`str`) - execute a Groovy script
before a build is deleted
:arg bool deploy-if-fail: Deploy the artifacts even if the build failed (Default: False)
Example:
.. literalinclude:: /../../tests/publishers/fixtures/artifact-dep.yaml
"""
deployer = XML.SubElement(xml_parent,
'org.jenkinsci.plugins.artifactdeployer.'
'ArtifactDeployerPublisher')
if data is None or 'entries' not in data:
raise Exception('entries field is missing')
elif data.get('entries', None) is None:
entries = XML.SubElement(deployer, 'entries', {'class': 'empty-list'})
else:
entries = XML.SubElement(deployer, 'entries')
for entry in data.get('entries'):
deployer_entry = XML.SubElement(
entries,
'org.jenkinsci.plugins.artifactdeployer.ArtifactDeployerEntry')
XML.SubElement(deployer_entry, 'includes').text = \
entry.get('files')
XML.SubElement(deployer_entry, 'basedir').text = \
entry.get('basedir')
XML.SubElement(deployer_entry, 'excludes').text = \
entry.get('excludes')
XML.SubElement(deployer_entry, 'remote').text = entry.get('remote')
XML.SubElement(deployer_entry, 'flatten').text = \
str(entry.get('flatten', False)).lower()
XML.SubElement(deployer_entry, 'deleteRemote').text = \
str(entry.get('delete-remote', False)).lower()
XML.SubElement(deployer_entry, 'deleteRemoteArtifacts').text = \
str(entry.get('delete-remote-artifacts', False)).lower()
XML.SubElement(deployer_entry, 'failNoFilesDeploy').text = \
str(entry.get('fail-no-files', False)).lower()
XML.SubElement(deployer_entry, 'groovyExpression').text = \
entry.get('groovy-script')
deploy_if_fail = str(data.get('deploy-if-fail', False)).lower()
XML.SubElement(deployer, 'deployEvenBuildFail').text = deploy_if_fail
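# Minimal sketch of the `data` dict artifact_deployer() consumes, assuming a
# single deployment entry (all values are placeholders):
#
#   data = {'entries': [{'files': 'dist/*.tar.gz',
#                        'basedir': 'build',
#                        'excludes': '*.tmp',
#                        'remote': '/var/www/artifacts',
#                        'flatten': True,
#                        'delete-remote': False,
#                        'delete-remote-artifacts': False,
#                        'fail-no-files': True,
#                        'groovy-script': ''}],
#           'deploy-if-fail': False}
#   artifact_deployer(parser, xml_parent, data)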
class Publishers(jenkins_jobs.modules.base.Base):
sequence = 70
component_type = 'publisher'
component_list_type = 'publishers'
def gen_xml(self, parser, xml_parent, data):
publishers = XML.SubElement(xml_parent, 'publishers')
for action in data.get('publishers', []):
self.registry.dispatch('publisher', parser, publishers, action)
| apache-2.0 | -4,559,542,569,043,464,700 | 40.622922 | 79 | 0.600895 | false |
joshwalawender/POCS | pocs/focuser/focuser.py | 1 | 16806 | from .. import PanBase
from ..utils import images
from ..utils import current_time
import matplotlib
matplotlib.use('AGG')
import matplotlib.pyplot as plt
from astropy.modeling import models, fitting
import numpy as np
import os
from threading import Event, Thread
class AbstractFocuser(PanBase):
"""
Base class for all focusers
"""
def __init__(self,
name='Generic Focuser',
model='simulator',
port=None,
camera=None,
initial_position=None,
autofocus_range=None,
autofocus_step=None,
autofocus_seconds=None,
autofocus_size=None,
autofocus_merit_function=None,
autofocus_merit_function_kwargs=None,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.model = model
self.port = port
self.name = name
self._connected = False
self._serial_number = 'XXXXXX'
self._position = initial_position
if autofocus_range:
self.autofocus_range = (int(autofocus_range[0]), int(autofocus_range[1]))
else:
self.autofocus_range = None
if autofocus_step:
self.autofocus_step = (int(autofocus_step[0]), int(autofocus_step[1]))
else:
self.autofocus_step = None
self.autofocus_seconds = autofocus_seconds
self.autofocus_size = autofocus_size
self.autofocus_merit_function = autofocus_merit_function
self.autofocus_merit_function_kwargs = autofocus_merit_function_kwargs
self._camera = camera
self.logger.debug('Focuser created: {} on {}'.format(self.name, self.port))
##################################################################################################
# Properties
##################################################################################################
@property
def uid(self):
""" A serial number for the focuser """
return self._serial_number
@property
def is_connected(self):
""" Is the focuser available """
return self._connected
@property
def position(self):
""" Current encoder position of the focuser """
return self._position
@position.setter
def position(self, position):
""" Move focusser to new encoder position """
self.move_to(position)
@property
def camera(self):
"""
Reference to the Camera object that the Focuser is assigned to, if any. A Focuser
should only ever be assigned to one or zero Cameras!
"""
return self._camera
@camera.setter
def camera(self, camera):
if self._camera:
self.logger.warning("{} already assigned to camera {}, skipping attempted assignment to {}!".format(
self, self.camera, camera))
else:
self._camera = camera
@property
def min_position(self):
""" Get position of close limit of focus travel, in encoder units """
raise NotImplementedError
@property
def max_position(self):
""" Get position of far limit of focus travel, in encoder units """
raise NotImplementedError
##################################################################################################
# Methods
##################################################################################################
def move_to(self, position):
""" Move focusser to new encoder position """
raise NotImplementedError
def move_by(self, increment):
""" Move focusser by a given amount """
raise NotImplementedError
def autofocus(self,
seconds=None,
focus_range=None,
focus_step=None,
thumbnail_size=None,
merit_function=None,
merit_function_kwargs=None,
coarse=False,
plots=True,
blocking=False,
*args, **kwargs):
"""
Focuses the camera using the specified merit function. Optionally performs a coarse focus first before
performing the default fine focus. The expectation is that coarse focus will only be required for the first use
of an optic, to establish the approximate position of infinity focus; after updating the initial focus
position in the config, only fine focus will be required.
Args:
seconds (scalar, optional): Exposure time for focus exposures, if not specified will use value from config
focus_range (2-tuple, optional): Coarse & fine focus sweep range, in encoder units. Specify to override
values from config
focus_step (2-tuple, optional): Coarse & fine focus sweep steps, in encoder units. Specify to override
values from config
thumbnail_size (int, optional): Size of square central region of image to use, default 500 x 500 pixels
merit_function (str/callable, optional): Merit function to use as a focus metric, default vollath_F4
merit_function_kwargs (dict, optional): Dictionary of additional keyword arguments for the merit function
coarse (bool, optional): Whether to begin with coarse focusing, default False
plots (bool, optional): Whether to write focus plots to the images folder, default True.
blocking (bool, optional): Whether to block until autofocus complete, default False
Returns:
threading.Event: Event that will be set when autofocusing is complete
"""
assert self._camera.is_connected, self.logger.error("Camera must be connected for autofocus!")
assert self.is_connected, self.logger.error("Focuser must be connected for autofocus!")
if not focus_range:
if self.autofocus_range:
focus_range = self.autofocus_range
else:
raise ValueError("No focus_range specified, aborting autofocus of {}!".format(self._camera))
if not focus_step:
if self.autofocus_step:
focus_step = self.autofocus_step
else:
raise ValueError("No focus_step specified, aborting autofocus of {}!".format(self._camera))
if not seconds:
if self.autofocus_seconds:
seconds = self.autofocus_seconds
else:
raise ValueError("No focus exposure time specified, aborting autofocus of {}!".format(self._camera))
if not thumbnail_size:
if self.autofocus_size:
thumbnail_size = self.autofocus_size
else:
raise ValueError("No focus thumbnail size specified, aborting autofocus of {}!".format(self._camera))
if not merit_function:
if self.autofocus_merit_function:
merit_function = self.autofocus_merit_function
else:
merit_function = 'vollath_F4'
if not merit_function_kwargs:
if self.autofocus_merit_function_kwargs:
merit_function_kwargs = self.autofocus_merit_function_kwargs
else:
merit_function_kwargs = {}
if coarse:
coarse_event = Event()
coarse_thread = Thread(target=self._autofocus,
args=args,
kwargs={'seconds': seconds,
'focus_range': focus_range,
'focus_step': focus_step,
'thumbnail_size': thumbnail_size,
'merit_function': merit_function,
'merit_function_kwargs': merit_function_kwargs,
'coarse': coarse,
'plots': plots,
'start_event': None,
'finished_event': coarse_event,
**kwargs})
coarse_thread.start()
else:
coarse_event = None
fine_event = Event()
fine_thread = Thread(target=self._autofocus,
args=args,
kwargs={'seconds': seconds,
'focus_range': focus_range,
'focus_step': focus_step,
'thumbnail_size': thumbnail_size,
'merit_function': merit_function,
'merit_function_kwargs': merit_function_kwargs,
'coarse': coarse,
'plots': plots,
'start_event': coarse_event,
'finished_event': fine_event,
**kwargs})
fine_thread.start()
if blocking:
fine_event.wait()
return fine_event
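# Minimal usage sketch (commented out), assuming a connected camera with this
# focuser attached; the numeric values are placeholders and the (fine, coarse)
# interpretation of the tuples follows the indexing used in _autofocus():
#
#   autofocus_event = camera.focuser.autofocus(seconds=2.0,
#                                              focus_range=(2000, 10000),
#                                              focus_step=(50, 500),
#                                              coarse=True,
#                                              blocking=False)
#   autofocus_event.wait()   # block until the fine focus pass has finished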
def _autofocus(self, seconds, focus_range, focus_step, thumbnail_size, merit_function,
merit_function_kwargs, coarse, plots, start_event, finished_event, *args, **kwargs):
# If passed a start_event wait until Event is set before proceeding (e.g. wait for coarse focus
# to finish before starting fine focus).
if start_event:
start_event.wait()
initial_focus = self.position
if coarse:
self.logger.debug("Beginning coarse autofocus of {} - initial focus position: {}".format(self._camera,
initial_focus))
else:
self.logger.debug("Beginning autofocus of {} - initial focus position: {}".format(self._camera,
initial_focus))
# Set up paths for temporary focus files, and plots if requested.
image_dir = self.config['directories']['images']
start_time = current_time(flatten=True)
file_path = "{}/{}/{}/{}.{}".format(image_dir,
'focus',
self._camera.uid,
start_time,
self._camera.file_extension)
if plots:
# Take an image before focusing, grab a thumbnail from the centre and add it to the plot
thumbnail = self._camera.get_thumbnail(seconds, file_path, thumbnail_size)
fig = plt.figure(figsize=(9, 18), tight_layout=True)
ax1 = fig.add_subplot(3, 1, 1)
im1 = ax1.imshow(thumbnail, interpolation='none', cmap='cubehelix')
fig.colorbar(im1)
ax1.set_title('Initial focus position: {}'.format(initial_focus))
# Set up encoder positions for autofocus sweep, truncating at focus travel limits if required.
if coarse:
focus_range = focus_range[1]
focus_step = focus_step[1]
else:
focus_range = focus_range[0]
focus_step = focus_step[0]
focus_positions = np.arange(max(initial_focus - focus_range / 2, self.min_position),
min(initial_focus + focus_range / 2, self.max_position) + 1,
focus_step, dtype=np.int)
n_positions = len(focus_positions)
metric = np.empty((n_positions))
for i, position in enumerate(focus_positions):
# Move focus, updating focus_positions with actual encoder position after move.
focus_positions[i] = self.move_to(position)
# Take exposure
thumbnail = self._camera.get_thumbnail(seconds, file_path, thumbnail_size)
# Calculate Vollath F4 focus metric
metric[i] = images.focus_metric(thumbnail, merit_function, **merit_function_kwargs)
self.logger.debug("Focus metric at position {}: {}".format(position, metric[i]))
# Find maximum values
imax = metric.argmax()
if imax == 0 or imax == (n_positions - 1):
# TODO: have this automatically switch to coarse focus mode if this happens
self.logger.warning("Best focus outside sweep range, aborting autofocus on {}!".format(self._camera))
best_focus = focus_positions[imax]
elif not coarse:
# Fit to data around the max value to determine best focus position. Lorentz function seems to fit OK
# provided you only fit in the immediate vicinity of the max value.
# Initialise models
fit = models.Lorentz1D(x_0=focus_positions[imax], amplitude=metric.max())
# Initialise fitter
fitter = fitting.LevMarLSQFitter()
# Select data range for fitting. Tries to use 2 points either side of max, if in range.
fitting_indices = (max(imax - 2, 0), min(imax + 2, n_positions - 1))
# Fit models to data
fit = fitter(fit,
focus_positions[fitting_indices[0]:fitting_indices[1] + 1],
metric[fitting_indices[0]:fitting_indices[1] + 1])
best_focus = fit.x_0.value
# Guard against fitting failures, force best focus to stay within sweep range
if best_focus < focus_positions[0]:
self.logger.warning("Fitting failure: best focus {} below sweep limit {}".format(best_focus,
focus_positions[0]))
best_focus = focus_positions[0]
if best_focus > focus_positions[-1]:
self.logger.warning("Fitting failure: best focus {} above sweep limit {}".format(best_focus,
focus_positions[-1]))
best_focus = focus_positions[-1]
else:
# Coarse focus, just use max value.
best_focus = focus_positions[imax]
if plots:
ax2 = fig.add_subplot(3, 1, 2)
ax2.plot(focus_positions, metric, 'bo', label='{}'.format(merit_function))
if not (imax == 0 or imax == (n_positions - 1)) and not coarse:
fs = np.arange(focus_positions[fitting_indices[0]], focus_positions[fitting_indices[1]] + 1)
ax2.plot(fs, fit(fs), 'b-', label='Lorentzian fit')
ax2.set_xlim(focus_positions[0] - focus_step / 2, focus_positions[-1] + focus_step / 2)
u_limit = 1.10 * metric.max()
l_limit = min(0.95 * metric.min(), 1.05 * metric.min())
ax2.set_ylim(l_limit, u_limit)
ax2.vlines(initial_focus, l_limit, u_limit, colors='k', linestyles=':',
label='Initial focus')
ax2.vlines(best_focus, l_limit, u_limit, colors='k', linestyles='--',
label='Best focus')
ax2.set_xlabel('Focus position')
ax2.set_ylabel('Focus metric')
if coarse:
ax2.set_title('{} coarse focus at {}'.format(self._camera, start_time))
else:
ax2.set_title('{} fine focus at {}'.format(self._camera, start_time))
ax2.legend(loc='best')
final_focus = self.move_to(best_focus)
if plots:
thumbnail = self._camera.get_thumbnail(seconds, file_path, thumbnail_size)
ax3 = fig.add_subplot(3, 1, 3)
im3 = ax3.imshow(thumbnail, interpolation='none', cmap='cubehelix')
fig.colorbar(im3)
ax3.set_title('Final focus position: {}'.format(final_focus))
plot_path = os.path.splitext(file_path)[0] + '.png'
fig.savefig(plot_path)
plt.close(fig)
if coarse:
self.logger.info('Coarse focus plot for camera {} written to {}'.format(self._camera, plot_path))
else:
self.logger.info('Fine focus plot for camera {} written to {}'.format(self._camera, plot_path))
self.logger.debug('Autofocus of {} complete - final focus position: {}'.format(self._camera, final_focus))
if finished_event:
finished_event.set()
return initial_focus, final_focus
def __str__(self):
return "{} ({}) on {}".format(self.name, self.uid, self.port)
| mit | -576,820,583,751,011,800 | 41.872449 | 118 | 0.529751 | false |
danielrenechaparro/Python | Clases.py | 1 | 4406 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Clases.py
#
# Copyright 2016 Daniel Rene <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
#Regular class, without inheritance
class MyClase:
"""This is the class documentation"""
#Class constructor; it initializes the attributes and must always take self
def __init__(self, valInicial):
self.valInicial = valInicial
print("Initialized the variable in the constructor")
#All methods of the class must always take self as their first parameter
def metodoNormal(self):
"These methods can also carry documentation"
print("The initial value is: " + self.valInicial)
def __del__(self):
print("destructor")
def __str__(self):
return "method equivalent to toString"
#method called when this object is compared with another: <, <=, >, >=
def __cmp__(self, otroObjetoAComparar) :
if otroObjetoAComparar > 0: #the comparison with the given object says that object is greater than this one, return a positive number
return 1
if otroObjetoAComparar < 0: #the comparison with the given object says that object is smaller than this one, return a negative number
return -1
else:
return 0 #the comparison with the given object says they are equal, return zero
def __len__(self):
return 777
# Inheritance
class ClaseDerivada1(MyClase):
"This class inherits and neither creates nor modifies anything from the parent"
pass
class ClaseDerivada2(MyClase):
"In this class the init is modified, making use of the super class init but adding other things to it"
def __init__(self, valInicial):
MyClase.__init__(self, valInicial)
print("Initialized from the child class")
class Clase2:
"This class is used to show multiple inheritance"
def __init__(self, valInicial):
self.valInicial = valInicial
print("Loaded the value from clase2")
def metodo2(self, mensaje):
print("this is the other message: "+mensaje)
#defining the getters and setters
def getValInicial(self):
return self.valInicial
def setValInicial(self, valInicial):
self.valInicial = valInicial
class ClaseDerivada3(MyClase, Clase2):
"Esta clase hereda de MyClase y de Clase2, si las clases tienen metodos q se llaman igual y reciben los mismos parametros, se sobre escriben, prevaleciendo los metodos de la clase mas a la izquierda, en este caso MyClase"
def metodoPublico(self):
print("Publico")
def __metodoPrivado(self):
print("Privado")
#otra forma de definir los getters y setter como una propiedad del atributo
class Clase3:
def setDia(self, dia):
if dia > 0 and dia < 32:
self.__dia = dia
else:
print("error")
def getDia(self):
return self.__dia
dia = property(getDia, setDia)
def main():
objeto = ClaseDerivada3("cadena de texto")
objeto.metodoNormal()
objeto.metodo2("Kenicito")
objeto.metodoPublico()
#objeto.__metodoPrivado()
#In fact, when a method name starts with __ the interpreter renames it with the class name, so to access it you must use the class name together with the method name
objeto._ClaseDerivada3__metodoPrivado()
print("El valor pasado a la clase fue: "+objeto.getValInicial())
p = Clase3()
p.dia = 33
print(p.dia)
q = MyClase(10)
print("Longitud: ")
print(len(q))
print("comparacion: ")
print(cmp(q,10))
print(q)
cadenaLarga = ("se puede partir una cadena"
"en varias lineas sin problema"
"gracias al uso de los parentesis, corcheteso llaves")
cadenaLarga2 = """con el uso de 3 comillas
se pueden partir las cadenas
sin problemas"""
print(cadenaLarga2)
return 0
#this statement lets the interpreter know where the program starts
if __name__ == '__main__':
main()
| gpl-3.0 | 7,002,834,793,237,624,000 | 28.178808 | 222 | 0.715161 | false |
MartinPyka/Parametric-Anatomical-Modeling | pam/connection_mapping.py | 1 | 19453 | """This module contains the functions and classes needed for mapping points between layers"""
import logging
import mathutils
import random
import numpy
from .mesh import *
from .constants import *
logger = logging.getLogger(__package__)
def computePoint(v1, v2, v3, v4, x1, x2):
"""Interpolates point on a quad
:param v1, v2, v3, v4: Vertices of the quad
:type v1, v2, v3, v4: mathutils.Vector
:param x1, x2: The interpolation values
:type x1, x2: float [0..1]"""
mv12_co = v1.co * x1 + v2.co * (1 - x1)
mv34_co = v3.co * (1 - x1) + v4.co * x1
mv_co = mv12_co * x2 + mv34_co * (1 - x2)
return mv_co
def selectRandomPoint(obj):
"""Selects a random point on the mesh of an object
:param obj: The object from which to select
:type obj: bpy.types.Object"""
# select a random polygon
p_select = random.random() * obj['area_sum']
polygon = obj.data.polygons[
numpy.nonzero(numpy.array(obj['area_cumsum']) > p_select)[0][0]]
# define position on the polygon
vert_inds = polygon.vertices[:]
poi = computePoint(obj.data.vertices[vert_inds[0]],
obj.data.vertices[vert_inds[1]],
obj.data.vertices[vert_inds[2]],
obj.data.vertices[vert_inds[3]],
random.random(), random.random())
p, n, f = obj.closest_point_on_mesh(poi)
return p, n, f
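# selectRandomPoint() relies on two custom properties that are assumed to be
# precomputed elsewhere; a minimal sketch of that precomputation could look
# like this (property names taken from the function above):
#
#   areas = [poly.area for poly in obj.data.polygons]
#   obj['area_cumsum'] = list(numpy.cumsum(areas))
#   obj['area_sum'] = float(numpy.sum(areas))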
class MappingException(Exception):
def __init__(self):
pass
def __str__(self):
return "MappingException"
class Mapping():
"""Based on a list of layers, connections-properties and distance-properties,
this class can compute the 3d-point, the 2d-uv-point and the distance from a given
point on the first layer to the corresponding point on the last layer
"""
def __init__(self, layers, connections, distances, debug = False):
""":param list layers: layers connecting the pre-synaptic layer with the synaptic layer
:param list connections: values determining the type of layer-mapping
:param list distances: values determining the calculation of the distances between layers
:param bool debug: if true, computeMapping returns the list of layers that it was able
to pass. This helps to debug the mapping definitions in order to figure
out where exactly the mapping stops
self.layers = layers
self.connections = connections
self.distances = distances
self.debug = debug
self.initFunctions()
def initFunctions(self):
"""Initializes the function lists from the connections and distances lists.
Needs to be called after connections or distances have changed"""
self.mapping_functions = [connection_dict[i] for i in self.connections]
self.distance_functions = [distance_dict[self.connections[i]][self.distances[i]] for i in range(len(self.distances))]
self.distance_functions[-1] = distance_dict_syn[self.connections[-1]][self.distances[-1]]
def computeMapping(self, point):
"""Compute the mapping of a single point
:param mathutils.Vector point: vector for which the mapping should be calculated
Return values
-------------
p3d list of 3d-vector of the neuron position on all layers until the last
last position before the synapse. Note, that this might be before the
synapse layer!!! This depends on the distance-property.
p2d 2d-vector of the neuron position on the UV map of the last layer
d distance between neuron position on the first layer and last position before
the synapse! This is not the distance to the p3d point! This is either the
distance to the 3d-position of the last but one layer or, in case
euclidean-uv-distance was used, the distance to the position of the last
layer determind by euclidean-distance. Functions, like computeConnectivity()
add the distance to the synapse to value d in order to retrieve
the complete distance from the pre- or post-synaptic neuron
to the synapse
"""
self.p3d = [point]
for i in range(0, len(self.connections)):
layer = self.layers[i]
layer_next = self.layers[i + 1]
con_func = self.mapping_functions[i]
dis_func = self.distance_functions[i]
try:
p3d_n = con_func(self, layer, layer_next, dis_func)
except MappingException:
if self.debug:
return self.p3d, i, None
else:
return None, None, None
# for the synaptic layer, compute the uv-coordinates
p2d = layer_next.map3dPointToUV(p3d_n)
return self.p3d, p2d, compute_path_length(self.p3d)
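# Minimal usage sketch (commented out), assuming three prepared layer objects
# and the mapping/distance constants imported from .constants:
#
#   mapping = Mapping(layers=[pre_layer, intermediate_layer, synaptic_layer],
#                     connections=[MAP_top, MAP_normal],
#                     distances=[DIS_euclidUV, DIS_normalUV])
#   p3d, p2d, d = mapping.computeMapping(point_on_pre_layer)
#   if p3d is None:
#       pass   # the mapping failed for this point (see the debug flag above)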
"""
The following functions are the combinations of all distance and mapping types.
If you wish to add a new mapping or distance type, you will have to create a
function for each possible combination. If you add a mapping function, you also
have to add a function that computes the point on the next layer (p3d_n),
prefix "con_", and pass it down to the distance function. You also have to add
your functions to the lists below.
"""
"""Euclidean mapping"""
def con_euclid(self, layer, layer_next, dis_func):
p3d_n = layer_next.map3dPointTo3d(layer_next, self.p3d[-1])
dis_func(self, p3d_n, layer, layer_next)
return p3d_n
def euclid_dis_euclid(self, p3d_n, layer, layer_next):
self.p3d.append(p3d_n)
def euclid_dis_euclid_uv(self, p3d_n, layer, layer_next):
self.p3d.append(p3d_n)
def euclid_dis_jump_uv(self, p3d_n, layer, layer_next):
self.p3d.append(p3d_n)
def euclid_dis_uv_jump(self, p3d_n, layer, layer_next):
p3d_t = layer.map3dPointTo3d(layer, p3d_n)
self.p3d = self.p3d + layer.interpolateUVTrackIn3D(self.p3d[-1], p3d_t)
self.p3d.append(p3d_n)
def euclid_dis_normal_uv(self, p3d_n, layer, layer_next):
p, n, f = layer.closest_point_on_mesh(self.p3d[-1])
p3d_t = layer_next.map3dPointTo3d(layer_next, p, n)
if p3d_t is None:
raise MappingException()
self.p3d.append(p3d_t)
self.p3d = self.p3d + layer_next.interpolateUVTrackIn3D(p3d_t, p3d_n)
self.p3d.append(p3d_n)
def euclid_dis_uv_normal(self, p3d_n, layer, layer_next):
self.p3d.append(p3d_n)
# If before the synaptic layer
def euclid_dis_euclid_syn(self, p3d_n, layer, layer_next):
pass
def euclid_dis_euclid_uv_syn(self, p3d_n, layer, layer_next):
self.p3d.append(p3d_n)
def euclid_dis_jump_uv_syn(self, p3d_n, layer, layer_next):
self.p3d.append(p3d_n)
def euclid_dis_uv_jump_syn(self, p3d_n, layer, layer_next):
pass
def euclid_dis_normal_uv_syn(self, p3d_n, layer, layer_next):
p, n, f = layer.closest_point_on_mesh(self.p3d[-1])
p3d_t = layer_next.map3dPointTo3d(layer_next, p, n)
if p3d_t is None:
raise MappingException()
self.p3d.append(p3d_t)
def euclid_dis_uv_normal_syn(self, p3d_n, layer, layer_next):
pass
"""Normal Mapping"""
def con_normal(self, layer, layer_next, dis_func):
# compute normal on layer for the last point
p, n, f = layer.closest_point_on_mesh(self.p3d[-1])
# determine new point
p3d_n = layer_next.map3dPointTo3d(layer_next, p, n)
# if there is no intersection, abort
if p3d_n is None:
raise MappingException()
dis_func(self, p3d_n, layer, layer_next)
return p3d_n
def normal_dis_euclid(self, p3d_n, layer, layer_next):
self.p3d.append(p3d_n)
def normal_dis_euclid_uv(self, p3d_n, layer, layer_next):
self.p3d.append(p3d_n)
def normal_dis_jump_uv(self, p3d_n, layer, layer_next):
p3d_t = layer_next.map3dPointTo3d(layer_next, self.p3d[-1])
self.p3d.append(p3d_t)
self.p3d = self.p3d + layer_next.interpolateUVTrackIn3D(p3d_t, p3d_n)
self.p3d.append(p3d_n)
def normal_dis_uv_jump(self, p3d_n, layer, layer_next):
p3d_t = layer.map3dPointTo3d(layer, p3d_n)
self.p3d = self.p3d + layer.interpolateUVTrackIn3D(self.p3d[-1], p3d_t)
self.p3d.append(p3d_n)
def normal_dis_normal_uv(self, p3d_n, layer, layer_next):
self.p3d.append(p3d_n)
def normal_dis_uv_normal(self, p3d_n, layer, layer_next):
self.p3d.append(p3d_n)
def normal_dis_euclid_syn(self, p3d_n, layer, layer_next):
pass
def normal_dis_euclid_uv_syn(self, p3d_n, layer, layer_next):
self.p3d.append(p3d_n)
def normal_dis_jump_uv_syn(self, p3d_n, layer, layer_next):
p3d_t = layer_next.map3dPointTo3d(layer_next, self.p3d[-1])
self.p3d.append(p3d_t)
def normal_dis_uv_jump_syn(self, p3d_n, layer, layer_next):
pass
def normal_dis_normal_uv_syn(self, p3d_n, layer, layer_next):
self.p3d.append(p3d_n)
def normal_dis_uv_normal_syn(self, p3d_n, layer, layer_next):
pass
"""Random Mapping"""
def con_random(self, layer, layer_next, dis_func):
p3d_n, _, _ = selectRandomPoint(layer_next.obj)
dis_func(self, p3d_n, layer, layer_next)
return p3d_n
def random_dis_euclid(self, p3d_n, layer, layer_next):
self.p3d.append(p3d_n)
def random_dis_euclid_uv(self, p3d_n, layer, layer_next):
self.p3d.append(p3d_n)
def random_dis_jump_uv(self, p3d_n, layer, layer_next):
p3d_t = layer_next.map3dPointTo3d(layer_next, self.p3d[-1])
self.p3d.append(p3d_t)
self.p3d = self.p3d + layer_next.interpolateUVTrackIn3D(p3d_t, p3d_n)
self.p3d.append(p3d_n)
def random_dis_uv_jump(self, p3d_n, layer, layer_next):
p3d_t = layer.map3dPointTo3d(layer, p3d_n)
self.p3d = self.p3d + layer.interpolateUVTrackIn3D(self.p3d[-1], p3d_t)
self.p3d.append(p3d_n)
def random_dis_normal_uv(self, p3d_n, layer, layer_next):
p, n, f = layer.closest_point_on_mesh(self.p3d[-1])
p3d_t = layer_next.map3dPointTo3d(layer_next, p, n)
self.p3d.append(p3d_t)
self.p3d = self.p3d + layer_next.interpolateUVTrackIn3D(p3d_t, p3d_n)
self.p3d.append(p3d_n)
def random_dis_uv_normal(self, p3d_n, layer, layer_next):
p, n, f = layer_next.closest_point_on_mesh(p3d_n)
p3d_t = layer.map3dPointTo3d(layer, p, n)
if p3d_t is None:
raise MappingException()
self.p3d = self.p3d + layer.interpolateUVTrackIn3D(self.p3d[-1], p3d_t)
self.p3d.append(p3d_t)
self.p3d.append(p3d_n)
def random_dis_euclid_syn(self, p3d_n, layer, layer_next):
pass
def random_dis_euclid_uv_syn(self, p3d_n, layer, layer_next):
self.p3d.append(p3d_n)
def random_dis_jump_uv_syn(self, p3d_n, layer, layer_next):
p3d_t = layer_next.map3dPointTo3d(layer_next, self.p3d[-1])
self.p3d.append(p3d_t)
def random_dis_uv_jump_syn(self, p3d_n, layer, layer_next):
pass
def random_dis_normal_uv_syn(self, p3d_n, layer, layer_next):
p, n, f = layer.closest_point_on_mesh(self.p3d[-1])
# determine new point
p3d_t = layer_next.map3dPointTo3d(layer_next, p, n)
if p3d_t is None:
raise MappingException()
self.p3d.append(p3d_t)
def random_dis_uv_normal_syn(self, p3d_n, layer, layer_next):
pass
"""Topological mapping"""
def con_top(self, layer, layer_next, dis_func):
p3d_n = layer.map3dPointTo3d(layer_next, self.p3d[-1])
dis_func(self, p3d_n, layer, layer_next)
return p3d_n
def top_dis_euclid(self, p3d_n, layer, layer_next):
self.p3d.append(p3d_n)
def top_dis_euclid_uv(self, p3d_n, layer, layer_next):
self.p3d.append(p3d_n)
def top_dis_jump_uv(self, p3d_n, layer, layer_next):
p3d_t = layer_next.map3dPointTo3d(layer_next, self.p3d[-1])
self.p3d.append(p3d_t)
self.p3d = self.p3d + layer_next.interpolateUVTrackIn3D(p3d_t, p3d_n)
self.p3d.append(p3d_n)
def top_dis_uv_jump(self, p3d_n, layer, layer_next):
p3d_t = layer.map3dPointTo3d(layer, p3d_n)
self.p3d = self.p3d + layer.interpolateUVTrackIn3D(self.p3d[-1], p3d_t)
self.p3d.append(p3d_n)
def top_dis_normal_uv(self, p3d_n, layer, layer_next):
p, n, f = layer.closest_point_on_mesh(self.p3d[-1])
p3d_t = layer_next.map3dPointTo3d(layer_next, p, n)
if p3d_t is None:
raise MappingException()
self.p3d.append(p3d_t)
self.p3d = self.p3d + layer_next.interpolateUVTrackIn3D(p3d_t, p3d_n)
self.p3d.append(p3d_n)
def top_dis_uv_normal(self, p3d_n, layer, layer_next):
p, n, f = layer_next.closest_point_on_mesh(p3d_n)
p3d_t = layer.map3dPointTo3d(layer, p, n)
if p3d_t is None:
raise MappingException()
self.p3d = self.p3d + layer.interpolateUVTrackIn3D(self.p3d[-1], p3d_t)
self.p3d.append(p3d_t)
self.p3d.append(p3d_n)
def top_dis_euclid_syn(self, p3d_n, layer, layer_next):
pass
def top_dis_euclid_uv_syn(self, p3d_n, layer, layer_next):
self.p3d.append(p3d_n)
def top_dis_jump_uv_syn(self, p3d_n, layer, layer_next):
p3d_t = layer_next.map3dPointTo3d(layer_next, self.p3d[-1])
self.p3d.append(p3d_t)
def top_dis_uv_jump_syn(self, p3d_n, layer, layer_next):
pass
def top_dis_normal_uv_syn(self, p3d_n, layer, layer_next):
p, n, f = layer.closest_point_on_mesh(self.p3d[-1])
# determine new point
p3d_t = layer_next.map3dPointTo3d(layer_next, p, n)
if p3d_t is None:
raise MappingException()
self.p3d.append(p3d_t)
def top_dis_uv_normal_syn(self, p3d_n, layer, layer_next):
pass
"""UV mapping"""
def con_uv(self, layer, layer_next, dis_func):
p2d_t = layer.map3dPointToUV(self.p3d[-1])
p3d_n = layer_next.mapUVPointTo3d([p2d_t])
if p3d_n == []:
raise MappingException()
p3d_n = p3d_n[0]
dis_func(self, p3d_n, layer, layer_next)
return p3d_n
def uv_dis_euclid(self, p3d_n, layer, layer_next):
self.p3d.append(p3d_n)
def uv_dis_euclid_uv(self, p3d_n, layer, layer_next):
self.p3d.append(p3d_n)
def uv_dis_jump_uv(self, p3d_n, layer, layer_next):
p3d_t = layer_next.map3dPointTo3d(layer_next, self.p3d[-1])
self.p3d.append(p3d_t)
self.p3d = self.p3d + layer_next.interpolateUVTrackIn3D(p3d_t, p3d_n)
self.p3d.append(p3d_n)
def uv_dis_uv_jump(self, p3d_n, layer, layer_next):
p3d_t = layer.map3dPointTo3d(layer, p3d_n)
self.p3d = self.p3d + layer.interpolateUVTrackIn3D(self.p3d[-1], p3d_t)
self.p3d.append(p3d_n)
def uv_dis_normal_uv(self, p3d_n, layer, layer_next):
p, n, f = layer.closest_point_on_mesh(self.p3d[-1])
p3d_t = layer_next.map3dPointTo3d(layer_next, p, n)
if p3d_t is None:
raise MappingException()
self.p3d.append(p3d_t)
self.p3d = self.p3d + layer_next.interpolateUVTrackIn3D(p3d_t, p3d_n)
self.p3d.append(p3d_n)
def uv_dis_uv_normal(self, p3d_n, layer, layer_next):
p, n, f = layer_next.closest_point_on_mesh(p3d_n)
p3d_t = layer.map3dPointTo3d(layer, p, n)
if p3d_t is None:
raise MappingException()
self.p3d = self.p3d + layer.interpolateUVTrackIn3D(self.p3d[-1], p3d_t)
self.p3d.append(p3d_t)
self.p3d.append(p3d_n)
def uv_dis_euclid_syn(self, p3d_n, layer, layer_next):
pass
def uv_dis_euclid_uv_syn(self, p3d_n, layer, layer_next):
self.p3d.append(p3d_n)
def uv_dis_jump_uv_syn(self, p3d_n, layer, layer_next):
p3d_t = layer_next.map3dPointTo3d(layer_next, self.p3d[-1])
self.p3d.append(p3d_t)
def uv_dis_uv_jump_syn(self, p3d_n, layer, layer_next):
pass
def uv_dis_normal_uv_syn(self, p3d_n, layer, layer_next):
p, n, f = layer.closest_point_on_mesh(self.p3d[-1])
# determine new point
p3d_t = layer_next.map3dPointTo3d(layer_next, p, n)
if p3d_t is None:
raise MappingException()
self.p3d.append(p3d_t)
def uv_dis_uv_normal_syn(self, p3d_n, layer, layer_next):
pass
def con_mask3d(self, layer, layer_next, dis_func):
if not checkPointInObject(layer_next.obj, self.p3d[-1]):
raise MappingException()
else:
p3d_n = self.p3d[-1]
self.p3d.append(p3d_n)
return p3d_n
connection_dict = {
MAP_euclid: con_euclid,
MAP_normal: con_normal,
MAP_random: con_random,
MAP_top: con_top,
MAP_uv: con_uv,
MAP_mask3D: con_mask3d
}
distance_dict = {
MAP_euclid: {
DIS_euclid: euclid_dis_euclid,
DIS_euclidUV: euclid_dis_euclid_uv,
DIS_jumpUV: euclid_dis_jump_uv,
DIS_UVjump: euclid_dis_uv_jump,
DIS_normalUV: euclid_dis_normal_uv,
DIS_UVnormal: euclid_dis_uv_normal
},
MAP_normal: {
DIS_euclid: normal_dis_euclid,
DIS_euclidUV: normal_dis_euclid_uv,
DIS_jumpUV: normal_dis_jump_uv,
DIS_UVjump: normal_dis_uv_jump,
DIS_normalUV: normal_dis_normal_uv,
DIS_UVnormal: normal_dis_uv_normal
},
MAP_random: {
DIS_euclid: random_dis_euclid,
DIS_euclidUV: random_dis_euclid_uv,
DIS_jumpUV: random_dis_jump_uv,
DIS_UVjump: random_dis_uv_jump,
DIS_normalUV: random_dis_normal_uv,
DIS_UVnormal: random_dis_uv_normal
},
MAP_top: {
DIS_euclid: top_dis_euclid,
DIS_euclidUV: top_dis_euclid_uv,
DIS_jumpUV: top_dis_jump_uv,
DIS_UVjump: top_dis_uv_jump,
DIS_normalUV: top_dis_normal_uv,
DIS_UVnormal: top_dis_uv_normal
},
MAP_uv: {
DIS_euclid: uv_dis_euclid,
DIS_euclidUV: uv_dis_euclid_uv,
DIS_jumpUV: uv_dis_jump_uv,
DIS_UVjump: uv_dis_uv_jump,
DIS_normalUV: uv_dis_normal_uv,
DIS_UVnormal: uv_dis_uv_normal
},
MAP_mask3D: {
DIS_euclid: None,
DIS_euclidUV: None,
DIS_jumpUV: None,
DIS_UVjump: None,
DIS_normalUV: None,
DIS_UVnormal: None
},
}
distance_dict_syn = {
MAP_euclid: {
DIS_euclid: euclid_dis_euclid_syn,
DIS_euclidUV: euclid_dis_euclid_uv_syn,
DIS_jumpUV: euclid_dis_jump_uv_syn,
DIS_UVjump: euclid_dis_uv_jump_syn,
DIS_normalUV: euclid_dis_normal_uv_syn,
DIS_UVnormal: euclid_dis_uv_normal_syn
},
MAP_normal: {
DIS_euclid: normal_dis_euclid_syn,
DIS_euclidUV: normal_dis_euclid_uv_syn,
DIS_jumpUV: normal_dis_jump_uv_syn,
DIS_UVjump: normal_dis_uv_jump_syn,
DIS_normalUV: normal_dis_normal_uv_syn,
DIS_UVnormal: normal_dis_uv_normal_syn
},
MAP_random: {
DIS_euclid: random_dis_euclid_syn,
DIS_euclidUV: random_dis_euclid_uv_syn,
DIS_jumpUV: random_dis_jump_uv_syn,
DIS_UVjump: random_dis_uv_jump_syn,
DIS_normalUV: random_dis_normal_uv_syn,
DIS_UVnormal: random_dis_uv_normal_syn
},
MAP_top: {
DIS_euclid: top_dis_euclid_syn,
DIS_euclidUV: top_dis_euclid_uv_syn,
DIS_jumpUV: top_dis_jump_uv_syn,
DIS_UVjump: top_dis_uv_jump_syn,
DIS_normalUV: top_dis_normal_uv_syn,
DIS_UVnormal: top_dis_uv_normal_syn
},
MAP_uv: {
DIS_euclid: uv_dis_euclid_syn,
DIS_euclidUV: uv_dis_euclid_uv_syn,
DIS_jumpUV: uv_dis_jump_uv_syn,
DIS_UVjump: uv_dis_uv_jump_syn,
DIS_normalUV: uv_dis_normal_uv_syn,
DIS_UVnormal: uv_dis_uv_normal_syn
},
MAP_mask3D: {
DIS_euclid: None,
DIS_euclidUV: None,
DIS_jumpUV: None,
DIS_UVjump: None,
DIS_normalUV: None,
DIS_UVnormal: None
},
} | gpl-2.0 | 2,615,298,735,748,952,000 | 37.370809 | 125 | 0.637331 | false |
CroatianMeteorNetwork/CMN-codes | HMM_radio2txt/HMM_radio2txt.py | 1 | 1416 | # Copyright 2014 Denis Vida, [email protected]
# The HMM_radio2txt is free software; you can redistribute
# it and/or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation, version 2.
# The HMM_radio2txt is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with the HMM_radio2txt ; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from scipy.io.wavfile import read
import numpy as np
import datetime, math
numchunks = 6000
file_name = "RAD_BEDOUR_20111007_0135_BEUCCL_SYS001.wav" #WAV source file name
samprate, wavdata = read(file_name) #Get data from WAV file (samprate = samples/sec), wavdata contains raw levels data
chunks = np.array_split(wavdata, numchunks) #Split array into chunks
dbs = [np.mean(chunk) for chunk in chunks] #Mean signal level of each chunk (a raw sample mean, not an actual dB value)
print samprate
data_file = open('wav_data.txt', 'w')
data_file.write('Sample rate: '+str(samprate)+' samples/sec reduced to '+str(numchunks)+' chunks\n')
for no, line in enumerate(dbs):
data_file.write(str(no+1)+' '+str(line)+'\n')
data_file.close()
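# Sketch of the resulting wav_data.txt layout (values are placeholders; the
# sample rate depends on the input file):
#
#   Sample rate: 48000 samples/sec reduced to 6000 chunks
#   1 123.456
#   2 119.872
#   ...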
| gpl-2.0 | 3,662,256,188,912,551,000 | 34.4 | 118 | 0.750706 | false |
dstufft/potpie | potpie/pseudo/splitters.py | 1 | 5418 | # -*- coding: utf-8 -*-
import re
from polib import unescape
class ValidationError(Exception):
pass
class BaseValidator(object):
"""Base class for validators.
Implements the decorator pattern.
"""
def __init__(self, source_language=None, target_language=None, rule=None):
self.slang = source_language
self.tlang = target_language
self.rule = rule
def __call__(self, old, new):
"""Validate the `new` translation against the `old` one.
No checks are needed for deleted translations
Args:
old: The old translation.
new: The new translation.
Raises:
A ValidationError with an appropriate message.
"""
if not new or not self.precondition():
return
self.validate(old, new)
def precondition(self):
"""Check whether this validator is applicable to the situation."""
return True
def validate(self, old, new):
"""Actual validation method.
Subclasses must override this method.
Args:
old: The old translation.
new: The new translation.
Raises:
A ValidationError with an appropriate message.
"""
pass
class PrintfValidator(BaseValidator):
"""Validator that checks that the number of printf formats specifiers
is the same in the translation.
This is valid only if the plurals in the two languages are the same.
"""
printf_re = re.compile(
'%((?:(?P<ord>\d+)\$|\((?P<key>\w+)\))?(?P<fullvar>[+#-]*(?:\d+)?'\
'(?:\.\d+)?(hh\|h\|l\|ll)?(?P<type>[\w%])))'
)
def precondition(self):
"""Check if the number of plurals in the two languages is the same."""
return self.tlang.nplurals == self.slang.nplurals and \
super(PrintfValidator, self).precondition()
def validate(self, old, new):
old = unescape(old)
new = unescape(new)
old_matches = list(self.printf_re.finditer(old))
new_matches = list(self.printf_re.finditer(new))
if len(old_matches) != len(new_matches):
raise ValidationError("The number of arguments seems to differ "
"between the source string and the translation."
)
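# Minimal sketch (commented out), assuming two language objects with matching
# nplurals attributes; the strings are placeholders:
#
#   validator = PrintfValidator(source_language=en, target_language=de)
#   validator("Hello %s, %d new items", "Hallo %s")   # raises ValidationError
#   validator("Hello %s", "Hallo %s")                 # passes silently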
def next_splitter_or_func(string, splitters, func, pseudo_type):
"""
Helper for doing the next splitter check.
If the list is not empty, call the next splitter decorator appropriately,
otherwise call the decorated function.
"""
if splitters:
return splitters[0](string, splitters[1:])(func)(pseudo_type,
string)
else:
return func(pseudo_type, string)
class SplitterDecorators(object):
"""
A class decorator that receives a list of splitter decorator classes and
calls the first splitter from the list passing the decorated function as
an argument as well as the list of splitters without the called splitter.
In case the list of splitters is empty, it calls the decorated function
right away.
This decorator must be only used with method of classes inheriting from
``transifex.resources.formats.pseudo.PseudoTypeMixin``.
"""
def __init__(self, splitters):
self.splitters = splitters
def __call__(self, func):
def _wrapper(pseudo_type, string):
return next_splitter_or_func(string, self.splitters, func,
pseudo_type)
return _wrapper
class BaseSplitter(object):
"""
Base class decorator for splitting a given string based on a regex and
calling the subsequent splitter class available in the ``splitters`` list or,
if none is left, the decorated method.
"""
REGEX = r''
def __init__(self, string, splitters):
self.string = string
self.splitters = splitters
def __call__(self, func):
def _wrapped(pseudo_type, string, **kwargs):
text = []
keys = [l.group() for l in self._regex_matches(string)]
nkeys = len(keys)
i = 0
for key in keys:
t = string.split(key, 1)
string = t[0]
string = next_splitter_or_func(string, self.splitters,
func, pseudo_type)
text.extend([string, key])
i += 1
string = t[1]
string = next_splitter_or_func(string, self.splitters,
func, pseudo_type)
text.append(string)
return "".join(text)
return _wrapped
@classmethod
def _regex_matches(cls, string):
return re.finditer(cls.REGEX, string)
class PrintfSplitter(BaseSplitter):
"""
Split the string on printf placeholders, such as %s, %d, %i, %(foo)s, etc.
"""
# Lets reuse the printf regex from the validators
REGEX = PrintfValidator.printf_re
class TagSplitter(BaseSplitter):
"""
Split the string on XML/HTML tags, such as <b>, </b>, <a href="">, etc.
"""
REGEX = r'(<|<)(.|\n)*?(>|>)'
class EscapedCharsSplitter(BaseSplitter):
"""
Split the string on escaped chars, such as \\\\n, \\\\t, etc.
"""
REGEX = r'(\\\\[\w]{1})'
class HTMLSpecialEntitiesSplitter(BaseSplitter):
"""
Splits the string on HTML special entities, such as <, &, etc.
"""
REGEX = r'&[a-zA-Z]+;'
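# Minimal usage sketch (commented out), assuming a PseudoTypeMixin subclass as
# described in the SplitterDecorators docstring; the names below are hypothetical:
#
#   class UpperPseudoType(PseudoTypeMixin):
#       @SplitterDecorators([PrintfSplitter, TagSplitter, EscapedCharsSplitter])
#       def compile(self, string):
#           return string.upper()
#
# Placeholders such as "%(user)s", tags such as "<b>", and escaped chars are
# passed through unchanged while the text between them is upper-cased.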
| gpl-2.0 | 4,022,284,664,336,294,400 | 28.769231 | 82 | 0.591362 | false |
spasmilo/electrum | scripts/authenticator.py | 1 | 11237 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import android
import sys
import os
import imp
import base64
script_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(script_dir, 'packages'))
import qrcode
imp.load_module('electrum', *imp.find_module('lib'))
from electrum import SimpleConfig, Wallet, WalletStorage, format_satoshis
from electrum import util
from electrum.transaction import Transaction
from electrum.bitcoin import base_encode, base_decode
def modal_dialog(title, msg = None):
droid.dialogCreateAlert(title,msg)
droid.dialogSetPositiveButtonText('OK')
droid.dialogShow()
droid.dialogGetResponse()
droid.dialogDismiss()
def modal_input(title, msg, value = None, etype=None):
droid.dialogCreateInput(title, msg, value, etype)
droid.dialogSetPositiveButtonText('OK')
droid.dialogSetNegativeButtonText('Cancel')
droid.dialogShow()
response = droid.dialogGetResponse()
result = response.result
droid.dialogDismiss()
if result is None:
return modal_input(title, msg, value, etype)
if result.get('which') == 'positive':
return result.get('value')
def modal_question(q, msg, pos_text = 'OK', neg_text = 'Cancel'):
droid.dialogCreateAlert(q, msg)
droid.dialogSetPositiveButtonText(pos_text)
droid.dialogSetNegativeButtonText(neg_text)
droid.dialogShow()
response = droid.dialogGetResponse()
result = response.result
droid.dialogDismiss()
if result is None:
return modal_question(q, msg, pos_text, neg_text)
return result.get('which') == 'positive'
def make_layout(s):
content = """
<LinearLayout
android:id="@+id/zz"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:background="#ff222222">
<TextView
android:id="@+id/textElectrum"
android:text="Electrum Authenticator"
android:textSize="7pt"
android:textColor="#ff4444ff"
android:gravity="left"
android:layout_height="wrap_content"
android:layout_width="match_parent"
/>
</LinearLayout>
%s """%s
return """<?xml version="1.0" encoding="utf-8"?>
<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
android:id="@+id/background"
android:orientation="vertical"
android:layout_width="match_parent"
android:layout_height="match_parent"
android:background="#ff000022">
%s
</LinearLayout>"""%content
def qr_layout(title):
title_view= """
<TextView android:id="@+id/addrTextView"
android:layout_width="match_parent"
android:layout_height="50"
android:text="%s"
android:textAppearance="?android:attr/textAppearanceLarge"
android:gravity="center_vertical|center_horizontal|center">
</TextView>"""%title
image_view="""
<ImageView
android:id="@+id/qrView"
android:gravity="center"
android:layout_width="match_parent"
android:antialias="false"
android:src=""
/>
"""
return make_layout(title_view + image_view)
def add_menu():
droid.clearOptionsMenu()
droid.addOptionsMenuItem("Seed", "seed", None,"")
droid.addOptionsMenuItem("Public Key", "xpub", None,"")
droid.addOptionsMenuItem("Transaction", "scan", None,"")
droid.addOptionsMenuItem("Password", "password", None,"")
def make_bitmap(data):
# fixme: this is highly inefficient
import qrcode
from electrum import bmp
qr = qrcode.QRCode()
qr.add_data(data)
bmp.save_qrcode(qr,"/sdcard/sl4a/qrcode.bmp")
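# make_bitmap() writes the generated QR code to /sdcard/sl4a/qrcode.bmp, the
# same path Authenticator.show_qr() later loads into the ImageView; a minimal
# (commented out) call would be:
#
#   make_bitmap('data to encode')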
droid = android.Android()
wallet = None
class Authenticator:
def __init__(self):
global wallet
self.qr_data = None
storage = WalletStorage({'wallet_path':'/sdcard/electrum/authenticator'})
if not storage.file_exists:
action = self.restore_or_create()
if not action:
exit()
password = droid.dialogGetPassword('Choose a password').result
if password:
password2 = droid.dialogGetPassword('Confirm password').result
if password != password2:
modal_dialog('Error', 'Passwords do not match')
exit()
else:
password = None
if action == 'create':
wallet = Wallet(storage)
seed = wallet.make_seed()
modal_dialog('Your seed is:', seed)
elif action == 'import':
seed = self.seed_dialog()
if not seed:
exit()
if not Wallet.is_seed(seed):
exit()
wallet = Wallet.from_seed(seed, storage)
else:
exit()
wallet.add_seed(seed, password)
wallet.create_master_keys(password)
wallet.create_main_account(password)
else:
wallet = Wallet(storage)
def restore_or_create(self):
droid.dialogCreateAlert("Seed not found", "Do you want to create a new seed, or to import it?")
droid.dialogSetPositiveButtonText('Create')
droid.dialogSetNeutralButtonText('Import')
droid.dialogSetNegativeButtonText('Cancel')
droid.dialogShow()
response = droid.dialogGetResponse().result
droid.dialogDismiss()
if not response: return
if response.get('which') == 'negative':
return
return 'import' if response.get('which') == 'neutral' else 'create'
def seed_dialog(self):
if modal_question("Enter your seed", "Input method", 'QR Code', 'mnemonic'):
code = droid.scanBarcode()
r = code.result
if r:
seed = r['extras']['SCAN_RESULT']
else:
return
else:
seed = modal_input('Mnemonic', 'Please enter your seed phrase')
return str(seed)
def show_qr(self, data):
path = "/sdcard/sl4a/qrcode.bmp"
if data:
droid.dialogCreateSpinnerProgress("please wait")
droid.dialogShow()
try:
make_bitmap(data)
finally:
droid.dialogDismiss()
else:
with open(path, 'w') as f: f.write('')
droid.fullSetProperty("qrView", "src", 'file://'+path)
self.qr_data = data
def show_title(self, title):
droid.fullSetProperty("addrTextView","text", title)
def get_password(self):
if wallet.use_encryption:
password = droid.dialogGetPassword('Password').result
try:
wallet.check_password(password)
except:
return False
return password
def main(self):
add_menu()
welcome = 'Use the menu to scan a transaction.'
droid.fullShow(qr_layout(welcome))
while True:
event = droid.eventWait().result
if not event:
continue
elif event["name"] == "key":
if event["data"]["key"] == '4':
if self.qr_data:
self.show_qr(None)
self.show_title(welcome)
else:
break
elif event["name"] == "seed":
password = self.get_password()
if password is False:
modal_dialog('Error','incorrect password')
continue
seed = wallet.get_mnemonic(password)
modal_dialog('Your seed is', seed)
elif event["name"] == "password":
self.change_password_dialog()
elif event["name"] == "xpub":
mpk = wallet.get_master_public_key()
self.show_qr(mpk)
self.show_title('master public key')
elif event["name"] == "scan":
r = droid.scanBarcode()
r = r.result
if not r:
continue
data = r['extras']['SCAN_RESULT']
data = base_decode(data.encode('utf8'), None, base=43)
data = ''.join(chr(ord(b)) for b in data).encode('hex')
tx = Transaction.deserialize(data)
#except:
# modal_dialog('Error', 'Cannot parse transaction')
# continue
if not wallet.can_sign(tx):
modal_dialog('Error', 'Cannot sign this transaction')
continue
lines = map(lambda x: x[0] + u'\t\t' + format_satoshis(x[1]) if x[1] else x[0], tx.get_outputs())
if not modal_question('Sign?', '\n'.join(lines)):
continue
password = self.get_password()
if password is False:
modal_dialog('Error','incorrect password')
continue
droid.dialogCreateSpinnerProgress("Signing")
droid.dialogShow()
wallet.sign_transaction(tx, password)
droid.dialogDismiss()
data = base_encode(str(tx).decode('hex'), base=43)
self.show_qr(data)
self.show_title('Signed Transaction')
droid.makeToast("Bye!")
def change_password_dialog(self):
if wallet.use_encryption:
password = droid.dialogGetPassword('Your seed is encrypted').result
if password is None:
return
else:
password = None
try:
wallet.check_password(password)
except Exception:
modal_dialog('Error', 'Incorrect password')
return
new_password = droid.dialogGetPassword('Choose a password').result
if new_password == None:
return
if new_password != '':
password2 = droid.dialogGetPassword('Confirm new password').result
if new_password != password2:
modal_dialog('Error', 'passwords do not match')
return
wallet.update_password(password, new_password)
if new_password:
modal_dialog('Password updated', 'Your seed is encrypted')
else:
modal_dialog('No password', 'Your seed is not encrypted')
if __name__ == "__main__":
a = Authenticator()
a.main()
| gpl-3.0 | 6,965,951,914,015,369,000 | 30.47619 | 113 | 0.575331 | false |
Wazoku/Shoreditch-Gamerunner | test.py | 1 | 1266 | ##########
#### Run the test suite for Shoreditch Gamerunner
##########
import unittest
import os, sys
import argparse
import test
def list_modules(dir, module_path = ''):
modules = []
for f in os.listdir(dir):
module_name, ext = os.path.splitext(f) # Handles no-extension files, etc.
if os.path.isdir('test/' + module_name):
modules.extend(list_modules(dir + '/' + module_name, module_path + module_name + '.'))
elif ext == '.py' and not module_name == '__init__': # Important, ignore .pyc/other files.
__import__(module_path + module_name)
modules.append(sys.modules[module_path + module_name])
return modules
def run_tests(scope = None):
suite = unittest.TestSuite()
import test
modules = []
if scope:
suite.addTests(unittest.TestLoader().loadTestsFromName(scope))
else:
for module in list_modules(os.path.dirname(test.__file__), 'test.'):
suite.addTests(unittest.TestLoader().loadTestsFromModule(module))
res = unittest.TextTestRunner(verbosity=2).run(suite)
return len(res.failures)
parser = argparse.ArgumentParser(description='Test the Settlers of Shoreditch game')
parser.add_argument('--test_scope')
args = parser.parse_args()
if args.test_scope:
sys.exit(run_tests(args.test_scope))
else:
sys.exit(run_tests()) | mit | -9,017,181,077,864,914,000 | 26.543478 | 92 | 0.696682 | false |
nschloe/quadpy | src/quadpy/t2/_dunavant/__init__.py | 1 | 2732 | import pathlib
from sympy import Rational as frac
from ...helpers import article
from .._helpers import T2Scheme, _read, register
source = article(
authors=["D.A. Dunavant"],
title="High Degree Efficient Symmetrical Gaussian Quadrature Rules for the Triangle",
journal="Article in International Journal for Numerical Methods in Engineering",
volume="21",
number="6",
pages="1129-1148",
month="jun",
year="1985",
url="https://doi.org/10.1002/nme.1620210612",
)
this_dir = pathlib.Path(__file__).resolve().parent
def dunavant_01():
d = {"centroid": [[1]]}
return T2Scheme("Dunavant 1", d, 1, source, 7.850e-17)
def dunavant_02():
d = {"d3_aa": [[frac(1, 3)], [frac(1, 6)]]}
return T2Scheme("Dunavant 2", d, 2, source, 2.220e-16)
def dunavant_03():
d = {"centroid": [[-frac(9, 16)]], "d3_aa": [[frac(25, 48)], [frac(1, 5)]]}
return T2Scheme("Dunavant 3", d, 3, source, 6.661e-16)
def dunavant_04():
return _read(this_dir / "dunavant_04.json", source)
def dunavant_05():
return _read(this_dir / "dunavant_05.json", source)
def dunavant_06():
return _read(this_dir / "dunavant_06.json", source)
def dunavant_07():
return _read(this_dir / "dunavant_07.json", source)
def dunavant_08():
return _read(this_dir / "dunavant_08.json", source)
def dunavant_09():
# DUP equals TRIEX 19
return _read(this_dir / "dunavant_09.json", source)
def dunavant_10():
return _read(this_dir / "dunavant_10.json", source)
def dunavant_11():
return _read(this_dir / "dunavant_11.json", source)
def dunavant_12():
return _read(this_dir / "dunavant_12.json", source)
def dunavant_13():
return _read(this_dir / "dunavant_13.json", source)
def dunavant_14():
return _read(this_dir / "dunavant_14.json", source)
def dunavant_15():
return _read(this_dir / "dunavant_15.json", source)
def dunavant_16():
return _read(this_dir / "dunavant_16.json", source)
def dunavant_17():
return _read(this_dir / "dunavant_17.json", source)
def dunavant_18():
return _read(this_dir / "dunavant_18.json", source)
def dunavant_19():
return _read(this_dir / "dunavant_19.json", source)
def dunavant_20():
return _read(this_dir / "dunavant_20.json", source)
register(
[
dunavant_01,
dunavant_02,
dunavant_03,
dunavant_04,
dunavant_05,
dunavant_06,
dunavant_07,
dunavant_08,
dunavant_09,
dunavant_10,
dunavant_11,
dunavant_12,
dunavant_13,
dunavant_14,
dunavant_15,
dunavant_16,
dunavant_17,
dunavant_18,
dunavant_19,
dunavant_20,
]
)
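# Usage sketch (illustrative only; the attribute names come from the T2Scheme
# helper and should be treated as assumptions if that API differs):
#   scheme = dunavant_05()             # degree-5 rule, read from dunavant_05.json
#   print(scheme.name, scheme.degree)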
| mit | 5,399,588,959,320,617,000 | 19.984615 | 89 | 0.61327 | false |
oswjk/py-mklink-wrapper | mklink.py | 1 | 2540 | """
A wrapper script for ln.exe to make it look like the MKLINK utility
found from Windows Vista onwards. To fully utilise this, you should
also have a batch script that should look something like this:
@ECHO OFF
python %~dp0mklink.py %*
Name the file "mklink.cmd" and put it in PATH. Now you can use the
fake mklink utility like you would use the real.
You can find instructions for installing ln.exe at
http://schinagl.priv.at/nt/hardlinkshellext/hardlinkshellext.html#symboliclinksforwindowsxp
"""
import argparse
import subprocess
import sys
def MyFormatter(raw):
"""Make the help output look a little bit more like the real deal
(i.e., this omits the "usage: " part in the beginning of the help).
"""
class MyFormatter_(argparse.HelpFormatter):
def format_help(self):
return raw
return MyFormatter_
usage_str = """Creates a symbolic link.
MKLINK [[/D] | [/H] | [/J]] Link Target
/D Creates a directory symbolic link. Default is a file
symbolic link.
/H Creates a hard link instead of a symbolic link.
/J Creates a Directory Junction.
Link specifies the new symbolic link name.
Target specifies the path (relative or absolute) that the new link
refers to.
"""
parser = argparse.ArgumentParser(prog='MKLINK', prefix_chars='/',
usage=usage_str, add_help=False,
formatter_class=MyFormatter(raw=usage_str))
parser.add_argument('/?', dest='help', action='help')
group = parser.add_mutually_exclusive_group()
group.add_argument('/D', dest='symlink', default=False, action='store_true')
group.add_argument('/d', dest='symlink', default=False, action='store_true')
group.add_argument('/H', dest='hardlink', default=False, action='store_true')
group.add_argument('/h', dest='hardlink', default=False, action='store_true')
group.add_argument('/J', dest='junction', default=False, action='store_true')
group.add_argument('/j', dest='junction', default=False, action='store_true')
parser.add_argument('link')
parser.add_argument('target')
args = parser.parse_args()
if (not args.symlink) and (not args.hardlink) and (not args.junction):
args.symlink = True
if args.symlink:
sys.exit(subprocess.call(['ln.exe', '-s', args.target, args.link]))
elif args.hardlink:
sys.exit(subprocess.call(['ln.exe', args.target, args.link]))
elif args.junction:
sys.exit(subprocess.call(['ln.exe', '-j', args.target, args.link]))
else:
print("invalid options!")
sys.exit(1)
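# Illustrative mapping of wrapper invocations (through the mklink.cmd batch file
# shown in the module docstring) to the ln.exe calls issued above; the paths are
# placeholders:
#   mklink /D C:\LinkDir C:\TargetDir    ->  ln.exe -s C:\TargetDir C:\LinkDir
#   mklink /H new.txt existing.txt       ->  ln.exe existing.txt new.txt
#   mklink /J C:\Junction C:\TargetDir   ->  ln.exe -j C:\TargetDir C:\Junction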
| mit | 7,813,175,411,467,521,000 | 34.277778 | 95 | 0.688189 | false |
openstack/octavia | octavia/common/utils.py | 1 | 5621 | # Copyright 2011, VMware, Inc., 2014 A10 Networks
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Borrowed from nova code base, more utilities will be added/borrowed as and
# when needed.
"""Utilities and helper functions."""
import base64
import hashlib
import re
import socket
import netaddr
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from stevedore import driver as stevedore_driver
from octavia.common import constants
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def get_hostname():
return socket.gethostname()
def base64_sha1_string(string_to_hash):
"""Get a b64-encoded sha1 hash of a string. Not intended to be secure!"""
# TODO(rm_work): applying nosec here because this is not intended to be
# secure, it's just a way to get a consistent ID. Changing this would
# break backwards compatibility with existing loadbalancers.
hash_str = hashlib.sha1(string_to_hash.encode('utf-8')).digest() # nosec
b64_str = base64.b64encode(hash_str, str.encode('_-', 'ascii'))
b64_sha1 = b64_str.decode('UTF-8')
# https://github.com/haproxy/haproxy/issues/644
return re.sub(r"^-", "x", b64_sha1)
def get_amphora_driver():
amphora_driver = stevedore_driver.DriverManager(
namespace='octavia.amphora.drivers',
name=CONF.controller_worker.amphora_driver,
invoke_on_load=True
).driver
return amphora_driver
def get_network_driver():
CONF.import_group('controller_worker', 'octavia.common.config')
network_driver = stevedore_driver.DriverManager(
namespace='octavia.network.drivers',
name=CONF.controller_worker.network_driver,
invoke_on_load=True
).driver
return network_driver
def is_ipv4(ip_address):
"""Check if ip address is IPv4 address."""
ip = netaddr.IPAddress(ip_address)
return ip.version == 4
def is_ipv6(ip_address):
"""Check if ip address is IPv6 address."""
ip = netaddr.IPAddress(ip_address)
return ip.version == 6
def is_cidr_ipv6(cidr):
"""Check if CIDR is IPv6 address with subnet prefix."""
ip = netaddr.IPNetwork(cidr)
return ip.version == 6
def is_ipv6_lla(ip_address):
"""Check if ip address is IPv6 link local address."""
ip = netaddr.IPAddress(ip_address)
return ip.version == 6 and ip.is_link_local()
def ip_port_str(ip_address, port):
"""Return IP port as string representation depending on address family."""
ip = netaddr.IPAddress(ip_address)
if ip.version == 4:
return "{ip}:{port}".format(ip=ip, port=port)
return "[{ip}]:{port}".format(ip=ip, port=port)
def netmask_to_prefix(netmask):
return netaddr.IPAddress(netmask).netmask_bits()
def ip_netmask_to_cidr(ip, netmask):
net = netaddr.IPNetwork("0.0.0.0/0")
if ip and netmask:
net = netaddr.IPNetwork(
"{ip}/{netmask}".format(ip=ip, netmask=netmask)
)
return "{ip}/{netmask}".format(ip=net.network, netmask=net.prefixlen)
def get_vip_security_group_name(loadbalancer_id):
if loadbalancer_id:
return constants.VIP_SECURITY_GROUP_PREFIX + loadbalancer_id
return None
def get_compatible_value(value):
if isinstance(value, str):
value = value.encode('utf-8')
return value
def get_compatible_server_certs_key_passphrase():
key = CONF.certificates.server_certs_key_passphrase
if isinstance(key, str):
key = key.encode('utf-8')
return base64.urlsafe_b64encode(key)
def subnet_ip_availability(nw_ip_avail, subnet_id, req_num_ips):
for subnet in nw_ip_avail.subnet_ip_availability:
if subnet['subnet_id'] == subnet_id:
return subnet['total_ips'] - subnet['used_ips'] >= req_num_ips
return None
def b(s):
return s.encode('utf-8')
def expand_expected_codes(codes):
"""Expand the expected code string in set of codes.
200-204 -> 200, 201, 202, 204
200, 203 -> 200, 203
"""
retval = set()
codes = re.split(', *', codes)
for code in codes:
if not code:
continue
if '-' in code:
low, hi = code.split('-')[:2]
retval.update(
str(i) for i in range(int(low), int(hi) + 1))
else:
retval.add(code)
return retval
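# Illustrative results of expand_expected_codes(), following the loop above
# (the value is returned as a set of strings):
#   expand_expected_codes("200-204")  -> {"200", "201", "202", "203", "204"}
#   expand_expected_codes("200, 203") -> {"200", "203"}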
class exception_logger(object):
"""Wrap a function and log raised exception
:param logger: the logger to log the exception default is LOG.exception
:returns: origin value if no exception raised; re-raise the exception if
any occurred
"""
def __init__(self, logger=None):
self.logger = logger
def __call__(self, func):
if self.logger is None:
_LOG = logging.getLogger(func.__module__)
self.logger = _LOG.exception
def call(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
with excutils.save_and_reraise_exception():
self.logger(e)
return None
return call
| apache-2.0 | -6,552,113,401,978,608,000 | 28.124352 | 78 | 0.652019 | false |
ramunasd/mapgang | mapgang/metatile.py | 1 | 1671 | #!/usr/bin/python
import os
import struct
from cStringIO import StringIO
from mapgang.constants import METATILE, META_MAGIC
class MetaTile():
def __init__(self, style, x, y, z):
self.style = style
self.x = x
self.y = y
self.z = z
self.content = StringIO()
m2 = METATILE * METATILE
# space for header
self.content.write(struct.pack("4s4i", META_MAGIC, m2, 0, 0, 0))
# space for offset/size table
self.content.write(struct.pack("2i", 0, 0) * m2)
self.sizes = {}
self.offsets = {}
def get_header(self):
return struct.pack("4s4i", META_MAGIC, METATILE * METATILE, self.x, self.y, self.z)
def write_header(self):
self.content.seek(0)
# write header
self.content.write(self.get_header())
# Write out the offset/size table
for n in range(0, METATILE * METATILE):
if n in self.sizes:
self.content.write(struct.pack("2i", self.offsets[n], self.sizes[n]))
else:
self.content.write(struct.pack("2i", 0, 0))
def write_tile(self, x, y, tile):
mask = METATILE - 1
n = (x & mask) * METATILE + (y & mask)
# seek to end
self.content.seek(0, os.SEEK_END)
# mark offset
self.offsets[n] = self.content.tell()
# write content
self.content.write(tile)
# mark size
self.sizes[n] = len(tile)
def getvalue(self):
self.write_header()
return self.content.getvalue()
def to_string(self):
return "%s/%d/%d/%d" % (self.style, self.z, self.x, self.y)
| lgpl-2.1 | 4,790,900,869,442,645,000 | 29.944444 | 91 | 0.552962 | false |
googleads/google-ads-python | google/ads/googleads/v6/services/services/customer_service/transports/__init__.py | 1 | 1035 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import CustomerServiceTransport
from .grpc import CustomerServiceGrpcTransport
# Compile a registry of transports.
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[CustomerServiceTransport]]
_transport_registry["grpc"] = CustomerServiceGrpcTransport
__all__ = (
"CustomerServiceTransport",
"CustomerServiceGrpcTransport",
)
| apache-2.0 | -8,818,725,108,190,158,000 | 28.571429 | 74 | 0.756522 | false |
hakril/PythonForWindows | windows/winproxy/apis/dbghelp.py | 1 | 8510 | import ctypes
import windows.generated_def as gdef
from windows.pycompat import int_types
from ..apiproxy import ApiProxy, NeededParameter
from ..error import fail_on_zero
class DbgHelpProxy(ApiProxy):
APIDLL = "dbghelp"
default_error_check = staticmethod(fail_on_zero)
# We keep the simple definition where the callback UserContext is a PVOID.
# But we want to be able to pass arbitrary Python objects (list/dict),
# so we use some ctypes magic to do the py_object -> PVOID transformation.
# !! this code loses a ref to obj.
# Should still work as the calling code keeps a ref
def transform_pyobject_to_pvoid(obj):
if obj is None or isinstance(obj, int_types):
return obj
return ctypes.POINTER(gdef.PVOID)(ctypes.py_object(obj))[0]
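# Illustrative round trip (an assumption about typical callback usage, not part
# of the original API): the PVOID handed over as UserContext holds the PyObject*
# address, so a callback receiving it raw can recover the object with ctypes as
# long as the caller still holds a reference:
#   ctx = transform_pyobject_to_pvoid({"results": []})
#   obj = ctypes.cast(ctx, ctypes.py_object).value   # the original dict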
@DbgHelpProxy()
def SymInitialize(hProcess, UserSearchPath, fInvadeProcess):
return SymInitialize.ctypes_function(hProcess, UserSearchPath, fInvadeProcess)
@DbgHelpProxy()
def SymCleanup(hProcess):
return SymCleanup.ctypes_function(hProcess)
@DbgHelpProxy()
def SymLoadModuleExA(hProcess, hFile, ImageName, ModuleName, BaseOfDll, DllSize, Data, Flags):
return SymLoadModuleExA.ctypes_function(hProcess, hFile, ImageName, ModuleName, BaseOfDll, DllSize, Data, Flags)
@DbgHelpProxy()
def SymLoadModuleExW(hProcess, hFile, ImageName, ModuleName, BaseOfDll, DllSize, Data, Flags):
return SymLoadModuleExW.ctypes_function(hProcess, hFile, ImageName, ModuleName, BaseOfDll, DllSize, Data, Flags)
@DbgHelpProxy()
def SymUnloadModule64(hProcess, BaseOfDll):
return SymUnloadModule64.ctypes_function(hProcess, BaseOfDll)
@DbgHelpProxy()
def SymFromAddr(hProcess, Address, Displacement, Symbol):
return SymFromAddr.ctypes_function(hProcess, Address, Displacement, Symbol)
@DbgHelpProxy()
def SymGetModuleInfo64(hProcess, dwAddr, ModuleInfo):
return SymGetModuleInfo64.ctypes_function(hProcess, dwAddr, ModuleInfo)
@DbgHelpProxy()
def SymFromName(hProcess, Name, Symbol):
return SymFromName.ctypes_function(hProcess, Name, Symbol)
@DbgHelpProxy()
def SymLoadModuleEx(hProcess, hFile, ImageName, ModuleName, BaseOfDll, DllSize, Data, Flags):
return SymLoadModuleEx.ctypes_function(hProcess, hFile, ImageName, ModuleName, BaseOfDll, DllSize, Data, Flags)
@DbgHelpProxy(error_check=None)
def SymSetOptions(SymOptions):
return SymSetOptions.ctypes_function(SymOptions)
@DbgHelpProxy(error_check=None)
def SymGetOptions():
return SymGetOptions.ctypes_function()
@DbgHelpProxy()
def SymGetSearchPath(hProcess, SearchPath, SearchPathLength=None):
if SearchPath and SearchPathLength is None:
SearchPathLength = len(SearchPath)
return SymGetSearchPath.ctypes_function(hProcess, SearchPath, SearchPathLength)
@DbgHelpProxy()
def SymGetSearchPathW(hProcess, SearchPath, SearchPathLength=None):
if SearchPath and SearchPathLength is None:
SearchPathLength = len(SearchPath)
return SymGetSearchPathW.ctypes_function(hProcess, SearchPath, SearchPathLength)
@DbgHelpProxy()
def SymSetSearchPath(hProcess, SearchPath):
return SymSetSearchPath.ctypes_function(hProcess, SearchPath)
@DbgHelpProxy()
def SymSetSearchPathW(hProcess, SearchPath):
return SymSetSearchPathW.ctypes_function(hProcess, SearchPath)
@DbgHelpProxy()
def SymGetTypeInfo(hProcess, ModBase, TypeId, GetType, pInfo):
return SymGetTypeInfo.ctypes_function(hProcess, ModBase, TypeId, GetType, pInfo)
@DbgHelpProxy()
def SymEnumSymbols(hProcess, BaseOfDll, Mask, EnumSymbolsCallback, UserContext=None):
UserContext = transform_pyobject_to_pvoid(UserContext)
return SymEnumSymbols.ctypes_function(hProcess, BaseOfDll, Mask, EnumSymbolsCallback, UserContext)
@DbgHelpProxy()
def SymEnumSymbolsEx(hProcess, BaseOfDll, Mask, EnumSymbolsCallback, UserContext=None, Options=NeededParameter):
UserContext = transform_pyobject_to_pvoid(UserContext)
return SymEnumSymbolsEx.ctypes_function(hProcess, BaseOfDll, Mask, EnumSymbolsCallback, UserContext, Options)
@DbgHelpProxy()
def SymEnumSymbolsForAddr(hProcess, Address, EnumSymbolsCallback, UserContext=None):
UserContext = transform_pyobject_to_pvoid(UserContext)
return SymEnumSymbolsForAddr.ctypes_function(hProcess, Address, EnumSymbolsCallback, UserContext)
@DbgHelpProxy()
def SymEnumSymbolsForAddrW(hProcess, Address, EnumSymbolsCallback, UserContext=None):
UserContext = transform_pyobject_to_pvoid(UserContext)
return SymEnumSymbolsForAddrW.ctypes_function(hProcess, Address, EnumSymbolsCallback, UserContext)
@DbgHelpProxy()
def SymEnumTypes(hProcess, BaseOfDll, EnumSymbolsCallback, UserContext=None):
UserContext = transform_pyobject_to_pvoid(UserContext)
return SymEnumTypes.ctypes_function(hProcess, BaseOfDll, EnumSymbolsCallback, UserContext)
@DbgHelpProxy()
def SymEnumTypesByName(hProcess, BaseOfDll, mask, EnumSymbolsCallback, UserContext=None):
UserContext = transform_pyobject_to_pvoid(UserContext)
return SymEnumTypesByName.ctypes_function(hProcess, BaseOfDll, mask, EnumSymbolsCallback, UserContext)
@DbgHelpProxy()
def SymEnumerateModules64(hProcess, EnumModulesCallback, UserContext=None):
UserContext = transform_pyobject_to_pvoid(UserContext)
return SymEnumerateModules64.ctypes_function(hProcess, EnumModulesCallback, UserContext)
@DbgHelpProxy()
def SymGetTypeFromName(hProcess, BaseOfDll, Name, Symbol):
return SymGetTypeFromName.ctypes_function(hProcess, BaseOfDll, Name, Symbol)
@DbgHelpProxy()
def SymSearch(hProcess, BaseOfDll, Index, SymTag, Mask, Address, EnumSymbolsCallback, UserContext, Options):
UserContext = transform_pyobject_to_pvoid(UserContext)
return SymSearch.ctypes_function(hProcess, BaseOfDll, Index, SymTag, Mask, Address, EnumSymbolsCallback, UserContext, Options)
@DbgHelpProxy()
def SymSearchW(hProcess, BaseOfDll, Index, SymTag, Mask, Address, EnumSymbolsCallback, UserContext, Options):
UserContext = transform_pyobject_to_pvoid(UserContext)
return SymSearchW.ctypes_function(hProcess, BaseOfDll, Index, SymTag, Mask, Address, EnumSymbolsCallback, UserContext, Options)
@DbgHelpProxy()
def SymRefreshModuleList(hProcess):
return SymRefreshModuleList.ctypes_function(hProcess)
# Helpers
@DbgHelpProxy()
def SymFunctionTableAccess(hProcess, AddrBase):
return SymFunctionTableAccess.ctypes_function(hProcess, AddrBase)
@DbgHelpProxy()
def SymFunctionTableAccess64(hProcess, AddrBase):
return SymFunctionTableAccess64.ctypes_function(hProcess, AddrBase)
@DbgHelpProxy()
def SymGetModuleBase(hProcess, dwAddr):
return SymGetModuleBase.ctypes_function(hProcess, dwAddr)
@DbgHelpProxy()
def SymGetModuleBase64(hProcess, qwAddr):
return SymGetModuleBase64.ctypes_function(hProcess, qwAddr)
@DbgHelpProxy()
def SymEnumProcesses(EnumProcessesCallback, UserContext=None):
return SymEnumProcesses.ctypes_function(EnumProcessesCallback, UserContext)
## Sym callback
@DbgHelpProxy()
def SymRegisterCallback(hProcess, CallbackFunction, UserContext=None):
return SymRegisterCallback.ctypes_function(hProcess, CallbackFunction, UserContext)
@DbgHelpProxy()
def SymRegisterCallback64(hProcess, CallbackFunction, UserContext=0):
return SymRegisterCallback64.ctypes_function(hProcess, CallbackFunction, UserContext)
@DbgHelpProxy()
def SymRegisterCallbackW64(hProcess, CallbackFunction, UserContext=0):
return SymRegisterCallbackW64.ctypes_function(hProcess, CallbackFunction, UserContext)
# Stack walk
@DbgHelpProxy()
def StackWalk64(MachineType, hProcess, hThread, StackFrame, ContextRecord, ReadMemoryRoutine, FunctionTableAccessRoutine, GetModuleBaseRoutine, TranslateAddress):
return StackWalk64.ctypes_function(MachineType, hProcess, hThread, StackFrame, ContextRecord, ReadMemoryRoutine, FunctionTableAccessRoutine, GetModuleBaseRoutine, TranslateAddress)
@DbgHelpProxy()
def StackWalkEx(MachineType, hProcess, hThread, StackFrame, ContextRecord, ReadMemoryRoutine, FunctionTableAccessRoutine, GetModuleBaseRoutine, TranslateAddress, Flags):
return StackWalkEx.ctypes_function(MachineType, hProcess, hThread, StackFrame, ContextRecord, ReadMemoryRoutine, FunctionTableAccessRoutine, GetModuleBaseRoutine, TranslateAddress, Flags)
@DbgHelpProxy()
def StackWalk(MachineType, hProcess, hThread, StackFrame, ContextRecord, ReadMemoryRoutine, FunctionTableAccessRoutine, GetModuleBaseRoutine, TranslateAddress):
return StackWalk.ctypes_function(MachineType, hProcess, hThread, StackFrame, ContextRecord, ReadMemoryRoutine, FunctionTableAccessRoutine, GetModuleBaseRoutine, TranslateAddress)
| bsd-3-clause | 2,012,260,460,677,575,700 | 43.093264 | 191 | 0.809166 | false |
ilkka/maemogcalsync | test/test_googleapi.py | 1 | 1896 | """Unit test suite for the Google API part."""
import unittest2 as unittest
from datetime import datetime, timedelta
import random
import logging
import gdata.service
import gdata.calendar
import gdata.calendar.service
import yaml
import os.path
from mock import Mock, patch
Alphabet = 'abcdefghijlkmnopqrstuvwxyz0123456789'
import maemogcalsync
from maemogcalsync import googleapi
from maemogcalsync.event import Event
Log = logging.getLogger('test.googleapi')
class TestGoogleApi(unittest.TestCase):
"""Test suite class for googleapi module."""
def setUp(self):
"""Set up fixture"""
self.captcha_error_thrown = False
def simulate_captcha_login(self, *args, **kwargs):
"""This method is a side effect for test_captcha_login"""
if self.captcha_error_thrown:
self.assertIn('captcha_token', kwargs.keys())
self.assertIn('captcha_response', kwargs.keys())
return None
self.captcha_error_thrown = True
raise gdata.service.CaptchaRequired
@patch('gdata.calendar.service.CalendarService.ClientLogin')
def test_login(self, mock):
client = googleapi.Client('username@host', 'password')
mock.assert_called_with('username@host', 'password',
captcha_token=None, captcha_response=None,
service="Maemo Gcal sync {0}".format(maemogcalsync.__version__))
@patch('gdata.calendar.service.CalendarService.ClientLogin')
def test_captcha_login(self, mock):
run_once = False
mock.side_effect = self.simulate_captcha_login
with self.assertRaises(gdata.service.CaptchaRequired):
googleapi.Client('username@host', 'password')
googleapi.Client('username@host', 'password', 'captchatoken', 'captcharesponse')
self.assertEqual(2, mock.call_count)
if __name__ == "__main__":
unittest.main()
| lgpl-3.0 | 9,218,872,609,408,522,000 | 31.135593 | 88 | 0.686181 | false |
hydroshare/hydroshare | hs_tracking/tests/test_dashboard.py | 1 | 3060 | from django.test import TestCase
from django.contrib.auth.models import Group
from hs_tracking.models import Variable
from hs_core import hydroshare
from rest_framework import status
import socket
from django.test import Client
class TestDashboard(TestCase):
def setUp(self):
self.hostname = socket.gethostname()
self.resource_url = "/resource/{res_id}/"
self.client = Client(HTTP_USER_AGENT='Mozilla/5.0') # fake use of a real browser
self.group, _ = Group.objects.get_or_create(name='Hydroshare Author')
self.admin = hydroshare.create_account(
'[email protected]',
username='admin',
first_name='administrator',
last_name='couch',
superuser=True,
groups=[]
)
self.dog = hydroshare.create_account(
'[email protected]',
username='dog',
password='foobar',
first_name='a little arfer',
last_name='last_name_dog',
superuser=False,
groups=[]
)
# set up a logged-in session
# self.client.force_authenticate(user=self.dog)
self.client.login(username='dog', password='foobar')
self.resources_to_delete = []
self.groups_to_delete = []
self.holes = hydroshare.create_resource(
resource_type='GenericResource',
owner=self.dog,
title='all about dog holes',
metadata=[],
)
self.resources_to_delete.append(self.holes.short_id)
self.squirrels = hydroshare.create_resource(
resource_type='GenericResource',
owner=self.dog,
title='where to find squirrels',
metadata=[],
)
self.resources_to_delete.append(self.squirrels.short_id)
def tearDown(self):
for r in self.resources_to_delete:
hydroshare.delete_resource(r)
for g in self.groups_to_delete:
g.delete()
self.dog.delete()
def test_blank(self):
""" nothing in tracking database at beginning """
stuff = Variable.recent_resources(self.dog)
self.assertEqual(stuff.count(), 0)
def test_view(self):
""" a view gets recorded """
response = self.client.get(self.resource_url.format(res_id=self.holes.short_id))
self.assertEqual(response.status_code, status.HTTP_200_OK)
stuff = Variable.recent_resources(self.dog)
self.assertEqual(stuff.count(), 1)
r = stuff[0]
self.assertEqual(r.short_id, self.holes.short_id)
self.assertEqual(r.public, False)
self.assertEqual(r.published, False)
self.assertEqual(r.discoverable, False)
# there's only one record!
stuff = Variable.objects.filter(resource=self.holes)
one = stuff[0]
# the record describes the request above
self.assertEqual(one.last_resource_id, self.holes.short_id)
self.assertEqual(one.landing, True)
self.assertEqual(one.rest, False)
| bsd-3-clause | -1,793,111,076,543,336,400 | 30.875 | 89 | 0.603595 | false |
vendelin8/serverApplet | main.py | 1 | 12324 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Main module of serverApplet.
# Copyright (C) 2015 Gergely Bódi
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import gettext, locale, logging, os, shelve, signal, sys
from collections import OrderedDict
from datetime import datetime
from enum import Enum
from functools import partial
from PyQt4 import QtCore, QtGui
from queue import Queue, Empty
from time import sleep
from threading import Thread
from serverApplet.plugin import globalPluginFunctions, localPluginFunctions
from serverApplet.down import HideDialog
logger = logging.getLogger('main')
logging.getLogger('requests').setLevel(logging.WARNING)
ACCOUNTS = 'accounts'
Action = Enum('Action', 'load update')
currentDir = os.path.dirname(os.path.realpath(__file__))
class MainApp(QtGui.QApplication):
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
self.schedulerQueue = Queue()
self.periodic = []
def waiting(self, title, function, functionArgs, callback=None, *callbackArgs):
'''
Showing progress dialog for actions that the user have to wait for.
:param title: the progress dialog title (inside waiting for ...)
:param function: the long processing function
:param functionArgs: arguments for that function as a list
:param callback: called when done, if it is not None
:param *callbackArgs: optional arguments for the callback function
'''
logger.info('title: {}, function: {}, functionArgs: {}, callback: {}, callbackArgs: {}'.format(title, function, functionArgs, callback, callbackArgs))
bar = QtGui.QProgressDialog(_('Waiting for: {}...').format(title), None, 0, 0)
bar.forceShow()
q = Queue()
        functionArgs.append(q) # the function must put the result into the queue
thread = Thread(target=function, args=functionArgs)
thread.start()
while True:
try: # check if the function has finished
result = q.get(True, 0.05)
logger.info('result: {}'.format(result))
break
except Empty:
self.processEvents()
thread.join()
bar.cancel()
if callback is not None:
callback(result, *callbackArgs)
def updateCallback(self, params):
        '''Called to update an account's params, typically with a newer lastAccess.'''
self.schedulerQueue.put((Action.update, params))
def addPeriodic(self, params):
        '''Adds a new function to call periodically, typically to finish a process with one or more desktop notifications.'''
self.periodic.append(params)
def addAccountToMenu(self, modulename, login, loadThread=False):
'''Adds account to the system tray icon menu.'''
newMenu = menu.addMenu('{}:{}'.format(modulename, login))
accSubMenu = menu.insertMenu(subMenuAction, newMenu)
moduleImport = pluginDict[modulename]['moduleImport']
for name, value in pluginDict[modulename]['pluginFunctions'].items():
action = newMenu.addAction(name)
action.triggered.connect(partial(getattr(moduleImport, value), login))
params = shelf[ACCOUNTS][(modulename, login)]
moduleImport.load(login, params)
if loadThread:
self.schedulerQueue.put((Action.load, {'login': login, 'modulename': modulename, 'params': params}))
def mainPeriodic(self):
        '''Called periodically to check if the scheduler thread has changed something.'''
try:
params = shelfQueue.get(True, 0.05)
shelf[ACCOUNTS][(params['modulename'], params['login'])] = params['params']
except Empty:
pass
        if len(self.periodic) > 0: # there might be plugin results to show as desktop notifications
for index, periodic in enumerate(list(self.periodic)): # status: True: success, None: partial success, False: fail
status, result = periodic['function'](*periodic['args']) # result: result string to show or None
if status is True:
title = _('Success!')
elif status is None:
title = _('In progress...')
else:
title = _('Fail!')
if result is not None:
try:
from os import system
system('notify-send "{}" "{}" --icon=dialog-information'.format(title, result))
except:
trayIcon.showMessage(title, result)
if status is not None: # success or fail is final: the periodic function can be deleted
del self.periodic[index]
def newAccount(self, modulename):
'''Shows new account dialog. Clears recent one, if it already exists.'''
if hasattr(self, 'newAccountDialog'):
self.newAccountDialog.clear(modulename)
else:
self.newAccountDialog = NewAccount(w, modulename)
self.newAccountDialog.show()
class NewAccount(HideDialog):
'''New account dialog with login and password field, and test login button.'''
def __init__(self, w, modulename):
super().__init__(w)
self.modulename = modulename
lytMain = QtGui.QVBoxLayout(self)
lytForm = QtGui.QFormLayout()
self.fldLogin = QtGui.QLineEdit(self)
lytForm.addRow(_('Login'), self.fldLogin)
self.fldPassword = QtGui.QLineEdit(self)
self.fldPassword.setEchoMode(QtGui.QLineEdit.Password)
lytForm.addRow(_('Password'), self.fldPassword)
lytMain.addLayout(lytForm)
lytMain.addStretch(1)
lytButtons = QtGui.QHBoxLayout()
lytButtons.addStretch(1)
btnLogin = QtGui.QPushButton('Login', self)
btnLogin.clicked.connect(self.loginClicked)
lytButtons.addWidget(btnLogin)
lytMain.addLayout(lytButtons)
def loginClicked(self):
app.waiting(_('testing'), pluginDict[self.modulename]['moduleImport'].testLogin, [self.fldLogin.text(), self.fldPassword.text()], self.doLogin)
def clear(self, modulename):
'''Reusing the dialog for a new account.'''
self.modulename = modulename
self.fldLogin.setText('')
self.fldPassword.setText('')
self.fldLogin.setFocus()
def doLogin(self, result):
'''
Callback after checking login.
:param result: True for success, False or error string for fail
'''
if result is True:
login = self.fldLogin.text()
params = {'password': self.fldPassword.text()}
shelf[ACCOUNTS][(self.modulename, login)] = params
app.addAccountToMenu(self.modulename, login, True)
self.hide()
else:
if result is False:
result = _('Login test failed.')
QtGui.QMessageBox.critical(self, _('Error'), result)
class Scheduler(Thread):
'''Scheduler thread for cron jobs. Daemon thread with rare wake ups.'''
def __init__(self, schedulerQueue, shelfQueue, accounts):
'''
:param schedulerQueue: queue for updating values up here
:param shelfQueue: queue for updating values down there
        :param accounts: the state of the accounts before starting the scheduler; after this point, synchronization is required
'''
self.schedulerQueue = schedulerQueue
self.shelfQueue = shelfQueue
super().__init__(daemon=True)
self.accounts = accounts
def run(self):
while True:
sleepTime = 3600 # if nothing interesting happens, wakes up once per hour
try: # job and global actions
action, params = self.schedulerQueue.get(True, 0.05)
modulename = params['modulename']
login = params['login']
if action == Action.load: # new account loaded (or just created)
self.accounts[(modulename, login)] = params
                elif action == Action.update: # existing account updated, typically with a newer lastAccess
self.accounts[(modulename, login)].update(params)
continue # do all the modifications before sleeping again
except Empty:
pass
now = datetime.utcnow()
for modulename, login in self.accounts.keys():
moduleImport = pluginDict[modulename]['moduleImport']
diff = (moduleImport.nextCron(login) - now).total_seconds()
if diff <= 0: # time for a cron job
moduleImport.startCron(login)
                else: # if the next event is less than an hour away, sleep only until then
sleepTime = min(diff, sleepTime)
shelfQueue.put({'login': login, 'modulename': modulename, 'params': moduleImport.getParams(login)})
sleep(sleepTime)
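# Sketch of how the GUI side feeds the scheduler thread (the plugin name and
# login are hypothetical; the keys mirror what run() and updateCallback expect):
#   app.schedulerQueue.put((Action.load,
#       {'modulename': 'someplugin', 'login': 'user@example.com',
#        'params': {'password': 'secret'}}))
#   app.schedulerQueue.put((Action.update,
#       {'modulename': 'someplugin', 'login': 'user@example.com',
#        'lastAccess': datetime.utcnow()}))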
def doQuit(*args, **kw):
'''Quiting the app.'''
shelf.close()
app.quit()
signal.signal(signal.SIGINT, doQuit) # Ctrl+C for debugging reasons, may be removed for production
if __name__ == '__main__':
logging.basicConfig(filename=os.path.join(currentDir, 'main.log'), level=logging.INFO,
format='%(asctime)s,%(funcName)s,%(lineno)d: %(message)s', datefmt='%d %H:%M:%S')
pluginDict = {}
shelfQueue = Queue()
shelf = shelve.open(os.path.join(currentDir, 'serverApplet'), writeback=True)
if ACCOUNTS not in shelf:
shelf[ACCOUNTS] = OrderedDict()
locale.setlocale(locale.LC_ALL, '') # localization
loc = locale.getlocale()
filename = os.path.join(currentDir, 'res', '{}.mo'.format(locale.getlocale()[0]))
try:
logging.debug('Opening message file {} for locale {}'.format(filename, loc[0]))
trans = gettext.GNUTranslations(open(filename, 'rb'))
except IOError:
logging.debug('Locale not found. Using default messages')
trans = gettext.NullTranslations()
trans.install()
app = MainApp(sys.argv) # GUI initialization
w = QtGui.QWidget()
trayIcon = QtGui.QSystemTrayIcon(QtGui.QIcon(os.path.join(currentDir, 'res', 'tools.png')), w)
menu = QtGui.QMenu(w)
subMenu = menu.addMenu(_('Add account'))
subMenuAction = menu.addMenu(subMenu)
for file in os.listdir(os.path.join(currentDir, 'plugin')): # loading plugins
if file.endswith('.py') and not file.startswith('__init__'):
modulename = file.split('.')[0]
action = subMenu.addAction(modulename)
action.triggered.connect(partial(app.newAccount, modulename))
pluginFunctions = localPluginFunctions(modulename)()
moduleImport = __import__('plugin.{}'.format(modulename), fromlist=globalPluginFunctions + list(pluginFunctions.values()))
moduleImport.init(app)
pluginDict[modulename] = {'moduleImport': moduleImport, 'pluginFunctions': pluginFunctions}
for (modulename, login), modulParams in shelf[ACCOUNTS].items():
app.addAccountToMenu(modulename, login)
# menu.addAction(_('Manage Accounts')) #TODO
exitAction = menu.addAction(_('Exit'))
exitAction.triggered.connect(doQuit)
trayIcon.setContextMenu(menu)
trayIcon.activated.connect(lambda: trayIcon.contextMenu().popup(QtGui.QCursor.pos()))
trayIcon.show()
scheduler = Scheduler(app.schedulerQueue, shelfQueue, shelf[ACCOUNTS])
scheduler.start()
timer = QtCore.QTimer()
timer.start(5000)
timer.timeout.connect(app.mainPeriodic)
sys.exit(app.exec_())
| gpl-2.0 | -7,590,797,168,447,875,000 | 44.472325 | 158 | 0.633368 | false |
lduarte1991/edx-platform | common/lib/xmodule/xmodule/static_content.py | 1 | 6630 | # /usr/bin/env python
"""
This module has utility functions for gathering up the static content
that is defined by XModules and XModuleDescriptors (javascript and css)
"""
import errno
import hashlib
import logging
import os
import sys
from collections import defaultdict
from docopt import docopt
from path import Path as path
from xmodule.x_module import XModuleDescriptor
LOG = logging.getLogger(__name__)
def write_module_styles(output_root):
"""Write all registered XModule css, sass, and scss files to output root."""
return _write_styles('.xmodule_display', output_root, _list_modules())
def write_module_js(output_root):
"""Write all registered XModule js and coffee files to output root."""
return _write_js(output_root, _list_modules())
def write_descriptor_styles(output_root):
"""Write all registered XModuleDescriptor css, sass, and scss files to output root."""
return _write_styles('.xmodule_edit', output_root, _list_descriptors())
def write_descriptor_js(output_root):
"""Write all registered XModuleDescriptor js and coffee files to output root."""
return _write_js(output_root, _list_descriptors())
def _list_descriptors():
"""Return a list of all registered XModuleDescriptor classes."""
return [
desc for desc in [
desc for (_, desc) in XModuleDescriptor.load_classes()
]
]
def _list_modules():
"""Return a list of all registered XModule classes."""
return [
desc.module_class
for desc
in _list_descriptors()
]
def _ensure_dir(directory):
"""Ensure that `directory` exists."""
try:
os.makedirs(directory)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
def _write_styles(selector, output_root, classes):
"""
Write the css fragments from all XModules in `classes`
into `output_root` as individual files, hashed by the contents to remove
duplicates
"""
contents = {}
css_fragments = defaultdict(set)
for class_ in classes:
class_css = class_.get_css()
for filetype in ('sass', 'scss', 'css'):
for idx, fragment in enumerate(class_css.get(filetype, [])):
css_fragments[idx, filetype, fragment].add(class_.__name__)
css_imports = defaultdict(set)
for (idx, filetype, fragment), classes in sorted(css_fragments.items()):
fragment_name = "{idx:0=3d}-{hash}.{type}".format(
idx=idx,
hash=hashlib.md5(fragment).hexdigest(),
type=filetype)
# Prepend _ so that sass just includes the files into a single file
filename = '_' + fragment_name
contents[filename] = fragment
for class_ in classes:
css_imports[class_].add(fragment_name)
module_styles_lines = [
"@import 'bourbon/bourbon';",
"@import 'lms/theme/variables';",
]
for class_, fragment_names in css_imports.items():
module_styles_lines.append("""{selector}.xmodule_{class_} {{""".format(
class_=class_, selector=selector
))
module_styles_lines.extend(' @import "{0}";'.format(name) for name in fragment_names)
module_styles_lines.append('}')
contents['_module-styles.scss'] = '\n'.join(module_styles_lines)
_write_files(output_root, contents)
def _write_js(output_root, classes):
"""
Write the javascript fragments from all XModules in `classes`
into `output_root` as individual files, hashed by the contents to remove
duplicates
"""
contents = {}
js_fragments = set()
for class_ in classes:
module_js = class_.get_javascript()
        # This enforces the 000 prefix for xmodule.js.
js_fragments.add((0, 'js', module_js.get('xmodule_js')))
for filetype in ('coffee', 'js'):
for idx, fragment in enumerate(module_js.get(filetype, [])):
js_fragments.add((idx + 1, filetype, fragment))
for idx, filetype, fragment in sorted(js_fragments):
filename = "{idx:0=3d}-{hash}.{type}".format(
idx=idx,
hash=hashlib.md5(fragment).hexdigest(),
type=filetype)
contents[filename] = fragment
_write_files(output_root, contents, {'.coffee': '.js'})
return [output_root / filename for filename in contents.keys()]
def _write_files(output_root, contents, generated_suffix_map=None):
"""
Write file contents to output root.
Any files not listed in contents that exists in output_root will be deleted,
unless it matches one of the patterns in `generated_suffix_map`.
output_root (path): The root directory to write the file contents in
contents (dict): A map from filenames to file contents to be written to the output_root
generated_suffix_map (dict): Optional. Maps file suffix to generated file suffix.
For any file in contents, if the suffix matches a key in `generated_suffix_map`,
then the same filename with the suffix replaced by the value from `generated_suffix_map`
will be ignored
"""
_ensure_dir(output_root)
to_delete = set(file.basename() for file in output_root.files()) - set(contents.keys())
if generated_suffix_map:
for output_file in contents.keys():
for suffix, generated_suffix in generated_suffix_map.items():
if output_file.endswith(suffix):
to_delete.discard(output_file.replace(suffix, generated_suffix))
for extra_file in to_delete:
(output_root / extra_file).remove_p()
for filename, file_content in contents.iteritems():
output_file = output_root / filename
not_file = not output_file.isfile()
# not_file is included to short-circuit this check, because
# read_md5 depends on the file already existing
write_file = not_file or output_file.read_md5() != hashlib.md5(file_content).digest()
if write_file:
LOG.debug("Writing %s", output_file)
output_file.write_bytes(file_content)
else:
LOG.debug("%s unchanged, skipping", output_file)
def main():
"""
Generate
Usage: static_content.py <output_root>
"""
from django.conf import settings
settings.configure()
args = docopt(main.__doc__)
root = path(args['<output_root>'])
write_descriptor_js(root / 'descriptors/js')
write_descriptor_styles(root / 'descriptors/css')
write_module_js(root / 'modules/js')
write_module_styles(root / 'modules/css')
if __name__ == '__main__':
sys.exit(main())
| agpl-3.0 | 1,384,983,547,345,485,600 | 31.821782 | 96 | 0.641026 | false |
thomasahle/numberlink | gen/grid.py | 1 | 4350 | def sign(x):
if x == 0:
return x
return -1 if x < 0 else 1
class UnionFind:
def __init__(self, initial=None):
self.uf = initial or {}
def union(self, a, b):
a_par, b_par = self.find(a), self.find(b)
self.uf[a_par] = b_par
def find(self, a):
if self.uf.get(a, a) == a:
return a
par = self.find(self.uf.get(a, a))
# Path compression
self.uf[a] = par
return par
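# Illustrative use of the union-find above (the coordinates are arbitrary):
#   uf = UnionFind()
#   uf.union((0, 0), (0, 1))
#   uf.union((0, 1), (5, 5))
#   uf.find((0, 0)) == uf.find((5, 5))   # -> True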
class Grid:
def __init__(self, w, h):
self.w, self.h = w, h
self.grid = {}
def __setitem__(self, key, val):
self.grid[key] = val
def __getitem__(self, key):
return self.grid.get(key, ' ')
def __repr__(self):
res = []
for y in range(self.h):
res.append(''.join(self[x, y] for x in range(self.w)))
return '\n'.join(res)
def __iter__(self):
return iter(self.grid.items())
def __contains__(self, key):
return key in self.grid
def __delitem__(self, key):
del self.grid[key]
def clear(self):
self.grid.clear()
def values(self):
return self.grid.values()
def shrink(self):
""" Returns a new grid of half the height and width """
small_grid = Grid(self.w // 2, self.h // 2)
for y in range(self.h // 2):
for x in range(self.w // 2):
small_grid[x, y] = self[2 * x + 1, 2 * y + 1]
return small_grid
def test_path(self, path, x0, y0, dx0=0, dy0=1):
""" Test whether the path is safe to draw on the grid, starting at x0, y0 """
return all(0 <= x0 - x + y < self.w and 0 <= y0 + x + y < self.h
and (x0 - x + y, y0 + x + y) not in self for x, y in path.xys(dx0, dy0))
def draw_path(self, path, x0, y0, dx0=0, dy0=1, loop=False):
""" Draws path on the grid. Asserts this is safe (no overlaps).
        For non-loops, the first and the last characters are not drawn,
as we don't know what shape they should have. """
ps = list(path.xys(dx0, dy0))
        # For loops, add the second character, so we get all rotational triples:
# abcda -> abcdab -> abc, bcd, cda, dab
if loop:
assert ps[0] == ps[-1], (path, ps)
ps.append(ps[1])
for i in range(1, len(ps) - 1):
xp, yp = ps[i - 1]
x, y = ps[i]
xn, yn = ps[i + 1]
self[x0 - x + y, y0 + x + y] = {
(1, 1, 1): '<', (-1, -1, -1): '<',
(1, 1, -1): '>', (-1, -1, 1): '>',
(-1, 1, 1): 'v', (1, -1, -1): 'v',
(-1, 1, -1): '^', (1, -1, 1): '^',
(0, 2, 0): '\\', (0, -2, 0): '\\',
(2, 0, 0): '/', (-2, 0, 0): '/'
}[xn - xp, yn - yp, sign((x - xp) * (yn - y) - (xn - x) * (y - yp))]
def make_tubes(self):
uf = UnionFind()
tube_grid = Grid(self.w, self.h)
for x in range(self.w):
d = '-'
for y in range(self.h):
# We union things down and to the right.
# This means ┌ gets to union twice.
for dx, dy in {
'/-': [(0, 1)], '\\-': [(1, 0), (0, 1)],
'/|': [(1, 0)],
' -': [(1, 0)], ' |': [(0, 1)],
'v|': [(0, 1)], '>|': [(1, 0)],
'v-': [(0, 1)], '>-': [(1, 0)],
}.get(self[x, y] + d, []):
uf.union((x, y), (x + dx, y + dy))
                # We change all <>v^ to x.
tube_grid[x, y] = {
'/-': '┐', '\\-': '┌',
'/|': '└', '\\|': '┘',
' -': '-', ' |': '|',
}.get(self[x, y] + d, 'x')
# We change direction on v and ^, but not on < and >.
if self[x, y] in '\\/v^':
d = '|' if d == '-' else '-'
return tube_grid, uf
def clear_path(self, path, x, y):
""" Removes everything contained in the path (loop) placed at x, y. """
path_grid = Grid(self.w, self.h)
path_grid.draw_path(path, x, y, loop=True)
for key, val in path_grid.make_tubes()[0]:
if val == '|':
self.grid.pop(key, None)
| agpl-3.0 | -1,240,742,716,527,753,000 | 33.173228 | 91 | 0.408295 | false |
zoho/books-python-wrappers | books/parser/ChartOfAccountsParser.py | 1 | 5747 | #$Id$#
from books.model.ChartOfAccount import ChartOfAccount
from books.model.ChartOfAccountList import ChartOfAccountList
from books.model.TransactionList import TransactionList
from books.model.Transaction import Transaction
from books.model.PageContext import PageContext
class ChartOfAccountsParser:
"""This class parses the json response for chart of accounts."""
def get_list(self, resp):
"""This method parses the given response and returns chart of accounts
list.
Args:
resp(dict): Dictionary containing json object for chart of accounts
list.
Returns:
instance: Chart of accounts list object.
"""
chart_of_accounts_list = ChartOfAccountList()
for value in resp['chartofaccounts']:
chart_of_accounts = ChartOfAccount()
chart_of_accounts.set_account_id(value['account_id'])
chart_of_accounts.set_account_name(value['account_name'])
chart_of_accounts.set_account_type(value['account_type'])
chart_of_accounts.set_is_active(value['is_active'])
chart_of_accounts.set_is_user_created(value['is_user_created'])
chart_of_accounts.set_is_involved_in_transaction(value[\
'is_involved_in_transaction'])
chart_of_accounts.set_is_system_account(value['is_system_account'])
chart_of_accounts_list.set_chartofaccounts(chart_of_accounts)
page_context = resp['page_context']
page_context_obj = PageContext()
page_context_obj.set_page(page_context['page'])
page_context_obj.set_per_page(page_context['per_page'])
page_context_obj.set_has_more_page(page_context['has_more_page'])
page_context_obj.set_report_name(page_context['report_name'])
page_context_obj.set_applied_filter(page_context['applied_filter'])
page_context_obj.set_sort_column(page_context['sort_column'])
page_context_obj.set_sort_order(page_context['sort_order'])
chart_of_accounts_list.set_page_context(page_context_obj)
return chart_of_accounts_list
def get_account(self, resp):
"""This method parses the given response and returns chart of
accounts object.
Args:
resp(dict): Dictionary containing json object for chart of accounts.
Returns:
instance: Chart of accounts object.
"""
chart_of_account = resp['chart_of_account']
chart_of_account_obj = ChartOfAccount()
chart_of_account_obj.set_account_id(chart_of_account['account_id'])
chart_of_account_obj.set_account_name(chart_of_account['account_name'])
chart_of_account_obj.set_is_active(chart_of_account['is_active'])
chart_of_account_obj.set_account_type(chart_of_account['account_type'])
chart_of_account_obj.set_account_type_formatted(chart_of_account[\
'account_type_formatted'])
chart_of_account_obj.set_description(chart_of_account['description'])
return chart_of_account_obj
def get_message(self, resp):
"""This method parses the given response and returns string message.
Args:
reps(dict): Dictionary containing json object for message.
Returns:
str: Success message.
"""
return resp['message']
def get_transactions_list(self, resp):
"""This method parses the given response and returns transactions list.
Args:
resp(dict): Dictionary containing json object for transactions list.
Returns:
instance: Transaction list object.
"""
transactions_list = TransactionList()
for value in resp['transactions']:
transactions = Transaction()
transactions.set_categorized_transaction_id(value[\
'categorized_transaction_id'])
transactions.set_transaction_type(value['transaction_type'])
transactions.set_transaction_id(value['transaction_id'])
transactions.set_transaction_date(value['transaction_date'])
transactions.set_transaction_type_formatted(value[\
'transaction_type_formatted'])
transactions.set_account_id(value['account_id'])
transactions.set_customer_id(value['customer_id'])
transactions.set_payee(value['payee'])
transactions.set_description(value['description'])
transactions.set_entry_number(value['entry_number'])
transactions.set_currency_id(value['currency_id'])
transactions.set_currency_code(value['currency_code'])
transactions.set_debit_or_credit(value['debit_or_credit'])
transactions.set_offset_account_name(value['offset_account_name'])
transactions.set_reference_number(value['reference_number'])
transactions.set_reconcile_status(value['reconcile_status'])
transactions.set_debit_amount(value['debit_amount'])
transactions.set_credit_amount(value['credit_amount'])
transactions_list.set_transactions(transactions)
page_context = resp['page_context']
page_context_obj = PageContext()
page_context_obj.set_page(page_context['page'])
page_context_obj.set_per_page(page_context['per_page'])
page_context_obj.set_has_more_page(page_context['has_more_page'])
page_context_obj.set_report_name(page_context['report_name'])
page_context_obj.set_sort_column(page_context['sort_column'])
page_context_obj.set_sort_order(page_context['sort_order'])
transactions_list.set_page_context(page_context_obj)
return transactions_list
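# Illustrative call (the dict shape mirrors the keys read above; the values are
# placeholders and the matching getters on ChartOfAccount are assumed):
#   parser = ChartOfAccountsParser()
#   account = parser.get_account({'chart_of_account': {
#       'account_id': '1', 'account_name': 'Petty Cash', 'is_active': True,
#       'account_type': 'cash', 'account_type_formatted': 'Cash',
#       'description': ''}})
#   account.get_account_name()   # -> 'Petty Cash'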
| mit | 8,299,171,215,330,370,000 | 44.251969 | 80 | 0.652514 | false |
brinkframework/brink | tests/test_fields.py | 1 | 3215 | from brink import fields, models
import pytest
class DummyModel(models.Model):
title = fields.CharField()
def test_field_treat():
field = fields.Field()
assert field.validate("val") == "val"
def test_field_validate_required():
field = fields.Field(required=True)
with pytest.raises(fields.FieldRequired):
field.validate(None)
assert field.validate("val") == "val"
def test_integer_field_validate_required():
field1 = fields.IntegerField(required=True)
with pytest.raises(fields.FieldRequired):
field1.validate(None)
field2 = fields.IntegerField()
field2.validate(None)
def test_integer_field_validate_type():
field = fields.IntegerField()
with pytest.raises(fields.FieldInvalidType):
field.validate("test")
assert field.validate(10) == 10
def test_char_field_validate_required():
field1 = fields.CharField(required=True)
with pytest.raises(fields.FieldRequired):
field1.validate(None)
field2 = fields.CharField()
field2.validate(None)
def test_char_field_validate_min_length():
field = fields.CharField(min_length=5)
with pytest.raises(fields.FieldInvalidLength):
field.validate("test")
assert field.validate("testing") == "testing"
def test_char_field_validate_max_length():
field = fields.CharField(max_length=5)
with pytest.raises(fields.FieldInvalidLength):
field.validate("testing")
assert field.validate("test") == "test"
def test_char_field_validate_type():
field = fields.CharField()
with pytest.raises(fields.FieldInvalidType):
field.validate(10)
assert field.validate("test") == "test"
def test_bool_field_validate_required():
field1 = fields.BooleanField(required=True)
with pytest.raises(fields.FieldRequired):
field1.validate(None)
field2 = fields.BooleanField()
field2.validate(None)
def test_bool_field_validate_type():
field = fields.BooleanField()
with pytest.raises(fields.FieldInvalidType):
field.validate("test")
assert field.validate(True)
assert not field.validate(False)
def test_list_field_validate_subtype():
field = fields.ListField(fields.CharField())
with pytest.raises(fields.FieldInvalidType):
field.validate([1, 2])
with pytest.raises(fields.FieldInvalidType):
field.validate([1, "test"])
field.validate(["test", "test2"])
assert field.validate(None) == []
def test_reference_field_treat():
field = fields.ReferenceField(DummyModel)
model = DummyModel(id="test", title="Test")
assert field.treat(model) == "test"
def test_reference_field_show():
field = fields.ReferenceField(DummyModel)
model = field.show(DummyModel(title="Test"))
assert model.title == "Test"
def test_reference_list_field_treat():
field = fields.ReferenceListField(DummyModel)
model = DummyModel(id="test", title="Test")
assert field.treat([model]) == ["test"]
def test_reference_list_field_show():
field = fields.ReferenceListField(DummyModel)
data = DummyModel(title="Test")
models = field.show([data])
for model in models:
assert model.title == "Test"
| bsd-3-clause | 2,837,641,944,071,696,400 | 22.467153 | 50 | 0.684292 | false |
angr/cle | cle/utils.py | 1 | 3681 | import os
import contextlib
from .errors import CLEError, CLEFileNotFoundError
# https://code.woboq.org/userspace/glibc/include/libc-pointer-arith.h.html#43
def ALIGN_DOWN(base, size):
return base & -size
# https://code.woboq.org/userspace/glibc/include/libc-pointer-arith.h.html#50
def ALIGN_UP(base, size):
return ALIGN_DOWN(base + size - 1, size)
# To verify the mmap behavior you can compile and run the following program. mmap file mappings
# always map the entire page into memory from the file if available. If not, it gets zero padded.
# pylint: disable=pointless-string-statement
"""#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>
void make_test_file()
{
void* data = (void*)0xdead0000;
int fd = open("./test.data", O_CREAT | O_RDWR, S_IRUSR | S_IWUSR);
for (int i = 0; i < 0x1800; i += sizeof(void*)) // Only write 1 1/2 pages worth
{
write(fd, &data, sizeof(void*));
data += sizeof(void*);
}
close(fd);
}
int main(int argc, char* argv[])
{
make_test_file();
int fd = open("./test.data", O_RDONLY);
unsigned char* mapping = mmap(NULL, 0x123, PROT_READ, MAP_PRIVATE, fd, 4096);
for (int i=0; i < 0x1000; i++)
{
printf("%02x ", mapping[i]);
if (i % sizeof(void*) == (sizeof(void*) - 1))
printf("| ");
if (i % 16 == 15)
printf("\n");
}
}"""
def get_mmaped_data(stream, offset, length, page_size):
if offset % page_size != 0:
raise CLEError("libc helper for mmap: Invalid page offset, should be multiple of page size! Stream {}, offset {}, length: {}".format(stream, offset, length))
read_length = ALIGN_UP(length, page_size)
stream.seek(offset)
data = stream.read(read_length)
return data.ljust(read_length, b'\0')
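
# A minimal usage sketch (assumptions: a binary file stream on disk and
# 0x1000-byte pages; this example is not part of the original module):
#
#   with open("test.data", "rb") as stream:
#       chunk = get_mmaped_data(stream, offset=0x1000, length=0x123, page_size=0x1000)
#       assert len(chunk) == 0x1000  # rounded up to whole pages, zero-padded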
@contextlib.contextmanager
def stream_or_path(obj, perms='rb'):
if hasattr(obj, 'read') and hasattr(obj, 'seek'):
obj.seek(0)
yield obj
else:
if not os.path.exists(obj):
raise CLEFileNotFoundError("%r is not a valid path" % obj)
with open(obj, perms) as f:
yield f
def key_bisect_floor_key(lst, key, lo=0, hi=None, keyfunc=lambda x: x):
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(lst)
while lo < hi:
mid = (lo + hi) // 2
if keyfunc(lst[mid]) <= key:
lo = mid + 1
else:
hi = mid
if lo <= len(lst) and lo > 0:
return lst[lo - 1]
return None
def key_bisect_find(lst, item, lo=0, hi=None, keyfunc=lambda x: x):
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(lst)
while lo < hi:
mid = (lo + hi) // 2
if keyfunc(lst[mid]) <= keyfunc(item):
lo = mid + 1
else:
hi = mid
return lo
def key_bisect_insort_left(lst, item, lo=0, hi=None, keyfunc=lambda x: x):
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(lst)
while lo < hi:
mid = (lo + hi) // 2
if keyfunc(lst[mid]) < keyfunc(item):
lo = mid + 1
else:
hi = mid
lst.insert(lo, item)
def key_bisect_insort_right(lst, item, lo=0, hi=None, keyfunc=lambda x: x):
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(lst)
while lo < hi:
mid = (lo + hi) // 2
if keyfunc(lst[mid]) <= keyfunc(item):
lo = mid + 1
else:
hi = mid
lst.insert(lo, item)
| bsd-2-clause | 2,866,465,400,092,293,600 | 27.984252 | 165 | 0.573485 | false |
dynamicy/FloodligtModule | apps/qos/qospath.py | 1 | 7564 | #! /usr/bin/python
"""
QoSPath.py ---------------------------------------------------------------------------------------------------
Developed By: Ryan Wallner ([email protected])
Add QoS to a specific path in the network. Utilizes circuitpusher developed by KC Wang.
[Note]
*circuitpusher.py is needed in the same directory for this application to run
successfully!
USAGE:
qospath.py <add> --qos-path <name> <source-ip> <dest-ip> <policy-object> <controller-ip> <port>
qospath.py <delete> --qos-path <name> <controller-ip> <port>
*note: This adds the Quality of Service to each switch along the path between hosts
*note: The policy object can exclude the "sw", "enqueue-port" parameters and the
"ip-src", "ip-dst" and "ingress-port" match parameters.
They will be modified based on the route anyway.
[author] - rjwallner
-----------------------------------------------------------------------------------------------------------------------
"""
import sys
import os
import time
import simplejson #used to process policies and encode/decode requests
import subprocess #spawning subprocesses
## Get switches in a circuit using circuitpusher (may need to modify to get all switches in path)
## Then use "add policy" on EACH switch in the circuit using QoSPusher to add a policy along the path.
def main():
#checks
if (len(sys.argv) == 2):
if sys.argv[1] == "--help" or sys.argv[1] == "help" or sys.argv[1] == "--h" :
usage_help()
exit()
if (len(sys.argv)) == 9:
p_name = sys.argv[3]
src = sys.argv[4]
dst = sys.argv[5]
pol = sys.argv[6]
c_ip = sys.argv[7]
prt = sys.argv[8]
add(p_name,src,dst,pol,c_ip,prt)
exit()
if (len(sys.argv)) == 6:
p_name = sys.argv[3]
c_ip = sys.argv[4]
prt = sys.argv[5]
delete(p_name,c_ip,prt)
exit()
else:
usage()
exit()
def add(name, ip_src, ip_dst, p_obj, c_ip, port):
print "Trying to create a circuit from host %s to host %s..." % (ip_src, ip_dst)
c_pusher = "circuitpusher.py"
qos_pusher = "qosmanager.py"
pwd = os.getcwd()
print pwd
try:
if (os.path.exists("%s/%s" % (pwd,c_pusher))) and (os.path.exists("%s/%s" % (pwd,qos_pusher))):
print "Necessary tools confirmed.. %s , %s" % (c_pusher,qos_pusher)
else:
print "%s/%s does not exist" %(pwd,c_pusher)
print "%s/%s does not exist" %(pwd,qos_pusher)
except ValueError as e:
print "Problem finding tools...%s , %s" % (c_pusher,qos_pusher)
print e
exit(1)
    # first create the circuit and wait for the json to populate
print "create circuit!!!"
try:
cmd = "--controller=%s:%s --type ip --src %s --dst %s --add --name %s" % (c_ip,port,ip_src,ip_dst,name)
print './circuitpusher.py %s' % cmd
c_proc = subprocess.Popen('./circuitpusher.py %s' % cmd, shell=True)
print "Process %s started to create circuit" % c_proc.pid
#wait for the circuit to be created
c_proc.wait()
except Exception as e:
print "could not create circuit, Error: %s" % str(e)
try:
subprocess.Popen("cat circuits.json",shell=True).wait()
except Exception as e:
print "Error opening file, Error: %s" % str(e)
#cannot continue without file
exit()
print "Opening circuits.json in %s" % pwd
try:
circs = "circuits.json"
c_data = open(circs)
except Exception as e:
print "Error opening file: %s" % str(e)
#load data into json format
print "Creating a QoSPath from host %s to host %s..." % (ip_src, ip_dst)
time.sleep(5)
for line in c_data:
data = simplejson.loads(line)
if data['name'] != name:
continue
else:
sw_id = data['Dpid']
in_prt = data['inPort']
out_prt = data['outPort']
print"QoS applied to this switch for circuit %s" % data['name']
print "%s: in:%s out:%s" % (sw_id,in_prt,out_prt)
p = simplejson.loads(p_obj)
#add necessary match values to policy for path
p['sw'] = sw_id
p['name'] = name+"."+sw_id
#screwed up connectivity on this match, remove
#p['ingress-port'] = str(in_prt)
p['ip-src'] = ip_src
p['ip-dst'] = ip_dst
keys = p.keys()
l = len(keys)
queue = False
service = False
for i in range(l):
if keys[i] == 'queue':
queue = True
elif keys[i] == 'service':
service = True
if queue and service:
polErr()
elif queue and not service:
p['enqueue-port'] = str(out_prt)
pol = str(p)
print "Adding Queueing Rule"
sjson = simplejson.JSONEncoder(sort_keys=False,indent=3).encode(p)
print sjson
cmd = "./qosmanager.py add policy '%s' %s %s" % (sjson,c_ip,port)
p = subprocess.Popen(cmd, shell=True).wait()
elif service and not queue:
print "Adding Type of Service"
sjson = simplejson.JSONEncoder(sort_keys=False,indent=3).encode(p)
print sjson
cmd = "./qosmanager.py add policy '%s' %s %s" % (sjson,c_ip,port)
p = subprocess.Popen(cmd, shell=True).wait()
else:
polErr()
def polErr():
print """Your policy is not defined right, check to
make sure you have a service OR a queue defined"""
def delete(name,c_ip,port):
print "Trying to delete QoSPath %s" % name
# circuitpusher --controller {IP:REST_PORT} --delete --name {CIRCUIT_NAME}
try:
print "Deleting circuit"
cmd = "./circuitpusher.py --controller %s:%s --delete --name %s" % (c_ip,port,name)
subprocess.Popen(cmd,shell=True).wait()
except Exception as e:
print "Error deleting circuit, Error: %s" % str(e)
exit()
qos_s = os.popen("./qosmanager.py list policies %s %s" %(c_ip,port)).read()
qos_s = qos_s[qos_s.find("[",qos_s.find("[")+1):qos_s.rfind("]")+1]
#print qos_s
data = simplejson.loads(qos_s)
sjson = simplejson.JSONEncoder(sort_keys=False,indent=3).encode(data)
jsond = simplejson.JSONDecoder().decode(sjson)
#find policies that start with "<pathname>."
l = len(jsond)
for i in range(l):
n = jsond[i]['name']
if name in n:
pol_id = jsond[i]['policyid']
try:
cmd = "./qosmanager.py delete policy '{\"policy-id\":\"%s\"}' %s %s " % (pol_id,c_ip,port)
print cmd
subprocess.Popen(cmd,shell=True).wait()
except Exception as e:
print "Could not delete policy in path: %s" % str(e)
def usage():
print '''type "qospath.py --help" for more details
#qospath.py <add> --qos-path <name> <source-ip> <dest-ip> <policy-object> <controller-ip> <port>
#qospath.py <delete> --qos-path <name> <controller-ip> <port>
    *The policy object can exclude the "sw", "enqueue-port" parameters and the
"ip-src", "ip-dst" and "ingress-port" match parameters.
They will be modified based on the route anyway.'''
def usage_help():
print '''
###################################
QoSPath.py
Author: Ryan Wallner ([email protected])
QoSPath is a simple service that utilizes KC Wang's
CircuitPusher to push Quality of Service along a
specific path in the network.
To add a QoS Path with a Policy
*note other match fields can be added to the policy object
qospath.py add --qos-path Path-Name 10.0.0.1 10.0.0.2 '{"queue":"2"}' 127.0.0.1 8080
qospath.py add --qos-path Path-Name 10.0.0.1 10.0.0.2 '{"service":"Best Effort"}' 127.0.0.1 8080
To delete a QoS Path
qospath.py delete --qos-path "Path-Name" 127.0.0.1 8080
###################################
'''
#Call main
if __name__ == "__main__" :
main() | apache-2.0 | -4,349,914,921,500,390,000 | 35.02381 | 119 | 0.593469 | false |
JonathanSeguin/Mariana | setup.py | 1 | 2757 | from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='Mariana',
version='1.0.3rc1',
description="The Cutest Deep Learning Framework",
long_description=long_description,
url='https://github.com/tariqdaouda/mariana',
author='Tariq Daouda',
author_email='[email protected]',
license='ApacheV2.0',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Machine-learning',
'Topic :: Scientific/Engineering :: Deep-learning',
'Topic :: Software Development :: Libraries',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
],
keywords='Machine Learning deeplearning neural networks',
packages=find_packages(exclude=['trash']),
# List run-time dependencies here. These will be installed by pip when your
# project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/technical.html#install-requires-vs-requirements-files
install_requires=["theano", "pyGeno", "simplejson", "numpy"],
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
#~ package_data={
#~ 'sample': ['package_data.dat'],
#~ },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages.
# see http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
#~ data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'sample=sample:main',
],
},
)
| apache-2.0 | 4,313,847,788,811,178,000 | 34.805195 | 98 | 0.660138 | false |
yardstick17/GoogleCharts | preprocessing/get_data.py | 1 | 1216 | from random import randint
import pandas as pd
def read_rating_history_dump():
rating_df = pd.read_csv('GoogleChartsFlask/data/rating_history.csv')
data = list()
for index, row in rating_df.iterrows():
data.append((row[0], row[1], row[2]))
return data
def read_daily_rating_dump():
rating_df = pd.read_csv('GoogleChartsFlask/data/daily_rating.csv')
data = []
for index, row in rating_df.iterrows():
data.append((row[0], row[1], row[2], row[3], row[4], randint(3, 5) + randint(1, 10) / 10,
randint(3, 5) + randint(1, 10) / 10, randint(3, 5) + randint(1, 10) / 10))
return data[:10]
def read_daily_rating_dump_all():
rating_df = pd.read_csv('GoogleChartsFlask/data/daily_rating.csv')
data = []
for index, row in rating_df.iterrows():
data.append((row[0], row[1], row[2], row[3], row[4], randint(3, 5) + randint(1, 10) / 10))
return data[:10]
def read_rating_hive_dump():
rating_df = pd.read_csv('hive_query_result', sep='\001', names='a b c d e'.split())
data = []
for index, row in rating_df.iterrows():
data.append((row[0], row[1], row[2], row[3], row[4], randint(3, 5) + randint(1, 10) / 10))
| mit | 2,226,139,648,123,469,800 | 33.742857 | 98 | 0.599507 | false |
nbingham1/python-html | css.py | 1 | 2211 | from collections import OrderedDict
class Rgb:
def __init__(self, r = 0.0, g = 0.0, b = 0.0):
self.r = r
self.g = g
self.b = b
def rgba(self):
return "rgb({},{},{})".format(
max(0.0, min(1.0, self.r)),
max(0.0, min(1.0, self.g)),
max(0.0, min(1.0, self.b)))
def hex(self):
return "#{:02x}{:02x}{:02x}".format(
max(0, min(255, int(255.0*self.r))),
max(0, min(255, int(255.0*self.g))),
max(0, min(255, int(255.0*self.b))))
def __str__(self):
return self.hex()
class Rgba:
def __init__(self, r = 0.0, g = 0.0, b = 0.0, a = 1.0):
self.r = r
self.g = g
self.b = b
self.a = a
def rgba(self):
return "rgba({},{},{},{})".format(
max(0.0, min(1.0, self.r)),
max(0.0, min(1.0, self.g)),
max(0.0, min(1.0, self.b)),
max(0.0, min(1.0, self.a)))
def __str__(self):
return self.rgba()
class Style:
def __init__(self, prop = OrderedDict()):
self.prop = OrderedDict()
for key,value in prop.iteritems():
self.set(key, value)
def __str__(self):
return " ".join(self.emit())
def emit(self):
result = []
for key,value in self.prop.iteritems():
result.append(str(key) + ": " + str(value) + ";")
return result
def get(self, key):
return self.prop[key]
def set(self, key, value):
#if key == "background":
#elif key == "border":
#elif key == "border-bottom":
#elif key == "border-image":
#elif key == "border-left":
#elif key == "border-radius":
#elif key == "border-right":
#elif key == "border-top":
#elif key == "margin":
#elif key == "padding":
#elif key == "font":
#elif key == "list-style":
#elif key == "animation":
#elif key == "outline":
#elif key == "column-rule":
#else:
self.prop[key] = value
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, value):
self.set(key, value)
class Css:
def __init__(self, elems = OrderedDict()):
self.elems = elems
def __str__(self):
return "\n".join(self.emit())
def emit(self):
result = []
for selector,style in self.elems.iteritems():
result.append(selector + " {")
lines = style.emit()
for line in lines:
result.append("\t" + line)
result.append("}")
result.append("")
return result
| mit | -8,850,375,547,768,495,000 | 20.676471 | 56 | 0.565807 | false |
nuagenetworks/tempest | tempest/tests/lib/test_decorators.py | 1 | 4381 | # Copyright 2013 IBM Corp
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import testtools
from tempest.lib import base as test
from tempest.lib import decorators
from tempest.tests.lib import base
class TestSkipBecauseDecorator(base.TestCase):
def _test_skip_because_helper(self, expected_to_skip=True,
**decorator_args):
class TestFoo(test.BaseTestCase):
_interface = 'json'
@decorators.skip_because(**decorator_args)
def test_bar(self):
return 0
t = TestFoo('test_bar')
if expected_to_skip:
self.assertRaises(testtools.TestCase.skipException, t.test_bar)
else:
# assert that test_bar returned 0
self.assertEqual(TestFoo('test_bar').test_bar(), 0)
def test_skip_because_bug(self):
self._test_skip_because_helper(bug='12345')
def test_skip_because_bug_and_condition_true(self):
self._test_skip_because_helper(bug='12348', condition=True)
def test_skip_because_bug_and_condition_false(self):
self._test_skip_because_helper(expected_to_skip=False,
bug='12349', condition=False)
def test_skip_because_bug_without_bug_never_skips(self):
"""Never skip without a bug parameter."""
self._test_skip_because_helper(expected_to_skip=False,
condition=True)
self._test_skip_because_helper(expected_to_skip=False)
def test_skip_because_invalid_bug_number(self):
"""Raise ValueError if with an invalid bug number"""
self.assertRaises(ValueError, self._test_skip_because_helper,
bug='critical_bug')
class TestIdempotentIdDecorator(base.TestCase):
def _test_helper(self, _id, **decorator_args):
@decorators.idempotent_id(_id)
def foo():
"""Docstring"""
pass
return foo
def _test_helper_without_doc(self, _id, **decorator_args):
@decorators.idempotent_id(_id)
def foo():
pass
return foo
def test_positive(self):
_id = str(uuid.uuid4())
foo = self._test_helper(_id)
self.assertIn('id-%s' % _id, getattr(foo, '__testtools_attrs'))
self.assertTrue(foo.__doc__.startswith('Test idempotent id: %s' % _id))
def test_positive_without_doc(self):
_id = str(uuid.uuid4())
foo = self._test_helper_without_doc(_id)
self.assertTrue(foo.__doc__.startswith('Test idempotent id: %s' % _id))
def test_idempotent_id_not_str(self):
_id = 42
self.assertRaises(TypeError, self._test_helper, _id)
def test_idempotent_id_not_valid_uuid(self):
_id = '42'
self.assertRaises(ValueError, self._test_helper, _id)
class TestSkipUnlessAttrDecorator(base.TestCase):
def _test_skip_unless_attr(self, attr, expected_to_skip=True):
class TestFoo(test.BaseTestCase):
expected_attr = not expected_to_skip
@decorators.skip_unless_attr(attr)
def test_foo(self):
pass
t = TestFoo('test_foo')
if expected_to_skip:
self.assertRaises(testtools.TestCase.skipException,
t.test_foo())
else:
try:
t.test_foo()
except Exception:
raise testtools.TestCase.failureException()
def test_skip_attr_does_not_exist(self):
self._test_skip_unless_attr('unexpected_attr')
def test_skip_attr_false(self):
self._test_skip_unless_attr('expected_attr')
def test_no_skip_for_attr_exist_and_true(self):
self._test_skip_unless_attr('expected_attr', expected_to_skip=False)
| apache-2.0 | -7,026,969,918,760,330,000 | 33.769841 | 79 | 0.617439 | false |
suutari/shoop | shuup/default_tax/admin_module/views.py | 1 | 2508 | # This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import string_concat
from shuup.admin.utils.picotable import Column
from shuup.admin.utils.views import CreateOrUpdateView, PicotableListView
from shuup.default_tax.models import TaxRule
from shuup.utils.patterns import PATTERN_SYNTAX_HELP_TEXT
class TaxRuleForm(forms.ModelForm):
class Meta:
model = TaxRule
fields = [
"tax_classes",
"customer_tax_groups",
"country_codes_pattern",
"region_codes_pattern",
"postal_codes_pattern",
"priority",
"override_group",
"tax",
"enabled",
]
help_texts = {
"country_codes_pattern": string_concat(
PATTERN_SYNTAX_HELP_TEXT,
" ",
_("Use ISO 3166-1 country codes (US, FI etc.)")
),
"region_codes_pattern": string_concat(
PATTERN_SYNTAX_HELP_TEXT,
" ",
_("Use two letter state codes for the US")
),
"postal_codes_pattern": PATTERN_SYNTAX_HELP_TEXT,
}
def clean(self):
data = super(TaxRuleForm, self).clean()
data["country_codes_pattern"] = data["country_codes_pattern"].upper()
return data
class TaxRuleEditView(CreateOrUpdateView):
model = TaxRule
template_name = "shuup/default_tax/admin/edit.jinja"
form_class = TaxRuleForm
context_object_name = "tax_rule"
add_form_errors_as_messages = True
class TaxRuleListView(PicotableListView):
url_identifier = "default_tax.tax_rule"
model = TaxRule
default_columns = [
Column("id", _("Tax Rule")),
Column("tax", _("Tax")),
Column("tax_classes", _("Tax Classes")),
Column("customer_tax_groups", _("Customer Tax Groups")),
Column("country_codes_pattern", _("Countries")),
Column("region_codes_pattern", _("Regions")),
Column("postal_codes_pattern", _("Postal Codes")),
Column("priority", _(u"Priority")),
Column("override_group", _(u"Override Group")),
Column("enabled", _(u"Enabled")),
]
| agpl-3.0 | -7,073,731,062,335,883,000 | 32 | 77 | 0.603668 | false |
grvcTeam/grvc-ual | px4_bringup/scripts/spawn_gzmodel.py | 1 | 10779 | #!/usr/bin/env python
import subprocess
import argparse
import utils
import numpy
import os
import rospkg
import rospy
import tf2_ros
import math
import time
import xml.etree.ElementTree as ET
def main():
    # Parse arguments # TODO: Too many arguments? Rethink this script
parser = argparse.ArgumentParser(description='Spawn robot in Gazebo for SITL')
parser.add_argument('-model', type=str, default="mbzirc",
help='robot model name, must match xacro description folder name')
parser.add_argument('-id', type=int, default=1,
help='robot id, used to compute sim_port')
parser.add_argument('-x', type=float, default=0.0,
help='initial x position')
parser.add_argument('-y', type=float, default=0.0,
help='initial y position')
parser.add_argument('-z', type=float, default=0.0,
help='initial z position')
parser.add_argument('-Y', type=float, default=0.0,
help='initial yaw angle')
parser.add_argument('-description_package', type=str, default="robots_description",
help='robot description package, must follow robots_description file structure')
parser.add_argument('-material', type=str, default="DarkGrey",
help='robot Gazebo/material; \
see materials/scripts/gazebo.material (at your gazebo version)')
parser.add_argument('-ual_backend', type=str, default="mavros",
help='UAL backend to use')
parser.add_argument('-frame_id', type=str, default="map",
help='initial position and yaw frame reference; id [map] refers to gazebo origin')
parser.add_argument('-append_xacro_args', type=str, nargs='+',
help='append additional arguments for xacro command')
args, unknown = parser.parse_known_args()
utils.check_unknown_args(unknown)
# Get an instance of RosPack with the default search paths
rospack = rospkg.RosPack()
# Init ros node
rospy.init_node('spawn_gzmodel_{}'.format(args.id))
# Xacro description must be in specified package
description_dir = rospack.get_path(args.description_package)
# Create temporary directory for robot sitl stuff
temp_dir = utils.temp_dir(args.id)
subprocess.call("mkdir -p " + temp_dir, shell=True)
# Get udp configuration, depending on id
udp_config = utils.udp_config(args.id)
# Check file type (xacro or sdf)
model_file_type = ""
model_file = ""
for listed_file in sorted(os.listdir(description_dir + "/models/" + args.model)):
if listed_file == "model.sdf":
model_file_type = "sdf"
model_file = "model.sdf"
break
if listed_file == "model.xacro":
model_file_type = "xacro"
model_file = "model.xacro"
break
if listed_file == args.model + ".sdf":
model_file_type = "sdf"
model_file = args.model + ".sdf"
break
if listed_file == args.model + ".xacro":
model_file_type = "xacro"
model_file = args.model + ".xacro"
break
if model_file_type == "xacro":
xacro_description = description_dir + "/models/" + args.model + "/" + model_file
# Create urdf from xacro description
temp_urdf = temp_dir + "/" + args.model + ".urdf"
xacro_args = "xacro --inorder -o " + temp_urdf + " " + \
xacro_description + \
" robot_id:=" + str(args.id) + \
" visual_material:=" + args.material + \
" enable_ground_truth:=false" + \
" enable_logging:=false" + \
" enable_camera:=false" + \
" enable_wind:=false"
if args.ual_backend == 'light':
xacro_args = xacro_args + \
" enable_mavlink_interface:=false" + \
" enable_gps_plugin:=false"
else:
xacro_args = xacro_args + \
" enable_mavlink_interface:=true" + \
" enable_gps_plugin:=true" + \
" mavlink_tcp_port:=" + str(udp_config["simulator_tcp_port"]) + \
" mavlink_udp_port:=" + str(udp_config["simulator_udp_port"])
if args.append_xacro_args:
for xacro_arg in args.append_xacro_args:
# print(xacro_arg)
xacro_args += ' '
xacro_args += xacro_arg.replace('=', ':=') # As args are passed as arg=value
# print(xacro_args)
# return
xacro_out = open(temp_dir+"/xacro.out", 'w')
xacro_err = open(temp_dir+"/xacro.err", 'w')
subprocess.call(xacro_args, shell=True, stdout=xacro_out, stderr=xacro_err)
xacro_out.close()
xacro_err.close()
# Create sdf from urdf
temp_sdf = temp_dir + "/" + args.model + ".sdf"
subprocess.call("gz sdf -p " + temp_urdf + " > " + temp_sdf, shell=True)
elif model_file_type == "sdf":
model_sdf = description_dir + "/models/" + args.model + "/" + model_file
temp_sdf = temp_dir + "/" + args.model + ".sdf"
subprocess.call("cp " + model_sdf + " " + temp_sdf, shell=True)
# Change simulation port
tree = ET.parse(temp_sdf)
root = tree.getroot()
model = root.find('model')
for plugintag in model.findall('plugin'):
if plugintag.get('name') == 'mavlink_interface':
porttag = plugintag.find('mavlink_udp_port')
porttag.text = str(udp_config["simulator_udp_port"])
porttag = plugintag.find('mavlink_tcp_port')
porttag.text = str(udp_config["simulator_tcp_port"])
# Typhoon_h480 patch - TODO use xacro instead
if args.model == 'typhoon_h480':
for plugintag in model.findall('plugin'):
if plugintag.get('name') == 'gimbal_controller':
yawtag = plugintag.find('joint_yaw')
yawtag.text = 'typhoon_h480_' + str(args.id) + '::cgo3_vertical_arm_joint'
rolltag = plugintag.find('joint_roll')
rolltag.text = 'typhoon_h480_' + str(args.id) + '::cgo3_horizontal_arm_joint'
pitchtag = plugintag.find('joint_pitch')
pitchtag.text = 'typhoon_h480_' + str(args.id) + '::cgo3_camera_joint'
imutag = plugintag.find('gimbal_imu')
imutag.text = 'typhoon_h480_' + str(args.id) + '::camera_imu'
if plugintag.get('name') == 'mavlink_interface':
controlchannelstag = plugintag.find('control_channels')
for channeltag in controlchannelstag.findall('channel'):
if channeltag.get('name') == 'gimbal_yaw':
yawtag = channeltag.find('joint_name')
yawtag.text = 'typhoon_h480_' + str(args.id) + '::cgo3_vertical_arm_joint'
if channeltag.get('name') == 'gimbal_roll':
rolltag = channeltag.find('joint_name')
rolltag.text = 'typhoon_h480_' + str(args.id) + '::cgo3_horizontal_arm_joint'
if channeltag.get('name') == 'gimbal_pitch':
pitchtag = channeltag.find('joint_name')
pitchtag.text = 'typhoon_h480_' + str(args.id) + '::cgo3_camera_joint'
for linktag in model.findall('link'):
if linktag.get('name') == 'cgo3_camera_link':
for sensortag in linktag.findall('sensor'):
if sensortag.get('name') == 'camera_imu':
sensortag.set('name', 'typhoon_h480_' + str(args.id) + '::camera_imu')
tree.write(temp_sdf)
else:
raise IOError("Couldn't find model.sdf/model.xacro/" + args.model + ".sdf/" + args.model + ".xacro description file")
# Set gravity=0 for light simulations
if args.ual_backend == 'light':
tree = ET.parse(temp_sdf)
root = tree.getroot()
model = root.find('model')
for linktag in model.findall('link'):
#if linktag.get('name') == 'base_link':
gravitytag = linktag.find('gravity')
if gravitytag == None:
gravitytag = ET.SubElement(linktag,'gravity')
gravitytag.text = '0'
tree.write(temp_sdf)
    # Sleep while waiting for the world to load
rospy.wait_for_service('/gazebo/spawn_sdf_model')
time.sleep(0.4)
# Minimum z to avoid collision with ground
z_min = 0.1
spawn_x = args.x
spawn_y = args.y
spawn_z = args.z
spawn_yaw = args.Y
if args.frame_id != 'map':
tf_buffer = tf2_ros.Buffer()
tf_listener = tf2_ros.TransformListener(tf_buffer)
try:
transform_stamped = tf_buffer.lookup_transform('map', args.frame_id, rospy.Time(0), rospy.Duration(10.0))
if transform_stamped.transform.rotation.x != 0:
raise ValueError('Only yaw rotations allowed at spawn; rotation.x should be 0, found {}'.format(transform_stamped.transform.rotation.x))
if transform_stamped.transform.rotation.y != 0:
raise ValueError('Only yaw rotations allowed at spawn; rotation.y should be 0, found {}'.format(transform_stamped.transform.rotation.y))
transform_yaw = 2.0 * math.atan2(transform_stamped.transform.rotation.z, transform_stamped.transform.rotation.w)
new_x = transform_stamped.transform.translation.x + spawn_x * math.cos(transform_yaw) - spawn_y * math.sin(transform_yaw)
new_y = transform_stamped.transform.translation.y + spawn_x * math.sin(transform_yaw) + spawn_y * math.cos(transform_yaw)
new_z = transform_stamped.transform.translation.z + spawn_z
new_yaw = transform_yaw + spawn_yaw
spawn_x = new_x
spawn_y = new_y
spawn_z = new_z
spawn_yaw = new_yaw
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
rospy.logerr('Failed to lookup transform from [{}] to [map], ignoring frame_id'.format(args.frame_id))
# Spawn robot sdf in gazebo
gzmodel_args = "rosrun gazebo_ros spawn_model -sdf" + \
" -file " + temp_sdf + \
" -model " + args.model + "_" + str(args.id) + \
" -x " + str(spawn_x) + \
" -y " + str(spawn_y) + \
" -z " + str(spawn_z + z_min) + \
" -Y " + str(spawn_yaw) + \
" __name:=spawn_" + args.model + "_" + str(args.id)
rospy.sleep(args.id)
subprocess.call(gzmodel_args, shell=True)
rospy.loginfo('Model spawned')
if __name__ == "__main__":
main()
| mit | -9,056,991,320,575,629,000 | 44.868085 | 152 | 0.567121 | false |
cournape/numscons | numscons/scons-local/scons-local-1.2.0/SCons/Platform/hpux.py | 1 | 1759 | """engine.SCons.Platform.hpux
Platform-specific initialization for HP-UX systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Platform/hpux.py 2009/09/04 16:33:07 david"
import posix
def generate(env):
posix.generate(env)
#Based on HP-UX11i: ARG_MAX=2048000 - 3000 for environment expansion
env['MAXLINELENGTH'] = 2045000
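
# Hedged usage sketch (not part of the original module; assumes a working SCons
# installation): this platform module is normally selected implicitly, e.g.
#
#   # in an SConstruct file:
#   env = Environment(platform='hpux')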
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| bsd-3-clause | 6,774,897,390,272,396,000 | 37.23913 | 89 | 0.761228 | false |
wwrechard/pydlm | pydlm/access/_dlmGet.py | 1 | 5519 | """
===============================================================================
The code for all get methods
===============================================================================
"""
from numpy import dot
from pydlm.core._dlm import _dlm
class _dlmGet(_dlm):
""" The class containing all get methods for dlm class.
Methods:
_getComponent: get the component if it is in dlm
_getLatentState: get the latent state for a given component
_getLatentCov: get the latent covariance for a given component
_getComponentMean: get the mean of a given component
_getComponentVar: get the variance of a given component
"""
# function to get the corresponding latent state
def _getLatentState(self, name, filterType, start, end):
""" Get the latent states of a given component.
Args:
name: the name of the component.
filterType: the type of the latent states to be returned.
could be "forwardFilter", "backwardSmoother" or
"predict".
start: the start date for the latent states to be returned.
end: the end date to be returned.
Returns:
A list of latent states.
"""
end += 1
indx = self.builder.componentIndex[name]
patten = lambda x: x if x is None else x[indx[0]:(indx[1] + 1), 0]
if filterType == 'forwardFilter':
return list(map(patten, self.result.filteredState[start:end]))
elif filterType == 'backwardSmoother':
return list(map(patten, self.result.smoothedState[start:end]))
elif filterType == 'predict':
return list(map(patten, self.result.predictedState[start:end]))
else:
raise NameError('Incorrect filter type')
# function to get the corresponding latent covariance
def _getLatentCov(self, name, filterType, start, end):
""" Get the latent covariance of a given component.
Args:
name: the name of the component.
filterType: the type of the latent covariance to be returned.
could be "forwardFilter", "backwardSmoother" or
"predict".
start: the start date for the latent covariance to be returned.
end: the end date to be returned.
Returns:
A list of latent covariance.
"""
end += 1
indx = self.builder.componentIndex[name]
patten = lambda x: x if x is None \
else x[indx[0]:(indx[1] + 1), indx[0]:(indx[1] + 1)]
if filterType == 'forwardFilter':
return list(map(patten, self.result.filteredCov[start:end]))
elif filterType == 'backwardSmoother':
return list(map(patten, self.result.smoothedCov[start:end]))
elif filterType == 'predict':
return list(map(patten, self.result.predictedCov[start:end]))
else:
raise NameError('Incorrect filter type')
# function to get the component mean
def _getComponentMean(self, name, filterType, start, end):
""" Get the mean of a given component.
Args:
name: the name of the component.
filterType: the type of the mean to be returned.
could be "forwardFilter", "backwardSmoother" or
"predict".
start: the start date for the mean to be returned.
end: the end date to be returned.
Returns:
A list of mean.
"""
end += 1
comp = self._fetchComponent(name)
componentState = self._getLatentState(name=name,
filterType=filterType,
start=start, end=end)
result = []
for k, i in enumerate(range(start, end)):
if name in self.builder.dynamicComponents:
comp.updateEvaluation(i)
elif name in self.builder.automaticComponents:
comp.updateEvaluation(i, self.padded_data)
result.append(dot(comp.evaluation,
componentState[k]).tolist()[0][0])
return result
# function to get the component variance
def _getComponentVar(self, name, filterType, start, end):
""" Get the variance of a given component.
Args:
name: the name of the component.
filterType: the type of the variance to be returned.
could be "forwardFilter", "backwardSmoother" or
"predict".
start: the start date for the variance to be returned.
end: the end date to be returned.
Returns:
A list of variance.
"""
end += 1
comp = self._fetchComponent(name)
componentCov = self._getLatentCov(name=name,
filterType=filterType,
start=start, end=end)
result = []
for k, i in enumerate(range(start, end)):
if name in self.builder.dynamicComponents:
comp.updateEvaluation(i)
elif name in self.builder.automaticComponents:
comp.updateEvaluation(i, self.padded_data)
result.append(dot(
dot(comp.evaluation,
componentCov[k]), comp.evaluation.T).tolist()[0][0])
return result
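
# Hedged usage sketch (not part of this module): these private getters are
# normally reached through the public pydlm.dlm wrapper, along the lines of
#
#   from pydlm import dlm, trend
#   myDLM = dlm(data) + trend(degree=1, name='trend0')
#   myDLM.fit()
#   mean = myDLM.getMean(filterType='backwardSmoother', name='trend0')
#
# The exact wrapper signatures above are an assumption; consult the pydlm docs.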
| bsd-3-clause | -5,919,468,780,384,474,000 | 37.326389 | 79 | 0.548831 | false |
vineodd/PIMSim | GEM5Simulation/gem5/util/systemc/systemc_within_gem5/systemc_simple_object/SystemC_Example.py | 5 | 2697 | # Copyright 2018 Google, Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
from m5.params import *
from m5.SimObject import SimObject
from SystemC import SystemC_ScModule
# This class is a subclass of sc_module, and all the special magic which makes
# that work is handled in the base classes.
class SystemC_Printer(SystemC_ScModule):
type = 'SystemC_Printer'
cxx_class = 'Printer'
cxx_header = 'systemc_simple_object/printer.hh'
# This parameter will be available in the SystemC_PrinterParams::create
# function and can be passed to the c++ object's constructor, used to set
# one of its member variables, as a parameter to one of its methods, etc.
prefix = Param.String('', 'Prefix for each word')
# This is a standard gem5 SimObject class with no special accommodation for the
# fact that one of its parameters is a systemc object.
class Gem5_Feeder(SimObject):
type = 'Gem5_Feeder'
cxx_class = 'Feeder'
cxx_header = 'systemc_simple_object/feeder.hh'
# This parameter will be a pointer to an instance of the class above.
printer = Param.SystemC_Printer('Printer for our words.')
delay = Param.Latency('1ns', 'Time to wait between each word.')
strings = VectorParam.String([], 'Words to print.')
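
# Hypothetical wiring sketch (illustration only, not part of this file): in a
# gem5 config script the two SimObjects above could be combined roughly as
#
#   printer = SystemC_Printer(prefix='[printer] ')
#   feeder = Gem5_Feeder(printer=printer, delay='2ns',
#                        strings=['hello', 'world'])
#
# The surrounding Root/System setup is omitted and assumed.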
| gpl-3.0 | -5,901,187,575,971,177,000 | 49.886792 | 78 | 0.763441 | false |
oschumac/python-uart-pi-xbridge | BGReadings.py | 1 | 3925 | #!/usr/bin/python
import json
import socket
import sys
import time
import os
import array
import math
import sqlite3
import mongo
import db
import sensor
import xdriplib
WixelData = {"_id":0,"TransmitterId":"00000","CaptureDateTime":0,"RelativeTime":0,"ReceivedSignalStrength":0,"RawValue":0,"TransmissionId":0,"BatteryLife":0,"UploadAttempts":0,"Uploaded":0,"UploaderBatteryLife":0,"FilteredValue":0 }
def oldinsertIntoWixeldata_(data) :
if sensor.SensorisActive():
CurSensor=sensor.currentSensor()
print "CurSensor->" + str(CurSensor['started_at'])
TimeDelta=((long(data['CaptureDateTime'])-long(CurSensor['started_at']))*1.0)/1000/60/60
Adjusted_raw=xdriplib.calculateAgeAdjustedRawValue(TimeDelta,int(data['RawValue']))
print "BGReadings AgeAdjustedRaw -> " + str(Adjusted_raw)
Adjusted_raw=xdriplib.calculateAgeAdjustedRawValue(TimeDelta,int(data['RawValue']))
else:
print "No Sensor Active"
Adjusted_raw=0
conn = sqlite3.connect(db.openapsDBName)
sql='insert into ' + db.tableNameWixeldata
sql+='(TransmitterId, CaptureDateTime, RelativeTime, ReceivedSignalStrength, RawValue, TransmissionId, BatteryLife, UploadAttempts, Uploaded, UploaderBatteryLife, FilteredValue, age_adjusted_raw_value ) VALUES ('
sql+=" '" + str(data['TransmitterId']) + "'"
sql+=', ' + str(data['CaptureDateTime'])
sql+=', ' + str(data['RelativeTime'])
sql+=', ' + str(data['ReceivedSignalStrength'])
sql+=', ' + str(data['RawValue'])
sql+=', ' + str(data['TransmissionId'])
sql+=', ' + str(data['BatteryLife'])
sql+=', ' + str(data['UploadAttempts'])
sql+=', ' + str(data['Uploaded'])
sql+=', ' + str(data['UploaderBatteryLife'])
sql+=', ' + str(Adjusted_raw)
sql+=', ' + str(data['FilteredValue']) + ' )'
#print "(BGReadings)(insertIntoWixel) SQL->" + sql
conn.execute(sql)
conn.commit()
print "Records created successfully";
conn.close()
def oldgetrawData_():
wdata=WixelData
sql = 'select _id, TransmitterId, CaptureDateTime, RelativeTime, ReceivedSignalStrength, RawValue, TransmissionId, BatteryLife, UploadAttempts, Uploaded, UploaderBatteryLife, FilteredValue '
sql+= 'from ' + db.tableNameWixeldata + ' order by CaptureDateTime desc limit 1'
#print "(BGReadings)(getrawData) SQL->" + sql
conn = sqlite3.connect(db.openapsDBName)
cur = conn.cursor()
cur.execute(sql)
data = cur.fetchone()
conn.close()
wdata=WixelData
if data!=None:
wdata['_id']=data[0]
wdata['TransmitterId']=data[1]
wdata['CaptureDateTime']=data[2]
wdata['RelativeTime']=data[3]
wdata['ReceivedSignalStrength']=data[4]
wdata['RawValue']=data[5]
wdata['TransmissionId']=data[6]
wdata['BatteryLife']=data[7]
wdata['UploadAttempts']=data[8]
wdata['Uploaded']=data[9]
wdata['UploaderBatteryLife']=data[10]
wdata['FilteredValue']=data[11]
else:
print "(BGReadings)(getrawData) No data available"
return wdata;
def oldinitBGReadings_():
initDB()
def oldlatestRaw_(anzahl):
sql = 'select RawValue, CaptureDateTime, age_adjusted_raw_value as Timestamp '
sql+= 'from ' + db.tableNameWixeldata + ' order by CaptureDateTime desc limit ' + str(anzahl)
conn = sqlite3.connect(db.openapsDBName)
cur = conn.cursor()
cur.execute(sql)
data = cur.fetchall()
conn.close()
return data;
def oldtest_():
mydata = {"_id":1,"TransmitterId":"66PNX","CaptureDateTime":0,"RelativeTime":0,"ReceivedSignalStrength":0,"RawValue":0,"TransmissionId":0,"BatteryLife":0,"UploadAttempts":0,"Uploaded":0,"UploaderBatteryLife":0,"FilteredValue":0 }
mydata['CaptureDateTime']=long(time.time())
mydata['RelativeTime']=2121313
mydata['RawValue']="155000"
mydata['FilteredValue']="155000"
mydata['BatteryLife']="240"
mydata['TransmitterId']="00000"
mydata['ReceivedSignalStrength']=0
mydata['TransmissionId']=0
print "Time adjusted raw" + str(xdriplib.calculateAgeAdjustedRawValue(5,155000))
insertIntoWixeldata(mydata)
if __name__ == "__main__":
    oldtest_()  # note: the test helper above is named oldtest_()
| mit | -7,661,033,420,010,583,000 | 31.983193 | 232 | 0.713885 | false |
diegojromerolopez/djanban | src/djanban/apps/work_hours_packages/migrations/0001_initial.py | 1 | 2517 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-26 14:15
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('multiboards', '0004_auto_20170526_1615'),
('members', '0023_auto_20170519_1715'),
('boards', '0068_auto_20170515_1844'),
]
operations = [
migrations.CreateModel(
name='WorkHoursPackage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256, verbose_name='Name of this package')),
                ('description', models.TextField(help_text='Long description of this package describing the type of work the workers must do', verbose_name='Description of this package')),
('number_of_hours', models.PositiveIntegerField(help_text='Number of hours of this package.', verbose_name='Number of hours')),
('is_paid', models.BooleanField(default=False, help_text='Has the client paid for this package', verbose_name='Is this package paid?')),
('payment_datetime', models.DateField(blank=True, default=None, null=True, verbose_name='When this package was paid')),
('start_work_date', models.DateField(verbose_name='Start date')),
('end_work_date', models.DateField(verbose_name='End date')),
('board', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='work_hours_packages', to='boards.Board', verbose_name='Board')),
('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='created_work_hours_packages', to='members.Member', verbose_name='Member')),
('label', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='work_hours_packages', to='boards.Label', verbose_name='Label')),
('members', models.ManyToManyField(blank=True, related_name='work_hours_packages', to='members.Member', verbose_name='Member')),
('multiboard', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='work_hours_packages', to='multiboards.Multiboard', verbose_name='Multiboard')),
],
),
]
| mit | -4,146,718,177,459,947,500 | 65.236842 | 224 | 0.657926 | false |
coffeemakr/torweb | doc/conf.py | 1 | 8854 | # -*- coding: utf-8 -*-
#
# Torweb documentation build configuration file, created by
# sphinx-quickstart on Sun Jan 3 13:37:43 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import alabaster
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'alabaster'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Torweb'
copyright = u'2016, coffeemakr'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0'
# The full version, including alpha/beta/rc tags.
release = '0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
autodoc_default_flags = ['members', 'show-inheritance']
autoclass_content = 'both'
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
html_theme_path = [alabaster.get_path()]
html_theme_options = {
'logo': 'logo.png',
'github_user': 'bitprophet',
'github_repo': 'alabaster',
}
html_theme = 'alabaster'
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
'donate.html',
]
}
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Torwebdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Torweb.tex', u'Torweb Documentation',
u'coffeemakr', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'torweb', u'Torweb Documentation',
[u'coffeemakr'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Torweb', u'Torweb Documentation',
u'coffeemakr', 'Torweb', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| gpl-2.0 | 9,221,430,299,245,530,000 | 29.426117 | 79 | 0.69991 | false |
cnobile2012/inventory | inventory/regions/admin.py | 1 | 3582 | # -*- coding: utf-8 -*-
#
# inventory/regions/admin.py
#
"""
Country, Language, and Timezone region admin.
"""
__docformat__ = "restructuredtext en"
from django.contrib import admin
from django.utils.translation import gettext_lazy as _
from .models import Country, Subdivision, Language, TimeZone, Currency
from .forms import (
CountryForm, SubdivisionForm, LanguageForm, TimeZoneForm, CurrencyForm)
#
# CountryAdmin
#
@admin.register(Country)
class CountryAdmin(admin.ModelAdmin):
fieldsets = (
(None, {'fields': ('country', 'code',)}),
(_('Status'), {'classes': ('collapse',),
'fields': ('active',)}),
)
list_display = ('code', 'country', 'active',)
readonly_fields = ('country', 'code',)
list_editable = ('active',)
search_fields = ('code', 'country',)
list_filter = ('active', 'code',)
ordering = ('country',)
form = CountryForm
#
# SubdivisionAdmin
#
@admin.register(Subdivision)
class SubdivisionAdmin(admin.ModelAdmin):
fieldsets = (
(None, {'fields': ('subdivision_name', 'country', 'code',)}),
(_("Status"), {'classes': ('collapse',),
'fields': ('active',)}),
)
ordering = ('country__country', 'subdivision_name',)
readonly_fields = ('subdivision_name', 'country', 'code',)
list_display = ('subdivision_name', 'country', 'code', 'active',)
list_editable = ('active',)
list_filter = ('active', 'country__country',)
search_fields = ('subdivision_name', 'code', 'country__code',
'country__country',)
form = SubdivisionForm
#
# Language
#
@admin.register(Language)
class LanguageAdmin(admin.ModelAdmin):
fieldsets = (
(None, {'fields': ('locale', 'country', 'code',)}),
(_("Status"), {'classes': ('collapse',),
'fields': ('active',)}),
)
ordering = ('locale',)
readonly_fields = ('locale', 'country', 'code',)
list_display = ('locale', 'country', 'code', 'active',)
list_editable = ('active',)
list_filter = ('active', 'country__country',)
search_fields = ('locale', 'country__code', 'country__country',)
form = LanguageForm
#
# TimeZone
#
@admin.register(TimeZone)
class TimeZoneAdmin(admin.ModelAdmin):
fieldsets = (
(None, {'fields': ('zone', 'coordinates', 'country', 'desc',)}),
(_("Status"), {'classes': ('collapse',),
'fields': ('active',)}),
)
ordering = ('zone',)
readonly_fields = ('zone', 'coordinates', 'country', 'desc',)
list_display = ('zone', 'country', 'coordinates', 'desc', 'active',)
list_editable = ('active',)
list_filter = ('active', 'country__country',)
search_fields = ('country__country', 'country__code', 'zone', 'desc',)
form = TimeZoneForm
#
# Currency
#
@admin.register(Currency)
class CurrencyAdmin(admin.ModelAdmin):
fieldsets = (
(None, {'fields': ('currency', 'country', 'alphabetic_code',
'numeric_code', 'minor_unit', 'symbol',)}),
(_('Status'), {'classes': ('collapse',),
'fields': ('active',)}),
)
readonly_fields = ('currency', 'country', 'alphabetic_code',
'numeric_code', 'minor_unit', 'symbol',)
list_display = ('currency', 'country', 'symbol', 'active',)
list_editable = ('active',)
list_filter = ('active', 'country__country',)
search_fields = ('currency', 'country__country', 'alphabetic_code',
'numeric_code',)
form = CurrencyForm
| mit | -2,082,895,579,007,227,400 | 30.699115 | 75 | 0.56756 | false |
robwarm/gpaw-symm | doc/install/Bull/customize_curie_gpu.py | 1 | 1844 | import os
scalapack = True
hdf5 = False
# ld: /usr/local/phdf5-1.8.5/lib/libhdf5.a(H5.o): relocation R_X86_64_32 against `.rodata.str1.4' can not be used when making a shared object; recompile with -fPIC
compiler = 'icc'
mpi='/opt/mpi/bullxmpi/1.1.16.5'
mkl='/usr/local/Intel_compilers/c/composer_xe_2011_sp1.7.256/mkl/lib/intel64'
intel='/usr/local/Intel_compilers/c/composer_xe_2011_sp1.7.256/compiler/lib/intel64'
hdf='/usr/local/phdf5-1.8.5'
#
# cublasZdgmm does not exist in cuda 4.2
# /tmp/ipo_iccjq2M5h1.o: In function `cudgmm':
# ipo_out1.c:(.text.hot0001d+0x522b): undefined reference to `cublasZdgmm'
# strings /usr/local/cuda-4.2/lib64/libcublas.so | grep "cublasZdgmm"
cuda='/usr/local/cuda-4.2' # comment out if no cuda
libraries =[
'cublas', 'cufft', 'cuda', # comment out if no cuda
'cudart', # comment out if no cuda
#'mkl_def',
'mkl_scalapack_lp64', 'mkl_intel_lp64', 'mkl_sequential',
'mkl_core', 'mkl_blacs_openmpi_lp64',
#'hdf5',
'mpi',
]
library_dirs =[
intel,
os.path.join(mpi, 'lib'),
mkl,
os.path.join(cuda, 'lib64'), # comment out if no cuda
#os.path.join(hdf, 'lib'),
]
include_dirs +=[
os.path.join(mpi, 'include'),
os.path.join(cuda, 'include'), # comment out if no cuda
#os.path.join(hdf, 'include'),
]
extra_link_args =[
'-Wl,-rpath=' + intel +
',-rpath=' + os.path.join(mpi, 'lib') +
',-rpath=' + os.path.join(cuda, 'lib64') + # comment out if no cuda
',-rpath=' + mkl
#',-rpath=' + os.path.join(hdf, 'lib')
]
extra_compile_args =['-xHOST', '-O3', '-ipo', '-std=c99', '-fPIC', '-Wall']
extra_objects += ['./c/cukernels.o']
define_macros += [('GPAW_NO_UNDERSCORE_CBLACS', '1')]
define_macros += [('GPAW_NO_UNDERSCORE_CSCALAPACK', '1')]
mpicompiler = os.path.join(mpi, 'bin', 'mpicc')
mpilinker = mpicompiler
| gpl-3.0 | -5,547,694,432,866,405,000 | 33.792453 | 163 | 0.633406 | false |
brahle/fitmarket-python-api | setup.py | 1 | 3056 | # coding: utf-8
"""
Fitmarket
    A small number of people - donors - share daily measurements of their weight. From a single donor's daily weight we derive the values of two stocks: - stock X has a value equal to the donor's weight on that day. - the inverse stock ~X has a value of (150 kg - X). Note that: - as X rises, ~X falls. - X + ~X = 150 kg Each player starts the game with 10,000 kg of available money. The player uses that money to trade stocks. A player's total value is the sum of the available money and the current value of all stocks they own. The goal of the game is to maximize total value by predicting stock price movements well. For example, on day one a player buys 125 shares of \"X\" at 80 kg. On day two, the stock rises to 82 kg. If the player sells all \"X\" shares, they have earned 2 kg * 125 = 250 kg! The game does not allow a donor to trade their own stock.
OpenAPI spec version: 1.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from setuptools import setup, find_packages
NAME = "fitmarket_api"
VERSION = "1.0.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = ["urllib3 >= 1.15", "six >= 1.10", "certifi", "python-dateutil"]
setup(
name=NAME,
version=VERSION,
description="Fitmarket",
author_email="",
url="",
keywords=["Swagger", "Fitmarket"],
install_requires=REQUIRES,
packages=find_packages(),
include_package_data=True,
long_description="""\
    A small number of people - donors - share daily measurements of their weight. From a single donor's daily weight we derive the values of two stocks: - stock X has a value equal to the donor's weight on that day. - the inverse stock ~X has a value of (150 kg - X). Note that: - as X rises, ~X falls. - X + ~X = 150 kg Each player starts the game with 10,000 kg of available money. The player uses that money to trade stocks. A player's total value is the sum of the available money and the current value of all stocks they own. The goal of the game is to maximize total value by predicting stock price movements well. For example, on day one a player buys 125 shares of \"X\" at 80 kg. On day two, the stock rises to 82 kg. If the player sells all \"X\" shares, they have earned 2 kg * 125 = 250 kg! The game does not allow a donor to trade their own stock.
"""
)
| apache-2.0 | -2,895,560,746,953,038,300 | 55.148148 | 880 | 0.735158 | false |
SaMnCo/charm-dashing | lib/charmhelpers/core/hookenv.py | 1 | 13812 | "Interactions with the Juju environment"
# Copyright 2013 Canonical Ltd.
#
# Authors:
# Charm Helpers Developers <[email protected]>
import os
import json
import yaml
import subprocess
import sys
import UserDict
from subprocess import CalledProcessError
CRITICAL = "CRITICAL"
ERROR = "ERROR"
WARNING = "WARNING"
INFO = "INFO"
DEBUG = "DEBUG"
MARKER = object()
cache = {}
def cached(func):
"""Cache return values for multiple executions of func + args
For example::
@cached
def unit_get(attribute):
pass
unit_get('test')
will cache the result of unit_get + 'test' for future calls.
"""
def wrapper(*args, **kwargs):
global cache
key = str((func, args, kwargs))
try:
return cache[key]
except KeyError:
res = func(*args, **kwargs)
cache[key] = res
return res
return wrapper
def flush(key):
"""Flushes any entries from function cache where the
key is found in the function+args """
flush_list = []
for item in cache:
if key in item:
flush_list.append(item)
for item in flush_list:
del cache[item]
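# Illustrative note (not part of the original module): @cached keys its cache
# on str((func, args, kwargs)), so flushing on a substring such as a unit name
# drops every cached result mentioning that unit. relation_set() below relies
# on this to invalidate cached relation-gets for the local unit:
#
#     flush(local_unit())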
def log(message, level=None):
"""Write a message to the juju log"""
command = ['juju-log']
if level:
command += ['-l', level]
command += [message]
subprocess.call(command)
class Serializable(UserDict.IterableUserDict):
"""Wrapper, an object that can be serialized to yaml or json"""
def __init__(self, obj):
# wrap the object
UserDict.IterableUserDict.__init__(self)
self.data = obj
def __getattr__(self, attr):
# See if this object has attribute.
if attr in ("json", "yaml", "data"):
return self.__dict__[attr]
# Check for attribute in wrapped object.
got = getattr(self.data, attr, MARKER)
if got is not MARKER:
return got
# Proxy to the wrapped object via dict interface.
try:
return self.data[attr]
except KeyError:
raise AttributeError(attr)
def __getstate__(self):
# Pickle as a standard dictionary.
return self.data
def __setstate__(self, state):
# Unpickle into our wrapper.
self.data = state
def json(self):
"""Serialize the object to json"""
return json.dumps(self.data)
def yaml(self):
"""Serialize the object to yaml"""
return yaml.dump(self.data)
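# Illustrative sketch (values are hypothetical): wrapping a plain dict so it
# can be serialized, with attribute access proxied to the wrapped object.
#
#     settings = Serializable({'port': 80, 'proto': 'tcp'})
#     settings.port      # -> 80, found via the wrapped dict
#     settings.json()    # -> e.g. '{"port": 80, "proto": "tcp"}'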
def execution_environment():
"""A convenient bundling of the current execution context"""
context = {}
context['conf'] = config()
if relation_id():
context['reltype'] = relation_type()
context['relid'] = relation_id()
context['rel'] = relation_get()
context['unit'] = local_unit()
context['rels'] = relations()
context['env'] = os.environ
return context
def in_relation_hook():
"""Determine whether we're running in a relation hook"""
return 'JUJU_RELATION' in os.environ
def relation_type():
"""The scope for the current relation hook"""
return os.environ.get('JUJU_RELATION', None)
def relation_id():
"""The relation ID for the current relation hook"""
return os.environ.get('JUJU_RELATION_ID', None)
def local_unit():
"""Local unit ID"""
return os.environ['JUJU_UNIT_NAME']
def remote_unit():
"""The remote unit for the current relation hook"""
return os.environ['JUJU_REMOTE_UNIT']
def service_name():
"""The name service group this unit belongs to"""
return local_unit().split('/')[0]
def hook_name():
"""The name of the currently executing hook"""
return os.path.basename(sys.argv[0])
class Config(dict):
"""A Juju charm config dictionary that can write itself to
disk (as json) and track which values have changed since
the previous hook invocation.
Do not instantiate this object directly - instead call
``hookenv.config()``
Example usage::
>>> # inside a hook
>>> from charmhelpers.core import hookenv
>>> config = hookenv.config()
>>> config['foo']
'bar'
>>> config['mykey'] = 'myval'
>>> config.save()
>>> # user runs `juju set mycharm foo=baz`
>>> # now we're inside subsequent config-changed hook
>>> config = hookenv.config()
>>> config['foo']
'baz'
>>> # test to see if this val has changed since last hook
>>> config.changed('foo')
True
>>> # what was the previous value?
>>> config.previous('foo')
'bar'
>>> # keys/values that we add are preserved across hooks
>>> config['mykey']
'myval'
>>> # don't forget to save at the end of hook!
>>> config.save()
"""
CONFIG_FILE_NAME = '.juju-persistent-config'
def __init__(self, *args, **kw):
super(Config, self).__init__(*args, **kw)
self._prev_dict = None
self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
if os.path.exists(self.path):
self.load_previous()
def load_previous(self, path=None):
"""Load previous copy of config from disk so that current values
can be compared to previous values.
:param path:
File path from which to load the previous config. If `None`,
config is loaded from the default location. If `path` is
specified, subsequent `save()` calls will write to the same
path.
"""
self.path = path or self.path
with open(self.path) as f:
self._prev_dict = json.load(f)
def changed(self, key):
"""Return true if the value for this key has changed since
the last save.
"""
if self._prev_dict is None:
return True
return self.previous(key) != self.get(key)
def previous(self, key):
"""Return previous value for this key, or None if there
is no "previous" value.
"""
if self._prev_dict:
return self._prev_dict.get(key)
return None
def save(self):
"""Save this config to disk.
Preserves items in _prev_dict that do not exist in self.
"""
if self._prev_dict:
for k, v in self._prev_dict.iteritems():
if k not in self:
self[k] = v
with open(self.path, 'w') as f:
json.dump(self, f)
@cached
def config(scope=None):
"""Juju charm configuration"""
config_cmd_line = ['config-get']
if scope is not None:
config_cmd_line.append(scope)
config_cmd_line.append('--format=json')
try:
config_data = json.loads(subprocess.check_output(config_cmd_line))
if scope is not None:
return config_data
return Config(config_data)
except ValueError:
return None
@cached
def relation_get(attribute=None, unit=None, rid=None):
"""Get relation information"""
_args = ['relation-get', '--format=json']
if rid:
_args.append('-r')
_args.append(rid)
_args.append(attribute or '-')
if unit:
_args.append(unit)
try:
return json.loads(subprocess.check_output(_args))
except ValueError:
return None
except CalledProcessError, e:
if e.returncode == 2:
return None
raise
def relation_set(relation_id=None, relation_settings={}, **kwargs):
"""Set relation information for the current unit"""
relation_cmd_line = ['relation-set']
if relation_id is not None:
relation_cmd_line.extend(('-r', relation_id))
for k, v in (relation_settings.items() + kwargs.items()):
if v is None:
relation_cmd_line.append('{}='.format(k))
else:
relation_cmd_line.append('{}={}'.format(k, v))
subprocess.check_call(relation_cmd_line)
# Flush cache of any relation-gets for local unit
flush(local_unit())
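# Illustrative sketch (relation keys are hypothetical): inside a relation
# hook, read a setting published by the remote unit and publish our own.
#
#     host = relation_get('hostname', unit=remote_unit())
#     relation_set(relation_settings={'port': 5432}, admin='yes')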
@cached
def relation_ids(reltype=None):
"""A list of relation_ids"""
reltype = reltype or relation_type()
relid_cmd_line = ['relation-ids', '--format=json']
if reltype is not None:
relid_cmd_line.append(reltype)
return json.loads(subprocess.check_output(relid_cmd_line)) or []
return []
@cached
def related_units(relid=None):
"""A list of related units"""
relid = relid or relation_id()
units_cmd_line = ['relation-list', '--format=json']
if relid is not None:
units_cmd_line.extend(('-r', relid))
return json.loads(subprocess.check_output(units_cmd_line)) or []
@cached
def relation_for_unit(unit=None, rid=None):
"""Get the json represenation of a unit's relation"""
unit = unit or remote_unit()
relation = relation_get(unit=unit, rid=rid)
for key in relation:
if key.endswith('-list'):
relation[key] = relation[key].split()
relation['__unit__'] = unit
return relation
@cached
def relations_for_id(relid=None):
"""Get relations of a specific relation ID"""
relation_data = []
relid = relid or relation_ids()
for unit in related_units(relid):
unit_data = relation_for_unit(unit, relid)
unit_data['__relid__'] = relid
relation_data.append(unit_data)
return relation_data
@cached
def relations_of_type(reltype=None):
"""Get relations of a specific type"""
relation_data = []
reltype = reltype or relation_type()
for relid in relation_ids(reltype):
for relation in relations_for_id(relid):
relation['__relid__'] = relid
relation_data.append(relation)
return relation_data
@cached
def relation_types():
"""Get a list of relation types supported by this charm"""
charmdir = os.environ.get('CHARM_DIR', '')
mdf = open(os.path.join(charmdir, 'metadata.yaml'))
md = yaml.safe_load(mdf)
rel_types = []
for key in ('provides', 'requires', 'peers'):
section = md.get(key)
if section:
rel_types.extend(section.keys())
mdf.close()
return rel_types
@cached
def relations():
"""Get a nested dictionary of relation data for all related units"""
rels = {}
for reltype in relation_types():
relids = {}
for relid in relation_ids(reltype):
units = {local_unit(): relation_get(unit=local_unit(), rid=relid)}
for unit in related_units(relid):
reldata = relation_get(unit=unit, rid=relid)
units[unit] = reldata
relids[relid] = units
rels[reltype] = relids
return rels
@cached
def is_relation_made(relation, keys='private-address'):
'''
Determine whether a relation is established by checking for
presence of key(s). If a list of keys is provided, they
must all be present for the relation to be identified as made
'''
if isinstance(keys, str):
keys = [keys]
for r_id in relation_ids(relation):
for unit in related_units(r_id):
context = {}
for k in keys:
context[k] = relation_get(k, rid=r_id,
unit=unit)
if None not in context.values():
return True
return False
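# Hypothetical usage sketch: only proceed once some unit on the 'database'
# relation has provided both keys.
#
#     if is_relation_made('database', keys=['host', 'password']):
#         pass  # configure the service here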
def open_port(port, protocol="TCP"):
"""Open a service network port"""
_args = ['open-port']
_args.append('{}/{}'.format(port, protocol))
subprocess.check_call(_args)
def close_port(port, protocol="TCP"):
"""Close a service network port"""
_args = ['close-port']
_args.append('{}/{}'.format(port, protocol))
subprocess.check_call(_args)
@cached
def unit_get(attribute):
"""Get the unit ID for the remote unit"""
_args = ['unit-get', '--format=json', attribute]
try:
return json.loads(subprocess.check_output(_args))
except ValueError:
return None
def unit_private_ip():
"""Get this unit's private IP address"""
return unit_get('private-address')
class UnregisteredHookError(Exception):
"""Raised when an undefined hook is called"""
pass
class Hooks(object):
"""A convenient handler for hook functions.
Example::
hooks = Hooks()
# register a hook, taking its name from the function name
@hooks.hook()
def install():
pass # your code here
# register a hook, providing a custom hook name
@hooks.hook("config-changed")
def config_changed():
pass # your code here
if __name__ == "__main__":
# execute a hook based on the name the program is called by
hooks.execute(sys.argv)
"""
def __init__(self):
super(Hooks, self).__init__()
self._hooks = {}
def register(self, name, function):
"""Register a hook"""
self._hooks[name] = function
def execute(self, args):
"""Execute a registered hook based on args[0]"""
hook_name = os.path.basename(args[0])
if hook_name in self._hooks:
self._hooks[hook_name]()
else:
raise UnregisteredHookError(hook_name)
def hook(self, *hook_names):
"""Decorator, registering them as hooks"""
def wrapper(decorated):
for hook_name in hook_names:
self.register(hook_name, decorated)
else:
self.register(decorated.__name__, decorated)
if '_' in decorated.__name__:
self.register(
decorated.__name__.replace('_', '-'), decorated)
return decorated
return wrapper
def charm_dir():
"""Return the root directory of the current charm"""
return os.environ.get('CHARM_DIR')
| agpl-3.0 | 376,197,498,269,458,700 | 26.679359 | 78 | 0.588257 | false |
Schevo/schevo | schevo/database2.py | 1 | 88861 | """Schevo database, format 2."""
# Copyright (c) 2001-2009 ElevenCraft Inc.
# See LICENSE for details.
import sys
from schevo.lib import optimize
import operator
import os
import random
try:
import louie
except ImportError:
# Dummy module.
class louie(object):
@staticmethod
def send(*args, **kw):
pass
from schevo import base
from schevo import change
from schevo.change import CREATE, UPDATE, DELETE
from schevo.constant import UNASSIGNED
from schevo.counter import schema_counter
from schevo import error
from schevo.entity import Entity
from schevo.expression import Expression
from schevo.extent import Extent
from schevo.field import Entity as EntityField
from schevo.field import not_fget
from schevo.lib import module
from schevo.mt.dummy import dummy_lock
from schevo.namespace import NamespaceExtension
from schevo.placeholder import Placeholder
import schevo.schema
from schevo.signal import TransactionExecuted
from schevo.trace import log
from schevo.transaction import (
CallableWrapper, Combination, Initialize, Populate, Transaction)
class Database(base.Database):
"""Schevo database, format 2.
See doc/SchevoInternalDatabaseStructures.txt for detailed information on
data structures.
"""
# By default, don't dispatch signals. Set to True to dispatch
# TransactionExecuted signals.
dispatch = False
# See dummy_lock documentation.
read_lock = dummy_lock
write_lock = dummy_lock
def __init__(self, backend):
"""Create a database.
- `backend`: The storage backend instance to use.
"""
self._sync_count = 0
self.backend = backend
# Aliases to classes in the backend.
self._BTree = backend.BTree
self._PDict = backend.PDict
self._PList = backend.PList
self._conflict_exceptions = getattr(backend, 'conflict_exceptions', ())
self._root = backend.get_root()
# Shortcuts to coarse-grained commit and rollback.
self._commit = backend.commit
self._rollback = backend.rollback
# Keep track of schema modules remembered.
self._remembered = []
# Initialization.
self._create_schevo_structures()
self._commit()
# Index to extent instances assigned by _sync.
self._extents = {}
# Index to entity classes assigned by _sync.
self._entity_classes = {}
# Vars used in transaction processing.
self._bulk_mode = False
self._executing = []
# Shortcuts.
schevo = self._root['SCHEVO']
self._extent_name_id = schevo['extent_name_id']
self._extent_maps_by_id = schevo['extents']
self._update_extent_maps_by_name()
# Plugin support.
self._plugins = []
def __repr__(self):
return '<Database %r :: V %r>' % (self.label, self.version)
@property
def _extent_id_name(self):
return dict((v, k) for k, v in self._extent_name_id.items())
def close(self):
"""Close the database."""
assert log(1, 'Stopping plugins.')
p = self._plugins
while p:
assert log(2, 'Stopping', p)
p.pop().close()
assert log(1, 'Closing storage.')
self.backend.close()
remembered = self._remembered
while remembered:
module.forget(remembered.pop())
def execute(self, *transactions, **kw):
"""Execute transaction(s)."""
if self._executing:
# Pass-through outer transactions.
return self._execute(*transactions, **kw)
else:
# Try outer transactions up to 10 times if conflicts occur.
remaining_attempts = 10
while remaining_attempts > 0:
try:
return self._execute(*transactions, **kw)
except self._conflict_exceptions:
remaining_attempts -= 1
for tx in transactions:
tx._executing = False
raise error.BackendConflictError()
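    # Illustrative sketch ('Frob' and its field are hypothetical, and the
    # extent's t.create transaction namespace is assumed): an outermost
    # execute() retries up to 10 times on backend conflict exceptions, and
    # the optional bulk_mode keyword minimizes per-transaction metadata.
    #
    #     tx = db.extent('Frob').t.create(name=u'Widget')
    #     result = db.execute(tx, bulk_mode=True)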
def _execute(self, *transactions, **kw):
strict = kw.get('strict', True)
executing = self._executing
if len(transactions) == 0:
raise RuntimeError('Must supply at least one transaction.')
if len(transactions) > 1:
if not executing:
raise RuntimeError(
'Must supply only one top-level transaction.')
else:
# Multiple transactions are treated as a single
# transaction containing subtransactions.
tx = Combination(transactions)
else:
tx = transactions[0]
if tx._executed:
            raise error.TransactionAlreadyExecuted(tx)
if not executing:
# Bulk mode can only be set on an outermost transaction
# and effects all inner transactions.
self._bulk_mode = kw.get('bulk_mode', False)
# Outermost transaction must be executed strict.
strict = True
# Bulk mode minimizes transaction metadata.
bulk_mode = self._bulk_mode
executing.append(tx)
assert log(1, 'Begin executing [%i]' % len(executing), tx)
try:
retval = tx._execute(self)
assert log(2, 'Result was', repr(retval))
# Enforce any indices relaxed by the transaction.
for extent_name, index_spec in frozenset(tx._relaxed):
assert log(2, 'Enforcing index', extent_name, index_spec)
self._enforce_index_field_ids(extent_name, *index_spec)
# If the transaction must be executed with strict
# validation, perform that validation now.
if strict:
c = tx._changes_requiring_validation
assert log(
2, 'Validating', len(c), 'changes requiring validation')
self._validate_changes(c)
except Exception, e:
assert log(1, e, 'was raised; undoing side-effects.')
if bulk_mode:
assert log(2, 'Bulk Mode transaction; storage rollback.')
self._rollback()
elif len(executing) == 1:
assert log(2, 'Outer transaction; storage rollback.')
self._rollback()
else:
assert log(2, 'Inner transaction; inverting.')
inversions = tx._inversions
while len(inversions):
method, args, kw = inversions.pop()
# Make sure the inverse operation doesn't append
# an inversion itself.
self._executing = None
# Perform the inversion.
method(*args, **kw)
# Restore state.
self._executing = executing
# Get rid of the current transaction on the stack since
# we're done undoing it.
executing.pop()
# Allow exception to bubble up.
raise
assert log(1, ' Done executing [%i]' % len(executing), tx)
tx._executed = True
# Post-transaction
if bulk_mode and len(executing) > 1:
assert log(2, 'Bulk Mode inner transaction.')
e2 = executing[-2]
e1 = executing[-1]
if not strict:
e2._changes_requiring_validation.extend(
e1._changes_requiring_validation)
elif bulk_mode:
assert log(2, 'Bulk Mode outer transaction; storage commit.')
# Done executing the outermost transaction. Use
# Durus-based commit.
self._commit()
elif len(executing) > 1:
assert log(2, 'Inner transaction; record inversions and changes.')
# Append the inversions from this transaction to the next
# outer transaction.
e2 = executing[-2]
e1 = executing[-1]
e2._inversions.extend(e1._inversions)
# Also append the changes made from this transaction.
e2._changes_requiring_notification.extend(
e1._changes_requiring_notification)
if not strict:
e2._changes_requiring_validation.extend(
e1._changes_requiring_validation)
else:
assert log(2, 'Outer transaction; storage commit.')
# Done executing the outermost transaction. Use
# Durus-based commit.
self._commit()
# Send a signal if told to do so.
if self.dispatch:
assert log(2, 'Dispatching TransactionExecuted signal.')
louie.send(TransactionExecuted, sender=self, transaction=tx)
executing.pop()
return retval
def extent(self, extent_name):
"""Return the named extent instance."""
return self._extents[extent_name]
def extent_names(self):
"""Return a sorted list of extent names."""
return sorted(self._extent_maps_by_name.keys())
def extents(self):
"""Return a list of extent instances sorted by name."""
extent = self.extent
return [extent(name) for name in self.extent_names()]
def pack(self):
"""Pack the database."""
if os.environ.get('SCHEVO_NOPACK', '').strip() != '1':
self.backend.pack()
def populate(self, sample_name=''):
"""Populate the database with sample data."""
tx = Populate(sample_name)
self.execute(tx)
@property
def format(self):
return self._root['SCHEVO']['format']
@property
def schema_source(self):
return self._root['SCHEVO']['schema_source']
@property
def version(self):
return self._root['SCHEVO']['version']
def _get_label(self):
SCHEVO = self._root['SCHEVO']
if 'label' not in SCHEVO:
# Older database, no label stored in it.
return u'Schevo Database'
else:
return SCHEVO['label']
def _set_label(self, new_label):
if self._executing:
raise error.DatabaseExecutingTransaction(
'Cannot change database label while executing a transaction.')
self._root['SCHEVO']['label'] = unicode(new_label)
self._commit()
label = property(_get_label, _set_label)
_label = property(_get_label, _set_label)
def _append_change(self, typ, extent_name, oid):
executing = self._executing
if executing:
info = (typ, extent_name, oid)
tx = executing[-1]
tx._changes_requiring_validation.append(info)
if not self._bulk_mode:
tx._changes_requiring_notification.append(info)
def _append_inversion(self, method, *args, **kw):
"""Append an inversion to a transaction if one is being
executed."""
if self._bulk_mode:
return
executing = self._executing
if executing:
executing[-1]._inversions.append((method, args, kw))
def _by_entity_oids(self, extent_name, *index_spec):
"""Return a list of OIDs from an extent sorted by index_spec."""
extent_map = self._extent_map(extent_name)
indices = extent_map['indices']
index_map = extent_map['index_map']
# Separate index_spec into two tuples, one containing field
# names and one containing 'ascending' bools.
field_names = []
ascending = []
for field_name in index_spec:
if field_name.startswith('-'):
field_names.append(field_name[1:])
ascending.append(False)
else:
field_names.append(field_name)
ascending.append(True)
index_spec = _field_ids(extent_map, field_names)
if index_spec not in indices:
# Specific index not found; look for an index where
# index_spec matches the beginning of that index's spec.
if index_spec not in index_map:
# None found.
raise error.IndexDoesNotExist(
extent_name,
_field_names(extent_map, index_spec),
)
# Use the first index found.
index_spec = index_map[index_spec][0]
oids = []
unique, branch = indices[index_spec]
_walk_index(branch, ascending, oids)
return oids
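    # Sketch of the index_spec convention parsed above ('Person', 'surname'
    # and 'birthdate' are hypothetical): a leading '-' requests descending
    # order for that field, so this sorts by surname ascending, then
    # birthdate descending.
    #
    #     oids = db._by_entity_oids('Person', 'surname', '-birthdate')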
def _create_entity(self, extent_name, fields, related_entities,
oid=None, rev=None):
"""Create a new entity in an extent; return the oid.
- `extent_name`: Name of the extent to create a new entity in.
- `fields`: Dictionary of field_name:field_value mappings, where
each field_value is the value to be stored in the database, as
returned by a field instance's `_dump` method.
- `related_entities`: Dictionary of field_name:related_entity_set
mappings, where each related_entity_set is the set of entities
stored in the field's structure, as returned by a field
instance's `_entities_in_value` method.
- `oid`: (optional) Specific OID to create the entity as; used
for importing data, e.g. from an XML document.
- `rev`: (optional) Specific revision to create the entity as; see
`oid`.
"""
extent_map = self._extent_map(extent_name)
entities = extent_map['entities']
old_next_oid = extent_map['next_oid']
field_name_id = extent_map['field_name_id']
extent_name_id = self._extent_name_id
extent_maps_by_id = self._extent_maps_by_id
indices_added = []
ia_append = indices_added.append
links_created = []
lc_append = links_created.append
BTree = self._BTree
PDict = self._PDict
try:
if oid is None:
oid = extent_map['next_oid']
extent_map['next_oid'] += 1
if rev is None:
rev = 0
if oid in entities:
raise error.EntityExists(extent_name, oid)
# Create fields_by_id dict with field-id:field-value items.
fields_by_id = PDict()
for name, value in fields.iteritems():
field_id = field_name_id[name]
fields_by_id[field_id] = value
# Create related_entities_by_id dict with
# field-id:related-entities items.
new_links = []
nl_append = new_links.append
related_entities_by_id = PDict()
for name, related_entity_set in related_entities.iteritems():
field_id = field_name_id[name]
related_entities_by_id[field_id] = related_entity_set
for placeholder in related_entity_set:
other_extent_id = placeholder.extent_id
other_oid = placeholder.oid
nl_append((field_id, other_extent_id, other_oid))
# Make sure fields that weren't specified are set to
# UNASSIGNED.
setdefault = fields_by_id.setdefault
for field_id in field_name_id.itervalues():
setdefault(field_id, UNASSIGNED)
# Update index mappings.
indices = extent_map['indices']
for index_spec in indices.iterkeys():
field_values = tuple(fields_by_id[field_id]
for field_id in index_spec)
# Find out if the index has been relaxed.
relaxed_specs = self._relaxed[extent_name]
if index_spec in relaxed_specs:
txns, relaxed = relaxed_specs[index_spec]
else:
relaxed = None
_index_add(extent_map, index_spec, relaxed, oid, field_values,
BTree)
ia_append((extent_map, index_spec, oid, field_values))
# Update links from this entity to another entity.
referrer_extent_id = extent_name_id[extent_name]
for referrer_field_id, other_extent_id, other_oid in new_links:
other_extent_map = extent_maps_by_id[other_extent_id]
try:
other_entity_map = other_extent_map['entities'][other_oid]
except KeyError:
field_id_name = extent_map['field_id_name']
field_name = field_id_name[referrer_field_id]
other_extent_map = extent_maps_by_id[other_extent_id]
other_extent_name = other_extent_map['name']
raise error.EntityDoesNotExist(
other_extent_name, field_name=field_name)
# Add a link to the other entity.
links = other_entity_map['links']
link_key = (referrer_extent_id, referrer_field_id)
if link_key not in links: # XXX Should already be there.
links[link_key] = BTree()
links[link_key][oid] = None
other_entity_map['link_count'] += 1
lc_append((other_entity_map, links, link_key, oid))
# Create the actual entity.
entity_map = entities[oid] = PDict()
entity_map['fields'] = fields_by_id
# XXX flesh out links based on who is capable of linking
# to this one.
entity_map['link_count'] = 0
entity_map['links'] = PDict()
entity_map['related_entities'] = related_entities_by_id
entity_map['rev'] = rev
# Update the extent.
extent_map['len'] += 1
# Allow inversion of this operation.
self._append_inversion(self._delete_entity, extent_name, oid)
# Keep track of changes.
append_change = self._append_change
append_change(CREATE, extent_name, oid)
return oid
except:
# Revert changes made during create attempt.
for _e, _i, _o, _f in indices_added:
_index_remove(_e, _i, _o, _f)
for other_entity_map, links, link_key, oid in links_created:
del links[link_key][oid]
other_entity_map['link_count'] -= 1
extent_map['next_oid'] = old_next_oid
raise
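    # Shape of the arguments consumed above ('Frob' and its field are
    # hypothetical): `fields` maps field names to values as produced by
    # Field._dump, and `related_entities` maps field names to the sets of
    # Placeholder objects produced by Field._entities_in_value.
    #
    #     oid = db._create_entity('Frob', {'name': u'Widget'}, {})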
def _delete_entity(self, extent_name, oid):
entity_map, extent_map = self._entity_extent_map(extent_name, oid)
all_field_ids = set(extent_map['field_id_name'].iterkeys())
extent_id = extent_map['id']
extent_name_id = self._extent_name_id
extent_maps_by_id = self._extent_maps_by_id
field_name_id = extent_map['field_name_id']
link_count = entity_map['link_count']
links = entity_map['links']
# Disallow deletion if other entities refer to this one,
# unless all references are merely from ourself or an entity
# that will be deleted.
deletes = set()
executing = self._executing
if executing:
tx = executing[-1]
deletes.update([(extent_name_id[del_entity_cls.__name__], del_oid)
for del_entity_cls, del_oid in tx._deletes])
deletes.update([(extent_name_id[del_entity_cls.__name__], del_oid)
for del_entity_cls, del_oid in tx._known_deletes])
for (other_extent_id, other_field_id), others in links.iteritems():
for other_oid in others:
if (other_extent_id, other_oid) in deletes:
continue
# Give up as soon as we find one outside reference.
if (other_extent_id, other_oid) != (extent_id, oid):
entity = self._entity(extent_name, oid)
referring_entity = self._entity(other_extent_id, other_oid)
other_field_name = extent_maps_by_id[other_extent_id][
'field_id_name'][other_field_id]
raise error.DeleteRestricted(
entity=entity,
referring_entity=referring_entity,
referring_field_name=other_field_name
)
# Get old values for use in a potential inversion.
old_fields = self._entity_fields(extent_name, oid)
old_related_entities = self._entity_related_entities(extent_name, oid)
old_rev = entity_map['rev']
# Remove index mappings.
indices = extent_map['indices']
fields_by_id = entity_map['fields']
for index_spec in indices.iterkeys():
field_values = tuple(fields_by_id.get(f_id, UNASSIGNED)
for f_id in index_spec)
_index_remove(extent_map, index_spec, oid, field_values)
# Delete links from this entity to other entities.
related_entities = entity_map['related_entities']
referrer_extent_id = extent_name_id[extent_name]
for referrer_field_id, related_set in related_entities.iteritems():
# If a field once existed, but no longer does, there will
# still be a related entity set for it in related_entities.
# Only process the fields that still exist.
if referrer_field_id in all_field_ids:
for other_value in related_set:
# Remove the link to the other entity.
other_extent_id = other_value.extent_id
other_oid = other_value.oid
link_key = (referrer_extent_id, referrer_field_id)
other_extent_map = extent_maps_by_id[other_extent_id]
if other_oid in other_extent_map['entities']:
other_entity_map = other_extent_map[
'entities'][other_oid]
links = other_entity_map['links']
other_links = links[link_key]
# The following check is due to scenarios like this:
# Entity A and entity B are both being deleted in a
# cascade delete scenario. Entity B refers to entity A.
# Entity A has already been deleted. Entity B is now
# being deleted. We must now ignore any information
# about entity A that is attached to entity B.
if oid in other_links:
del other_links[oid]
other_entity_map['link_count'] -= 1
del extent_map['entities'][oid]
extent_map['len'] -= 1
# Allow inversion of this operation.
self._append_inversion(
self._create_entity, extent_name, old_fields,
old_related_entities, oid, old_rev)
# Keep track of changes.
append_change = self._append_change
append_change(DELETE, extent_name, oid)
def _enforce_index(self, extent_name, *index_spec):
"""Call _enforce_index after converting index_spec from field
names to field IDs."""
extent_map = self._extent_map(extent_name)
index_spec = _field_ids(extent_map, index_spec)
return self._enforce_index_field_ids(extent_name, *index_spec)
def _enforce_index_field_ids(self, extent_name, *index_spec):
"""Validate and begin enforcing constraints on the specified
index if it was relaxed within the currently-executing
transaction."""
executing = self._executing
if not executing:
# No-op if called outside a transaction.
return
# Find the index to re-enforce.
extent_map = self._extent_map(extent_name)
indices = extent_map['indices']
if index_spec not in indices:
raise error.IndexDoesNotExist(
extent_name,
_field_names(extent_map, index_spec),
)
# Find out if it has been relaxed.
current_txn = executing[-1]
relaxed = self._relaxed[extent_name]
txns, added = relaxed.get(index_spec, ([], []))
if not txns:
# Was never relaxed; no-op.
return
if current_txn in txns:
current_txn._relaxed.remove((extent_name, index_spec))
txns.remove(current_txn)
# If no more transactions have relaxed this index, enforce it.
if not txns:
BTree = self._BTree
for _extent_map, _index_spec, _oid, _field_values in added:
_index_validate(_extent_map, _index_spec, _oid, _field_values,
BTree)
def _entity(self, extent_name, oid):
"""Return the entity instance."""
EntityClass = self._entity_classes[extent_name]
return EntityClass(oid)
def _entity_field(self, extent_name, oid, name):
"""Return the value of a field in an entity in named extent
with given OID."""
entity_map, extent_map = self._entity_extent_map(extent_name, oid)
field_name_id = extent_map['field_name_id']
field_id = field_name_id[name]
value = entity_map['fields'][field_id]
return value
def _entity_field_rev(self, extent_name, oid, name):
"""Return a tuple of (value, rev) of a field in an entity in
named extent with given OID."""
value = self._entity_field(extent_name, oid, name)
rev = self._entity_rev(extent_name, oid)
return value, rev
def _entity_fields(self, extent_name, oid):
"""Return a dictionary of field values for an entity in
`extent` with given OID."""
entity_classes = self._entity_classes
entity_map, extent_map = self._entity_extent_map(extent_name, oid)
field_id_name = extent_map['field_id_name']
fields = {}
for field_id, value in entity_map['fields'].iteritems():
# During database evolution, it may turn out that fields
# get removed. For time efficiency reasons, Schevo does
# not iterate through all entities to remove existing
# data. Therefore, when getting entity fields from the
# database here, ignore fields that exist in the entity
# but no longer exist in the extent.
field_name = field_id_name.get(field_id, None)
if field_name:
fields[field_name] = value
return fields
def _entity_links(self, extent_name, oid, other_extent_name=None,
other_field_name=None, return_count=False):
"""Return dictionary of (extent_name, field_name): entity_list
pairs, or list of linking entities if `other_extent_name` and
`other_field_name` are supplied; return link count instead if
`return_count` is True."""
assert log(1, '_entity_links', extent_name, oid, other_extent_name,
other_field_name, return_count)
entity_classes = self._entity_classes
entity_map = self._entity_map(extent_name, oid)
entity_links = entity_map['links']
extent_maps_by_id = self._extent_maps_by_id
if other_extent_name is not None and other_field_name is not None:
# Both extent name and field name were provided.
other_extent_map = self._extent_map(other_extent_name)
other_extent_id = other_extent_map['id']
try:
other_field_id = other_extent_map['field_name_id'][
other_field_name]
except KeyError:
raise error.FieldDoesNotExist(
other_extent_name, other_field_name)
key = (other_extent_id, other_field_id)
# Default to a dict since it has the same API as a BTree
# for our use but is faster and will stay empty anyway.
btree = entity_links.get(key, {})
if return_count:
count = len(btree)
assert log(2, 'returning len(btree)', count)
return count
else:
EntityClass = entity_classes[other_extent_name]
others = [EntityClass(oid) for oid in btree]
return others
# Shortcut if we only care about the count, with no specificity.
link_count = entity_map['link_count']
if return_count and other_extent_name is None:
assert log(2, 'returning link_count', link_count)
return link_count
# Build links tree.
specific_extent_name = other_extent_name
if return_count:
links = 0
else:
links = {}
if link_count == 0:
# No links; no need to traverse.
assert log(2, 'no links - returning', links)
return links
for key, btree in entity_links.iteritems():
other_extent_id, other_field_id = key
other_extent_map = extent_maps_by_id[other_extent_id]
other_extent_name = other_extent_map['name']
if (specific_extent_name
and specific_extent_name != other_extent_name
):
assert log(2, 'Skipping', other_extent_name)
continue
if return_count:
links += len(btree)
else:
other_field_name = other_extent_map['field_id_name'][
other_field_id]
if specific_extent_name:
link_key = other_field_name
else:
link_key = (other_extent_name, other_field_name)
EntityClass = entity_classes[other_extent_name]
others = [EntityClass(oid) for oid in btree]
if others:
links[link_key] = others
if return_count:
assert log(2, 'returning links', links)
return links
def _entity_related_entities(self, extent_name, oid):
"""Return a dictionary of related entity sets for an entity in
`extent` with given OID."""
entity_classes = self._entity_classes
entity_map, extent_map = self._entity_extent_map(extent_name, oid)
field_id_name = extent_map['field_id_name']
related_entities = {}
for field_id, related in entity_map['related_entities'].iteritems():
# During database evolution, it may turn out that fields
# get removed. For time efficiency reasons, Schevo does
# not iterate through all entities to remove existing
# data. Therefore, when getting entity fields from the
# database here, ignore fields that exist in the entity
# but no longer exist in the extent.
field_name = field_id_name.get(field_id, None)
if field_name:
related_entities[field_name] = related
return related_entities
def _entity_rev(self, extent_name, oid):
"""Return the revision of an entity in `extent` with given
OID."""
entity_map = self._entity_map(extent_name, oid)
return entity_map['rev']
def _extent_contains_oid(self, extent_name, oid):
extent_map = self._extent_map(extent_name)
return oid in extent_map['entities']
def _extent_len(self, extent_name):
"""Return the number of entities in the named extent."""
extent_map = self._extent_map(extent_name)
return extent_map['len']
def _extent_next_oid(self, extent_name):
"""Return the next OID to be assigned in the named extent."""
extent_map = self._extent_map(extent_name)
return extent_map['next_oid']
def _find_entity_oids(self, extent_name, criterion):
"""Return sequence of entity OIDs matching given field value(s)."""
assert log(1, extent_name, criterion)
extent_map = self._extent_map(extent_name)
entity_maps = extent_map['entities']
# No criterion: return all entities.
if criterion is None:
assert log(2, 'Return all oids.')
return list(entity_maps.keys())
# Equality intersection: use optimized lookup.
try:
criteria = criterion.single_extent_field_equality_criteria()
except ValueError:
pass
else:
extent_names = frozenset(key._extent for key in criteria)
if len(extent_names) > 1:
raise ValueError('Must use fields from same extent.')
return self._find_entity_oids_field_equality(
extent_name, criteria)
# More complex lookup.
return self._find_entity_oids_general_criterion(extent_name, criterion)
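    # Illustrative sketch ('Person' and 'age' are hypothetical, and the
    # extent's `f` field-class namespace is assumed): a criterion is an
    # Expression over field classes and operators. A pure field-equality
    # intersection takes the optimized path above; anything else falls
    # through to _find_entity_oids_general_criterion.
    #
    #     oids = db._find_entity_oids('Person', db.extent('Person').f.age == 21)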
def _find_entity_oids_general_criterion(self, extent_name, criterion):
if (isinstance(criterion.left, Expression)
and isinstance(criterion.right, Expression)
):
left_oids = self._find_entity_oids_general_criterion(
extent_name, criterion.left)
right_oids = self._find_entity_oids_general_criterion(
extent_name, criterion.right)
return criterion.op(left_oids, right_oids)
elif (isinstance(criterion.left, type)
and issubclass(criterion.left, base.Field)
):
return self._find_entity_oids_field_criterion(
extent_name, criterion)
else:
raise ValueError('Cannot evaluate criterion', criterion)
def _find_entity_oids_field_criterion(self, extent_name, criterion):
extent_map = self._extent_map(extent_name)
entity_maps = extent_map['entities']
FieldClass, value, op = criterion.left, criterion.right, criterion.op
# Make sure extent name matches.
if FieldClass._extent.name != extent_name:
raise ValueError(
'Criterion extent does not match query extent.', criterion)
# Optimize for equality and inequality.
if op == operator.eq:
return set(self._find_entity_oids_field_equality(
extent_name, {FieldClass: value}))
if op == operator.ne:
all = entity_maps.keys()
matching = self._find_entity_oids_field_equality(
extent_name, {FieldClass: value})
return set(all) - set(matching)
# Create a writable field to convert the value and get its
# _dump'd representation.
field_id = extent_map['field_name_id'][FieldClass.name]
EntityClass = self._entity_classes[extent_name]
FieldClass = EntityClass._field_spec[FieldClass.name]
class TemporaryField(FieldClass):
readonly = False
field = TemporaryField(None)
field.set(value)
value = field._dump()
# Additional operators.
# XXX: Brute force for now.
if op in (operator.lt, operator.le, operator.gt, operator.ge):
results = []
append = results.append
for oid, entity_map in entity_maps.iteritems():
if op(entity_map['fields'].get(field_id, UNASSIGNED), value):
append(oid)
return set(results)
def _find_entity_oids_field_equality(self, extent_name, criteria):
extent_map = self._extent_map(extent_name)
entity_maps = extent_map['entities']
EntityClass = self._entity_classes[extent_name]
extent_name_id = self._extent_name_id
indices = extent_map['indices']
normalized_index_map = extent_map['normalized_index_map']
field_id_name = extent_map['field_id_name']
field_name_id = extent_map['field_name_id']
# Convert from field_name:value to field_id:value.
field_id_value = {}
field_name_value = {}
for field_class, value in criteria.iteritems():
field_name = field_class.name
try:
field_id = field_name_id[field_name]
except KeyError:
raise error.FieldDoesNotExist(extent_name, field_name)
# Create a writable field to convert the value and get its
# _dump'd representation.
class TemporaryField(field_class):
readonly = False
field = TemporaryField(None)
field.set(value)
value = field._dump()
field_id_value[field_id] = value
field_name_value[field_name] = value
# Get results, using indexes and shortcuts where possible.
results = []
field_ids = tuple(sorted(field_id_value))
assert log(3, 'field_ids', field_ids)
len_field_ids = len(field_ids)
# First, see if we can take advantage of entity links.
if len_field_ids == 1:
field_id = field_ids[0]
field_name = field_id_name[field_id]
value = field_name_value[field_name]
if isinstance(value, Entity):
# We can take advantage of entity links.
entity_map = self._entity_map(value._extent.name, value._oid)
entity_links = entity_map['links']
extent_id = extent_map['id']
key = (extent_id, field_id)
linkmap = entity_links.get(key, {})
results = linkmap.keys()
return results
# Next, see if the fields given can be found in an index. If
# so, use the index to return matches.
index_spec = None
if field_ids in normalized_index_map:
for spec in normalized_index_map[field_ids]:
if len(spec) == len_field_ids:
index_spec = spec
break
if index_spec is not None:
# We found an index to use.
assert log(2, 'Use index spec:', index_spec)
unique, branch = indices[index_spec]
match = True
for field_id in index_spec:
field_value = field_id_value[field_id]
if field_value not in branch:
# No matches found.
match = False
break
branch = branch[field_value]
if match:
# Now we're at a leaf that matches all of the
# criteria, so return the OIDs in that leaf.
results = list(branch.keys())
else:
# Fields aren't indexed, so use brute force.
assert log(2, 'Use brute force.')
append = results.append
for oid, entity_map in entity_maps.iteritems():
fields = entity_map['fields']
match = True
for field_id, value in field_id_value.iteritems():
if fields.get(field_id, UNASSIGNED) != value:
match = False
break
if match:
append(oid)
assert log(2, 'Result count', len(results))
return results
def _relax_index(self, extent_name, *index_spec):
"""Relax constraints on the specified index until a matching
enforce_index is called, or the currently-executing
transaction finishes, whichever occurs first."""
executing = self._executing
if not executing:
raise RuntimeError('Indexes can only be relaxed inside '
'transaction execution.')
# ID-ify the index_spec.
extent_map = self._extent_map(extent_name)
index_spec = _field_ids(extent_map, index_spec)
# Find the index to relax.
indices = extent_map['indices']
if index_spec not in indices:
raise error.IndexDoesNotExist(
extent_name,
_field_names(extent_map, index_spec),
)
# Keep track of the relaxation.
current_txn = executing[-1]
relaxed = self._relaxed[extent_name]
txns, added = relaxed.setdefault(index_spec, ([], []))
txns.append(current_txn)
current_txn._relaxed.add((extent_name, index_spec))
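    # Sketch of the relax/enforce pairing ('Item' and 'position' are
    # hypothetical). While a transaction executes, a key constraint can be
    # suspended, several entities rearranged, then the index re-validated:
    #
    #     db._relax_index('Item', 'position')
    #     # ... update several Item entities ...
    #     db._enforce_index('Item', 'position')
    #
    # Any index still relaxed when the relaxing transaction finishes is
    # re-enforced in _execute above.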
def _set_extent_next_oid(self, extent_name, next_oid):
extent_map = self._extent_map(extent_name)
extent_map['next_oid'] = next_oid
def _update_entity(self, extent_name, oid, fields, related_entities,
rev=None):
"""Update an existing entity in an extent.
- `extent_name`: Name of the extent to create a new entity in.
- `oid`: OID of the entity to update.
- `fields`: Dictionary of field_name:field_value mappings to change,
where each field_value is the value to be stored in the database, as
returned by a field instance's `_dump` method.
- `related_entities`: Dictionary of field_name:related_entity_set
mappings, where each related_entity_set is the set of entities
stored in the field's structure, as returned by a field instance's
`_entities_in_value` method.
- `rev`: (optional) Specific revision to update the entity to.
"""
# XXX: Could be optimized to update mappings only when
# necessary.
entity_classes = self._entity_classes
entity_map, extent_map = self._entity_extent_map(extent_name, oid)
field_name_id = extent_map['field_name_id']
extent_name_id = self._extent_name_id
extent_maps_by_id = self._extent_maps_by_id
indices_added = []
indices_removed = []
new_links = []
links_created = []
links_deleted = []
ia_append = indices_added.append
ir_append = indices_removed.append
nl_append = new_links.append
lc_append = links_created.append
ld_append = links_deleted.append
BTree = self._BTree
try:
# Get old values for use in a potential inversion.
old_fields = self._entity_fields(extent_name, oid)
updating_related = len(related_entities) > 0
if updating_related:
old_related_entities = self._entity_related_entities(
extent_name, oid)
else:
old_related_entities = {}
old_rev = entity_map['rev']
# Manage entity references.
if updating_related:
for name, related_entity_set in related_entities.iteritems():
field_id = field_name_id[name]
for placeholder in related_entity_set:
other_extent_id = placeholder.extent_id
other_oid = placeholder.oid
nl_append((field_id, other_extent_id, other_oid))
# Get fields, and set UNASSIGNED for any fields that are
# new since the last time the entity was stored.
fields_by_id = entity_map['fields']
all_field_ids = set(extent_map['field_id_name'])
new_field_ids = all_field_ids - set(fields_by_id)
fields_by_id.update(dict(
(field_id, UNASSIGNED) for field_id in new_field_ids))
# Create ephemeral fields for creating new mappings.
new_fields_by_id = dict(fields_by_id)
for name, value in fields.iteritems():
new_fields_by_id[field_name_id[name]] = value
if updating_related:
new_related_entities_by_id = dict(
(field_name_id[name], related_entities[name])
for name in related_entities
)
# Remove existing index mappings.
indices = extent_map['indices']
for index_spec in indices.iterkeys():
field_values = tuple(fields_by_id[field_id]
for field_id in index_spec)
# Find out if the index has been relaxed.
relaxed_specs = self._relaxed[extent_name]
if index_spec in relaxed_specs:
txns, relaxed = relaxed_specs[index_spec]
else:
relaxed = None
_index_remove(extent_map, index_spec, oid, field_values)
ir_append((extent_map, index_spec, relaxed, oid, field_values))
if updating_related:
# Delete links from this entity to other entities.
related_entities_by_id = entity_map['related_entities']
referrer_extent_id = extent_name_id[extent_name]
new_field_ids = frozenset(new_fields_by_id)
for (referrer_field_id,
related_set) in related_entities_by_id.iteritems():
# If a field once existed, but no longer does, there will
# still be a related entity set for it in related_entities.
# Only process the fields that still exist.
if referrer_field_id in all_field_ids:
# Remove only the links that no longer exist.
new_related_entities = new_related_entities_by_id.get(
referrer_field_id, set())
for other_value in related_set - new_related_entities:
# Remove the link to the other entity.
other_extent_id = other_value.extent_id
other_oid = other_value.oid
link_key = (referrer_extent_id, referrer_field_id)
other_extent_map = extent_maps_by_id[
other_extent_id]
other_entity_map = other_extent_map['entities'][
other_oid]
links = other_entity_map['links']
other_links = links[link_key]
del other_links[oid]
other_entity_map['link_count'] -= 1
ld_append((other_entity_map, links, link_key, oid))
# Create new index mappings.
for index_spec in indices.iterkeys():
field_values = tuple(new_fields_by_id[field_id]
for field_id in index_spec)
# Find out if the index has been relaxed.
relaxed_specs = self._relaxed[extent_name]
if index_spec in relaxed_specs:
txns, relaxed = relaxed_specs[index_spec]
else:
relaxed = None
_index_add(extent_map, index_spec, relaxed, oid, field_values,
BTree)
ia_append((extent_map, index_spec, oid, field_values))
if updating_related:
# Update links from this entity to another entity.
referrer_extent_id = extent_name_id[extent_name]
for referrer_field_id, other_extent_id, other_oid in new_links:
other_extent_map = extent_maps_by_id[other_extent_id]
try:
other_entity_map = other_extent_map['entities'][
other_oid]
except KeyError:
field_id_name = extent_map['field_id_name']
field_name = field_id_name[referrer_field_id]
other_extent_map = extent_maps_by_id[other_extent_id]
other_extent_name = other_extent_map['name']
raise error.EntityDoesNotExist(
other_extent_name, field_name=field_name)
# Add a link to the other entity.
links = other_entity_map['links']
link_key = (referrer_extent_id, referrer_field_id)
if link_key not in links: # XXX Should already be there.
mapping = links[link_key] = BTree()
else:
mapping = links[link_key]
if oid not in mapping:
# Only add the link if it's not already there.
links[link_key][oid] = None
other_entity_map['link_count'] += 1
lc_append((other_entity_map, links, link_key, oid))
# Update actual fields and related entities.
for name, value in fields.iteritems():
fields_by_id[field_name_id[name]] = value
if updating_related:
for name, value in related_entities.iteritems():
related_entities_by_id[field_name_id[name]] = value
# Update revision.
if rev is None:
entity_map['rev'] += 1
else:
entity_map['rev'] = rev
# Allow inversion of this operation.
self._append_inversion(
self._update_entity, extent_name, oid, old_fields,
old_related_entities, old_rev)
# Keep track of changes.
append_change = self._append_change
append_change(UPDATE, extent_name, oid)
except:
# Revert changes made during update attempt.
for _e, _i, _o, _f in indices_added:
_index_remove(_e, _i, _o, _f)
for _e, _i, _r, _o, _f in indices_removed:
_index_add(_e, _i, _r, _o, _f, BTree)
for other_entity_map, links, link_key, oid in links_created:
del links[link_key][oid]
other_entity_map['link_count'] -= 1
for other_entity_map, links, link_key, oid in links_deleted:
links[link_key][oid] = None
other_entity_map['link_count'] += 1
raise
def _create_extent(self, extent_name, field_names, entity_field_names,
key_spec=None, index_spec=None):
"""Create a new extent with a given name."""
BTree = self._BTree
PList = self._PList
PDict = self._PDict
if extent_name in self._extent_maps_by_name:
raise error.ExtentExists(extent_name)
if key_spec is None:
key_spec = []
if index_spec is None:
index_spec = []
extent_map = PDict()
extent_id = self._unique_extent_id()
indices = extent_map['indices'] = PDict()
extent_map['index_map'] = PDict()
normalized_index_map = extent_map[
'normalized_index_map'] = PDict()
extent_map['entities'] = BTree()
field_id_name = extent_map['field_id_name'] = PDict()
field_name_id = extent_map['field_name_id'] = PDict()
extent_map['id'] = extent_id
extent_map['len'] = 0
extent_map['name'] = extent_name
extent_map['next_oid'] = 1
self._extent_name_id[extent_name] = extent_id
self._extent_maps_by_id[extent_id] = extent_map
self._extent_maps_by_name[extent_name] = extent_map
# Give each field name a unique ID.
for name in field_names:
field_id = self._unique_field_id(extent_name)
field_id_name[field_id] = name
field_name_id[name] = field_id
# Convert field names to field IDs in key spec and create
# index structures.
for field_names in key_spec:
i_spec = _field_ids(extent_map, field_names)
_create_index(extent_map, i_spec, True, BTree, PList)
# Convert field names to field IDs in index spec and create
# index structures.
for field_names in index_spec:
i_spec = _field_ids(extent_map, field_names)
# Although we tell it unique=False, it may find a subset
# key, which will cause this superset to be unique=True.
_create_index(extent_map, i_spec, False, BTree, PList)
# Convert field names to field IDs for entity field names.
extent_map['entity_field_ids'] = _field_ids(
extent_map, entity_field_names)
def _delete_extent(self, extent_name):
"""Remove a named extent."""
# XXX: Need to check for links to any entity in this extent,
# and fail to remove it if so.
#
# Iterate through all entities in the extent to delete, and
# remove bidirectional link information from any entities they
# point to.
extent_map = self._extent_map(extent_name)
extent_id = extent_map['id']
for oid, entity_map in extent_map['entities'].iteritems():
related_entities = entity_map['related_entities'].iteritems()
for field_id, related_entity_set in related_entities:
for related_entity in related_entity_set:
rel_extent_id = related_entity.extent_id
rel_oid = related_entity.oid
rel_extent_map = self._extent_maps_by_id.get(
rel_extent_id, None)
if rel_extent_map is not None:
rel_entity_map = rel_extent_map['entities'][rel_oid]
rel_links = rel_entity_map['links']
key = (extent_id, field_id)
if key in rel_links:
link_count = len(rel_links[key])
del rel_links[key]
rel_entity_map['link_count'] -= link_count
# Delete the extent.
del self._extent_name_id[extent_name]
del self._extent_maps_by_id[extent_id]
del self._extent_maps_by_name[extent_name]
def _create_schevo_structures(self):
"""Create or update Schevo structures in the database."""
root = self._root
PDict = self._PDict
if 'SCHEVO' not in root:
schevo = root['SCHEVO'] = PDict()
schevo['format'] = 2
schevo['version'] = 0
schevo['extent_name_id'] = PDict()
schevo['extents'] = PDict()
schevo['schema_source'] = None
def _entity_map(self, extent_name, oid):
"""Return an entity PDict corresponding to named
`extent` and OID."""
extent_map = self._extent_map(extent_name)
try:
entity_map = extent_map['entities'][oid]
except KeyError:
raise error.EntityDoesNotExist(extent_name, oid=oid)
return entity_map
def _entity_extent_map(self, extent_name, oid):
"""Return an (entity PDict, extent PDict)
tuple corresponding to named `extent` and OID."""
extent_map = self._extent_map(extent_name)
try:
entity_map = extent_map['entities'][oid]
except KeyError:
raise error.EntityDoesNotExist(extent_name, oid=oid)
return entity_map, extent_map
def _evolve(self, schema_source, version):
"""Evolve the database to a new schema definition.
- `schema_source`: String containing the source code for the
schema to be evolved to.
- `version`: Integer with the version number of the new schema
source. Must be the current database version, plus 1.
"""
current_version = self.version
expected_version = current_version + 1
        if version != expected_version:
raise error.DatabaseVersionMismatch(
current_version, expected_version, version)
def call(module, name):
fn = getattr(module, name, None)
if callable(fn):
tx = CallableWrapper(fn)
# Trick the database into not performing a
# storage-level commit.
self._executing = [Transaction()]
try:
self.execute(tx)
finally:
self._executing = []
# Load the new schema.
schema_name = schema_counter.next_schema_name()
schema_module = self._import_from_source(schema_source, schema_name)
try:
# Execute `before_evolve` function if defined.
call(schema_module, 'before_evolve')
# Perform first pass of evolution.
self._sync(schema_source, initialize=False, commit=False,
evolving=True)
# Execute `during_evolve` function if defined.
call(self._schema_module, 'during_evolve')
# Perform standard schema synchronization.
self._sync(schema_source, initialize=False, commit=False,
evolving=False)
# Execute `after_evolve` function if defined.
call(self._schema_module, 'after_evolve')
except:
self._rollback()
# Re-raise exception.
raise
else:
self._root['SCHEVO']['version'] = version
self._commit()
def _extent_map(self, extent_name):
"""Return an extent PDict corresponding to `extent_name`."""
try:
return self._extent_maps_by_name[extent_name]
except KeyError:
raise error.ExtentDoesNotExist(extent_name)
def _import_from_source(self, source, module_name):
"""Import a schema module from a string containing source code."""
# Now that prerequisites are loaded, load this schema.
schema_module = module.from_string(source, module_name)
# Remember the schema module.
module.remember(schema_module)
self._remembered.append(schema_module)
# Expose this database to the schema module.
schema_module.db = self
# Return the schema module.
return schema_module
def _initialize(self):
"""Populate the database with initial data."""
tx = Initialize()
self.execute(tx)
def _on_open(self):
"""Allow schema to run code after the database is opened."""
if hasattr(self, '_schema_module'):
# An empty database created without a schema source will
# not have a schema module.
fn = getattr(self._schema_module, 'on_open', None)
if callable(fn):
fn(self)
def _remove_stale_links(self, extent_id, field_id, FieldClass):
# Remove links from this field to other entities that are held
# in the structures for those other entities.
allow = FieldClass.allow
for other_name in allow:
other_extent_map = self._extent_map(other_name)
other_entities = other_extent_map['entities']
for other_entity in other_entities.itervalues():
other_link_count = other_entity['link_count']
other_links = other_entity['links']
referrer_key = (extent_id, field_id)
if referrer_key in other_links:
referrers = other_links[referrer_key]
other_link_count -= len(referrers)
del other_links[referrer_key]
other_entity['link_count'] = other_link_count
def _schema_format_compatibility_check(self, schema):
"""Return None if the given schema is compatible with this
database engine's format, or raise an error when the first
incompatibility is found.
- `schema`: The schema to check.
"""
pass
def _sync(self, schema_source=None, schema_version=None,
initialize=True, commit=True, evolving=False):
"""Synchronize the database with a schema definition.
- `schema_source`: String containing the source code for a
schema. If `None`, the schema source contained in the
database itself will be used.
- `schema_version`: If set, the schema version to use for a
newly-created database. If set to something other than None
for an existing database, raises a ValueError.
- `initialize`: True if a new database should be populated
with initial values defined in the schema.
- `commit`: True if a successful synchronization should commit
to the storage backend. False if the caller of `_sync` will
handle this task.
        - `evolving`: True if the synchronization is occurring during a
database evolution.
"""
self._sync_count += 1
sync_schema_changes = True
locked = False
try:
SCHEVO = self._root['SCHEVO']
# Import old schema.
old_schema_source = SCHEVO['schema_source']
if old_schema_source is not None:
old_schema_module = None
schevo.schema.start(self, evolving)
locked = True
schema_name = schema_counter.next_schema_name()
try:
old_schema_module = self._import_from_source(
old_schema_source, schema_name)
finally:
old_schema = schevo.schema.finish(self, old_schema_module)
locked = False
self._old_schema = old_schema
self._old_schema_module = old_schema_module
else:
old_schema = self._old_schema = None
old_schema_module = self._old_schema_module = None
# Import current schema.
if schema_source is None:
schema_source = old_schema_source
if schema_source is None:
# No schema source was specified and this is a new
# database, so _sync becomes a no-op.
return
else:
# No schema source was specified and this is an
# existing database with a defined schema.
sync_schema_changes = False
if schema_source == old_schema_source:
# If the same source, it'll be the same schema.
schema = old_schema
schema_module = old_schema_module
else:
schema_module = None
schevo.schema.start(self, evolving)
locked = True
schema_name = schema_counter.next_schema_name()
try:
schema_module = self._import_from_source(
schema_source, schema_name)
finally:
schema = schevo.schema.finish(self, schema_module)
locked = False
self._schema_format_compatibility_check(schema)
self.schema = schema
self._schema_module = schema_module
# Expose database-level namespaces and make the database
# the object that the namespace is associated with, for
# more effective use with repr().
self.q = schema.q
self.q._i = self
self.t = schema.t
self.t._i = self
self.Q = schema.Q
self.Q._i = self
# Create an extenders namespace.
self.x = DatabaseExtenders('x', self, self._schema_module)
# If the schema has changed then sync with it.
if sync_schema_changes:
# Update schema source stored in database.
SCHEVO['schema_source'] = schema_source
self._sync_extents(schema, evolving)
# Create extent instances.
E = schema.E
extents = self._extents = {}
relaxed = self._relaxed = {}
entity_classes = self._entity_classes = {}
extent_name_id = self._extent_name_id
for e_name in self.extent_names():
e_id = extent_name_id[e_name]
EntityClass = E[e_name]
extent = Extent(self, e_name, e_id, EntityClass)
extents[e_id] = extents[e_name] = extent
relaxed[e_name] = {}
entity_classes[e_id] = entity_classes[e_name] = EntityClass
# Decorate this Database instance to support the
# following syntax within schema code, for example:
# tx = db.Foo.t.create()
setattr(self, e_name, extent)
# Initialize a new database.
if SCHEVO['version'] == 0:
if schema_version is None:
schema_version = 1
SCHEVO['version'] = schema_version
# Populate with initial data, unless overridden such as
# when importing from an XML file.
if initialize:
self._initialize()
elif schema_version is not None:
# Do not allow schema_version to differ from existing
# version if opening an existing database.
if SCHEVO['version'] != schema_version:
raise ValueError(
'Existing database; schema_version must be set to '
'None or to the current version of the database.')
except:
if locked:
schevo.schema.import_lock.release()
if commit:
self._rollback()
raise
else:
if commit:
self._commit()
self._on_open()
def _sync_extents(self, schema, evolving):
"""Synchronize the extents based on the schema."""
E = schema.E
old_schema = self._old_schema
# Rename extents in the database whose entity class definition
# has a `_was` attribute.
in_schema = set(iter(E))
if evolving:
for extent_name in in_schema:
EntityClass = E[extent_name]
was_named = EntityClass._was
if was_named is not None:
# Change the name of the existing extent in the
# database.
extent_name_id = self._extent_name_id
extent_map = self._extent_map(was_named)
extent_id = extent_map['id']
extent_map['name'] = extent_name
del extent_name_id[was_named]
extent_name_id[extent_name] = extent_id
self._update_extent_maps_by_name()
# Create extents that are in schema but not in db.
in_db = set(self.extent_names())
to_create = in_schema - in_db
for extent_name in to_create:
if extent_name.startswith('_'):
# Do not bother with hidden classes.
continue
EntityClass = E[extent_name]
field_spec = EntityClass._field_spec
field_names = field_spec.keys()
entity_field_names = []
for name in field_names:
FieldClass = field_spec[name]
if FieldClass.may_store_entities and not FieldClass.fget:
entity_field_names.append(name)
key_spec = EntityClass._key_spec
index_spec = EntityClass._index_spec
self._create_extent(
extent_name, field_names, entity_field_names,
key_spec, index_spec)
# Remove extents that are in the db but not in the schema.
in_db = set(self.extent_names())
to_remove = in_db - in_schema
for extent_name in to_remove:
if extent_name.startswith('_'):
# Do not bother with hidden classes.
continue
# Check for links made from entities in this extent to
# other entities, where the other entities maintain those
# link structures.
if old_schema:
extent_map = self._extent_map(extent_name)
field_name_id = extent_map['field_name_id']
extent_id = extent_map['id']
# The old extent name will not exist in the old schema
# if it was an evolve_only class definition, and we
# are not in the process of evolving.
if extent_name in old_schema.E:
for old_field_name, FieldClass in (
old_schema.E[extent_name]._field_spec.iteritems()
):
old_field_id = field_name_id[old_field_name]
if issubclass(FieldClass, EntityField):
self._remove_stale_links(
extent_id, old_field_id, FieldClass)
# Delete the extent. XXX: Need to skip system extents?
self._delete_extent(extent_name)
# Update entity_field_ids, field_id_name, and field_name_id
# for all extents.
for extent_name in self.extent_names():
EntityClass = E[extent_name]
field_spec = EntityClass._field_spec
extent_map = self._extent_map(extent_name)
extent_id = extent_map['id']
entity_field_ids = set(extent_map['entity_field_ids'])
field_id_name = extent_map['field_id_name']
field_name_id = extent_map['field_name_id']
# Rename fields with 'was' attribute.
existing_field_names = set(field_name_id.keys())
new_field_names = set(field_spec.keys())
if evolving:
for field_name in new_field_names:
FieldClass = field_spec[field_name]
was_named = FieldClass.was
if was_named is not None:
if was_named not in existing_field_names:
raise error.FieldDoesNotExist(
extent_name, was_named, field_name)
# Rename the field.
field_id = field_name_id[was_named]
del field_name_id[was_named]
field_name_id[field_name] = field_id
field_id_name[field_id] = field_name
# Remove from the set of existing field names.
existing_field_names.remove(was_named)
# Remove fields that no longer exist.
old_field_names = existing_field_names - new_field_names
for old_field_name in old_field_names:
old_field_id = field_name_id[old_field_name]
if old_schema:
# Get the field spec for the field being deleted.
# It may not exist in the old schema, if it was only
# there in an _evolve_only class definition.
if extent_name in old_schema.E:
FieldClass = old_schema.E[extent_name]._field_spec.get(
old_field_name, None)
if (FieldClass is not None and
issubclass(FieldClass, EntityField)):
self._remove_stale_links(
extent_id, old_field_id, FieldClass)
if old_field_id in entity_field_ids:
entity_field_ids.remove(old_field_id)
del field_name_id[old_field_name]
del field_id_name[old_field_id]
            # Create field IDs for new fields.
existing_field_names = set(field_name_id.keys())
fields_to_create = new_field_names - existing_field_names
for field_name in fields_to_create:
field_id = self._unique_field_id(extent_name)
field_name_id[field_name] = field_id
field_id_name[field_id] = field_name
# Check for entity field.
FieldClass = field_spec[field_name]
if (FieldClass.may_store_entities and not FieldClass.fget):
entity_field_ids.add(field_id)
extent_map['entity_field_ids'] = tuple(entity_field_ids)
# Update index specs for all extents.
for extent_name in self.extent_names():
# Skip system extents.
EntityClass = E[extent_name]
key_spec = EntityClass._key_spec
index_spec = EntityClass._index_spec
self._update_extent_key_spec(extent_name, key_spec, index_spec)
def _unique_extent_id(self):
"""Return an unused random extent ID."""
extent_name_id = self._extent_name_id
while True:
extent_id = random.randint(0, 2**31)
if extent_id not in extent_name_id:
return extent_id
def _unique_field_id(self, extent_name):
"""Return an unused random field ID."""
field_id_name = self._extent_map(extent_name)['field_id_name']
while True:
field_id = random.randint(0, 2**31)
if field_id not in field_id_name:
return field_id
def _update_extent_maps_by_name(self):
extent_maps_by_name = self._extent_maps_by_name = {}
for extent in self._extent_maps_by_id.itervalues():
extent_maps_by_name[extent['name']] = extent
def _update_extent_key_spec(self, extent_name, key_spec, index_spec):
"""Update an existing extent to match given key spec."""
extent_map = self._extent_map(extent_name)
entities = extent_map['entities']
indices = extent_map['indices']
key_spec_ids = [_field_ids(extent_map, field_names)
for field_names in key_spec]
index_spec_ids = [_field_ids(extent_map, field_names)
for field_names in index_spec]
BTree = self._BTree
PList = self._PList
        # Convert key indices that have been changed to non-unique
        # indices.
        for i_spec in index_spec_ids:
            if i_spec not in key_spec_ids and i_spec in indices:
unique, branch = indices[i_spec]
indices[i_spec] = (False, branch)
# Create new key indices for those that don't exist.
for i_spec in key_spec_ids:
if i_spec not in indices:
# Create a new unique index and populate it.
_create_index(
extent_map, i_spec, True, BTree, PList)
for oid in entities:
fields_by_id = entities[oid]['fields']
field_values = tuple(fields_by_id.get(field_id, UNASSIGNED)
for field_id in i_spec)
_index_add(extent_map, i_spec, None, oid, field_values,
BTree)
# Create new non-unique indices for those that don't exist.
for i_spec in index_spec_ids:
if i_spec not in indices:
# Create a new non-unique index and populate it.
_create_index(extent_map, i_spec, False, BTree, PList)
for oid in entities:
fields_by_id = entities[oid]['fields']
field_values = tuple(fields_by_id.get(field_id, UNASSIGNED)
for field_id in i_spec)
_index_add(extent_map, i_spec, None, oid, field_values,
BTree)
# Remove key indices that no longer exist.
to_remove = set(indices) - set(key_spec_ids + index_spec_ids)
for i_spec in to_remove:
_delete_index(extent_map, i_spec)
# Check non-unique indices to see if any are supersets of
# unique indices. If any found, change them to 'unique' and
# validate them.
#
# XXX: Needs testing.
        for spec, (unique, branch) in list(indices.items()):
            # Look for unique index subsets of this index, and make
            # it unique if any exist.
            if not unique:
                spec_set = set(spec)
                for other_spec, (other_unique, _branch) in indices.items():
                    if other_unique and spec_set.issuperset(set(other_spec)):
                        unique = True
                        break
                if unique:
                    # Should be unique but isn't; alter and validate.
                    indices[spec] = (unique, branch)
                    for oid in entities:
                        fields_by_id = entities[oid]['fields']
                        field_values = tuple(fields_by_id[field_id]
                                             for field_id in spec)
                        _index_validate(extent_map, spec, oid, field_values,
                                        BTree)
def _validate_changes(self, changes):
# Here we are applying rules defined by the entity itself, not
# the transaction, since transactions may relax certain rules.
entity_classes = self._entity_classes
changes = change.normalize(changes)
for typ, extent_name, oid in changes:
if typ in (CREATE, UPDATE):
EntityClass = entity_classes[extent_name]
entity = EntityClass(oid)
field_map = entity.s.field_map(not_fget)
for field in field_map.itervalues():
field.validate(field._value)
def _reset_all(self):
"""Clear all entities, indices, etc. in the database.
FOR USE WITH SINGLE-SCHEMA UNIT TESTS.
        NOT INTENDED FOR GENERAL USE.
"""
BTree = self._BTree
for extent_name in self.extent_names():
extent_map = self._extent_map(extent_name)
extent_map['entities'] = BTree()
extent_map['len'] = 0
extent_map['next_oid'] = 1
indices = extent_map['indices']
for index_spec, (unique, index_tree) in list(indices.items()):
indices[index_spec] = (unique, BTree())
self._commit()
self.dispatch = Database.dispatch
self.label = Database.label
self._initialize()
self._on_open()
def _create_index(extent_map, index_spec, unique, BTree, PList):
"""Create a new index in the extent with the given spec and
uniqueness flag."""
assert log(1, extent_map['name'])
assert log(1, 'index_spec', index_spec)
indices = extent_map['indices']
index_map = extent_map['index_map']
normalized_index_map = extent_map['normalized_index_map']
# Look for unique index subsets of this index, and make it unique
# if any exist.
if not unique:
spec_set = set(index_spec)
for i_spec in indices:
compare_set = set(i_spec)
if compare_set.issubset(spec_set):
unique = True
break
# Continue with index creation.
assert log(2, 'unique', unique)
assert log(
2, 'normalized_index_map.keys()', list(normalized_index_map.keys()))
partial_specs = _partial_index_specs(index_spec)
assert log(3, 'partial_specs', partial_specs)
normalized_specs = _normalized_index_specs(partial_specs)
assert log(3, 'normalized_specs', normalized_specs)
index_root = BTree()
indices[index_spec] = (unique, index_root)
for partial_spec in partial_specs:
L = index_map.setdefault(partial_spec, PList())
L.append(index_spec)
for normalized_spec in normalized_specs:
L = normalized_index_map.setdefault(normalized_spec, PList())
L.append(index_spec)
assert log(
2, 'normalized_index_map.keys()', list(normalized_index_map.keys()))
def _delete_index(extent_map, index_spec):
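    """Remove an index from the extent, along with its entries in the
    partial and normalized index maps."""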
indices = extent_map['indices']
index_map = extent_map['index_map']
normalized_index_map = extent_map['normalized_index_map']
partial_specs = _partial_index_specs(index_spec)
normalized_specs = _normalized_index_specs(partial_specs)
del indices[index_spec]
for partial_spec in partial_specs:
L = index_map[partial_spec]
L.remove(index_spec)
if not L:
del index_map[partial_spec]
for normalized_spec in normalized_specs:
if normalized_spec in normalized_index_map:
L = normalized_index_map[normalized_spec]
L.remove(index_spec)
if not L:
del normalized_index_map[normalized_spec]
def _field_ids(extent_map, field_names):
"""Convert a (field-name, ...) tuple to a (field-id, ...)
tuple for the given extent map."""
field_name_id = extent_map['field_name_id']
return tuple(field_name_id[name] for name in field_names)
def _field_names(extent_map, field_ids):
"""Convert a (field-id, ...) tuple to a (field-name, ...) tuple
for the given extent map."""
field_id_name = extent_map['field_id_name']
return tuple(field_id_name[id] for id in field_ids)
def _index_add(extent_map, index_spec, relaxed, oid, field_values, BTree):
"""Add an entry to the specified index, of entity oid having the
given values in order of the index spec."""
indices = extent_map['indices']
unique, branch = indices[index_spec]
# Traverse branches to find a leaf.
for field_id, field_value in zip(index_spec, field_values):
if field_value in branch:
branch = branch[field_value]
else:
new_branch = BTree()
branch[field_value] = new_branch
branch = new_branch
# Raise error if unique index and not an empty leaf.
if unique and len(branch) and relaxed is None:
_index_clean(extent_map, index_spec, field_values)
raise error.KeyCollision(
extent_map['name'],
_field_names(extent_map, index_spec),
field_values,
)
# Inject the OID into the leaf.
branch[oid] = True
# Keep track of the addition if relaxed.
if relaxed is not None:
relaxed.append((extent_map, index_spec, oid, field_values))
def _index_clean(extent_map, index_spec, field_values):
"""Remove stale branches from the specified index."""
indices = extent_map['indices']
unique, branch = indices[index_spec]
_index_clean_branch(branch, field_values)
def _index_clean_branch(branch, field_values):
"""Recursively clean a branch of stale child branches."""
branch_value = field_values[0]
child_values = field_values[1:]
if branch_value in branch:
if child_values:
# Clean children first.
_index_clean_branch(branch[branch_value], child_values)
# Clean ourself if empty.
if not len(branch[branch_value]):
del branch[branch_value]
def _index_remove(extent_map, index_spec, oid, field_values):
"""Remove an entry from the specified index, of entity oid having
the given values in order of the index spec."""
indices = extent_map['indices']
unique, branch = indices[index_spec]
# Traverse branches to find a leaf.
for field_id, field_value in zip(index_spec, field_values):
if field_value not in branch:
# Was never indexed for some reason, so stop traversing.
break
branch = branch[field_value]
if oid in branch:
del branch[oid]
_index_clean(extent_map, index_spec, field_values)
def _index_validate(extent_map, index_spec, oid, field_values, BTree):
"""Validate the index entry for uniqueness."""
indices = extent_map['indices']
unique, branch = indices[index_spec]
# Traverse branches to find a leaf.
for field_id, field_value in zip(index_spec, field_values):
if field_value in branch:
branch = branch[field_value]
else:
new_branch = BTree()
branch[field_value] = new_branch
branch = new_branch
    # Raise an error if this is a unique index and the leaf holds more
    # than one OID (the entity's own OID is expected to be present).
if unique and len(branch) > 1:
_index_clean(extent_map, index_spec, field_values)
raise error.KeyCollision(
extent_map['name'],
_field_names(extent_map, index_spec),
field_values,
)
def _normalized_index_specs(index_specs):
"""Return normalized index specs based on index_specs."""
return [tuple(sorted(spec)) for spec in index_specs]
def _partial_index_specs(index_spec):
"""Return a list of partial index specs based on index_spec."""
return [tuple(index_spec[:x+1]) for x in xrange(len(index_spec))]
def _walk_index(branch, ascending_seq, result_list):
"""Recursively walk a branch of an index, appending OIDs found to
result_list.
- `branch`: The branch to start at.
- `ascending_seq`: The sequence of ascending flags corresponding
to the current branch.
- `result_list`: List to append OIDs to.
"""
if len(ascending_seq):
# We are at a branch.
ascending, inner_ascending = ascending_seq[0], ascending_seq[1:]
if ascending:
for key, inner_branch in branch.iteritems():
_walk_index(inner_branch, inner_ascending, result_list)
else:
# XXX: SchevoZodb backend requires us to use
# `reversed(branch.keys())` rather than
# `reversed(branch)`.
keys = reversed(branch.keys())
for key in keys:
inner_branch = branch[key]
_walk_index(inner_branch, inner_ascending, result_list)
else:
# We are at a leaf.
result_list.extend(branch.iterkeys())
class DatabaseExtenders(NamespaceExtension):
"""Methods that extend the functionality of a database."""
__slots__ = NamespaceExtension.__slots__
_readonly = False
def __init__(self, name, instance, schema_module):
NamespaceExtension.__init__(self, name, instance)
# Expose functions through this namespace.
for name in dir(schema_module):
# Extender functions always have x_ prefix.
if name.startswith('x_'):
function = getattr(schema_module, name)
# Drop the 'x_' prefix.
name = name[2:]
self._set(name, function)
def convert_from_format1(backend):
"""Convert a database from format 1 to format 2.
- `backend`: Open backend connection to the database to convert.
Assumes that the database has already been verified to be a format 1
database.
"""
root = backend.get_root()
schevo = root['SCHEVO']
extent_name_id = schevo['extent_name_id']
extents = schevo['extents']
# For each extent in the database...
for extent_name, extent_id in extent_name_id.iteritems():
extent = extents[extent_id]
entity_field_ids = frozenset(extent['entity_field_ids'])
# For each entity in the extent...
for entity_oid, entity in extent['entities'].iteritems():
fields = entity['fields']
related_entities = entity['related_entities'] = backend.PDict()
# For each entity field in the entity...
for field_id in entity_field_ids:
related_entity_set = set()
# If the value is an entity reference, turn it into a
# Placeholder. Store the value, and also add it to the
# set of related entities.
value = fields.get(field_id, UNASSIGNED)
if isinstance(value, tuple):
p = Placeholder.new(*value)
fields[field_id] = p
related_entity_set.add(p)
related_entities[field_id] = frozenset(related_entity_set)
# For each index...
indices = extent['indices']
for index_spec, (unique, index_tree) in indices.iteritems():
# Convert all (extent_id, oid) tuples to Placeholder instances in
# extent indices.
_convert_index_from_format1(
entity_field_ids, index_spec, index_tree)
# Bump format from 1 to 2.
schevo['format'] = 2
def _convert_index_from_format1(entity_field_ids, index_spec, index_tree):
current_field_id, next_index_spec = index_spec[0], index_spec[1:]
is_entity_field = current_field_id in entity_field_ids
for key, child_tree in index_tree.items():
if is_entity_field and isinstance(key, tuple):
# Convert entity tuple to Placeholder.
p = Placeholder.new(*key)
# Replace old key with new key.
del index_tree[key]
index_tree[p] = child_tree
# Recurse into child structures if not at a leaf.
if len(next_index_spec) > 0:
_convert_index_from_format1(
entity_field_ids, next_index_spec, child_tree)
optimize.bind_all(sys.modules[__name__]) # Last line of module.
| mit | -3,918,668,678,691,859,500 | 43.209453 | 80 | 0.558569 | false |
dalejung/nbx | nbx/nbmanager/tagged_gist/notebook_gisthub.py | 1 | 6300 | import github
import nbformat
from .gisthub import gisthub, _hashtags
import nbx.compat as compat
def parse_tags(desc):
# real tags and not system-like tags
tags = _hashtags(desc)
if '#notebook' in tags:
tags.remove('#notebook')
if '#inactive' in tags:
tags.remove('#inactive')
return tags
class NotebookGist(object):
"""
A single notebook abstraction over Gist. Normally a gist can have
    multiple files. A notebook gist pretends to be a single file.
"""
# instead of having a bunch of @property getters, define
# attrs to grab from .gist here.
_gist_attrs = ['id', 'files', 'active', 'edit', 'updated_at',
'created_at', 'public']
def __init__(self, gist, gisthub):
self.gist = gist
self.gisthub = gisthub
# unique identifier name
self.suffix = "[{0}].ipynb".format(self.id)
super(NotebookGist, self).__init__()
_name = None
@property
def name(self):
if self._name is None:
self._name = self.gist.name
return self._name
@name.setter
def name(self, value):
self._name = value
# recompute keyname
@property
def key_name(self):
return self.name + ' ' + self.suffix
_tags = None
@property
def tags(self):
if self._tags is None:
self._tags = self.gist.tags[:]
if '#notebook' in self._tags:
self._tags.remove('#notebook')
return self._tags
@tags.setter
def tags(self, tags):
self._tags = tags
def __getattr__(self, name):
if name in self._gist_attrs:
return getattr(self.gist, name)
raise AttributeError("{name} not found on .gist".format(name=name))
_notebook_content = None
@property
def notebook_content(self):
if self._notebook_content is None:
# refresh and grab file contents
file = self._get_notebook_file()
if file:
self._notebook_content = file.content
return self._notebook_content
@notebook_content.setter
def notebook_content(self, content):
if isinstance(content, compat.string_types):
self._notebook_content = content
return
        # Not a string; assume it is a notebook object and serialize it.
        content = nbformat.writes(content, version=nbformat.NO_CONVERT)
        self._notebook_content = content
@property
def revisions(self):
# only return revisions for the .ipynb file
fn = self._get_notebook_file()
revisions = self.gist.revisions_for_file(fn.filename)
# convert to basic commit log. Dont' want NotebookManager
# needing to know github.GistHistoryState internals
commits = []
for state in revisions:
commit = {
'id': state.version,
'commit_date': state.committed_at
}
commits.append(commit)
return commits
def get_revision_content(self, commit_id):
fobj = self._get_notebook_file()
rev_fobj = self.gist.get_revision_file(commit_id, fobj.filename)
return rev_fobj['content']
def _refresh(self):
self.gist = self.gisthub.refresh_gist(self)
def _get_notebook_file(self):
"""
Will return the first notebook in a gist.
        Iterate in sorted order so this is stable; it is unclear whether
        the file order is defined by the GitHub API.
"""
self._refresh()
for key in sorted(self.gist.files):
file = self.gist.files[key]
if file.filename.endswith(".ipynb"):
return file
def _edit(self, desc=None, files=None):
if desc is None:
desc = self.description
self.gist.edit(desc, files)
def _generate_payload(self):
" Gather payload to sent to Github. "
gfile = self._get_notebook_file()
file = github.InputFileContent(self.notebook_content)
files = {gfile.filename: file}
description = self._generate_description()
return {'files':files, 'description': description}
def _generate_description(self):
""" genrate the Gist description. """
name = self.name
# system type of tags
tags = ['#notebook']
if not self.active:
tags.append('#inactive')
# add the normal tags
tags += self.tags
tagstring = " ".join(tags)
description = "{name} {tags}".format(name=name, tags=tagstring)
return description
def __repr__(self):
out = "NotebookGist(name={name}, active={active}, " + \
"public={public}, tags={tags})"
return out.format(public=self.public, name=self.name,
tags=self.tags, active=self.active)
def strip_gist_id(self, key_name):
" small util to remove gist_id suffix "
# really we're assuming this will only match once, seems fine
return key_name.replace(' '+self.suffix, '')
class NotebookGistHub(object):
def __init__(self, gisthub):
self.gisthub = gisthub
def _wrap_results(self, results):
wrapped = {}
for key, gists in results.items():
# convert to NotebookGist
items = [NotebookGist(gist, self) for gist in gists]
# index by key_name
items = dict([(gist.key_name, gist) for gist in items])
wrapped[key] = items
return wrapped
def query(self, *args, **kwargs):
kwargs['filter_tag'] = '#notebook'
results = self.gisthub.query(*args, **kwargs)
return self._wrap_results(results)
def refresh_gist(self, gist):
return self.gisthub.refresh_gist(gist)
def save(self, gist):
payload = gist._generate_payload()
gist._edit(payload['description'], payload['files'])
self.gisthub.update_gist(gist.gist)
def create_gist(self, name, tags, content='', public=True):
gist = self.gisthub.create_gist(name, tags, content, public)
nb = NotebookGist(gist, self)
return nb
def notebook_gisthub(user, password):
g = gisthub(user, password)
return NotebookGistHub(g)
| mit | 5,413,795,379,214,303,000 | 30.5 | 75 | 0.584127 | false |
partofthething/home-assistant | homeassistant/components/plaato/config_flow.py | 2 | 7438 | """Config flow for Plaato."""
import logging
from pyplaato.plaato import PlaatoDeviceType
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_SCAN_INTERVAL, CONF_TOKEN, CONF_WEBHOOK_ID
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from .const import (
CONF_CLOUDHOOK,
CONF_DEVICE_NAME,
CONF_DEVICE_TYPE,
CONF_USE_WEBHOOK,
DEFAULT_SCAN_INTERVAL,
DOCS_URL,
PLACEHOLDER_DEVICE_NAME,
PLACEHOLDER_DEVICE_TYPE,
PLACEHOLDER_DOCS_URL,
PLACEHOLDER_WEBHOOK_URL,
)
from .const import DOMAIN # pylint:disable=unused-import
_LOGGER = logging.getLogger(__package__)
class PlaatoConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handles a Plaato config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
def __init__(self):
"""Initialize."""
self._init_info = {}
async def async_step_user(self, user_input=None):
"""Handle user step."""
if user_input is not None:
self._init_info[CONF_DEVICE_TYPE] = PlaatoDeviceType(
user_input[CONF_DEVICE_TYPE]
)
self._init_info[CONF_DEVICE_NAME] = user_input[CONF_DEVICE_NAME]
return await self.async_step_api_method()
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Required(
CONF_DEVICE_NAME,
default=self._init_info.get(CONF_DEVICE_NAME, None),
): str,
vol.Required(
CONF_DEVICE_TYPE,
default=self._init_info.get(CONF_DEVICE_TYPE, None),
): vol.In(list(PlaatoDeviceType)),
}
),
)
async def async_step_api_method(self, user_input=None):
"""Handle device type step."""
device_type = self._init_info[CONF_DEVICE_TYPE]
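        # Airlock devices may use either an auth token or a webhook; other
        # device types require an auth token.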
if user_input is not None:
token = user_input.get(CONF_TOKEN, None)
use_webhook = user_input.get(CONF_USE_WEBHOOK, False)
if not token and not use_webhook:
errors = {"base": PlaatoConfigFlow._get_error(device_type)}
return await self._show_api_method_form(device_type, errors)
self._init_info[CONF_USE_WEBHOOK] = use_webhook
self._init_info[CONF_TOKEN] = token
return await self.async_step_webhook()
return await self._show_api_method_form(device_type)
async def async_step_webhook(self, user_input=None):
"""Validate config step."""
use_webhook = self._init_info[CONF_USE_WEBHOOK]
if use_webhook and user_input is None:
webhook_id, webhook_url, cloudhook = await self._get_webhook_id()
self._init_info[CONF_WEBHOOK_ID] = webhook_id
self._init_info[CONF_CLOUDHOOK] = cloudhook
return self.async_show_form(
step_id="webhook",
description_placeholders={
PLACEHOLDER_WEBHOOK_URL: webhook_url,
PLACEHOLDER_DOCS_URL: DOCS_URL,
},
)
return await self._async_create_entry()
async def _async_create_entry(self):
"""Create the entry step."""
webhook_id = self._init_info.get(CONF_WEBHOOK_ID, None)
auth_token = self._init_info[CONF_TOKEN]
device_name = self._init_info[CONF_DEVICE_NAME]
device_type = self._init_info[CONF_DEVICE_TYPE]
unique_id = auth_token if auth_token else webhook_id
await self.async_set_unique_id(unique_id)
self._abort_if_unique_id_configured()
return self.async_create_entry(
title=device_type.name,
data=self._init_info,
description_placeholders={
PLACEHOLDER_DEVICE_TYPE: device_type.name,
PLACEHOLDER_DEVICE_NAME: device_name,
},
)
async def _show_api_method_form(
self, device_type: PlaatoDeviceType, errors: dict = None
):
data_schema = vol.Schema({vol.Optional(CONF_TOKEN, default=""): str})
if device_type == PlaatoDeviceType.Airlock:
data_schema = data_schema.extend(
{vol.Optional(CONF_USE_WEBHOOK, default=False): bool}
)
return self.async_show_form(
step_id="api_method",
data_schema=data_schema,
errors=errors,
description_placeholders={PLACEHOLDER_DEVICE_TYPE: device_type.name},
)
async def _get_webhook_id(self):
"""Generate webhook ID."""
webhook_id = self.hass.components.webhook.async_generate_id()
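        # Prefer a cloudhook when a cloud subscription is active; otherwise
        # fall back to a locally generated webhook URL.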
if self.hass.components.cloud.async_active_subscription():
webhook_url = await self.hass.components.cloud.async_create_cloudhook(
webhook_id
)
cloudhook = True
else:
webhook_url = self.hass.components.webhook.async_generate_url(webhook_id)
cloudhook = False
return webhook_id, webhook_url, cloudhook
@staticmethod
def _get_error(device_type: PlaatoDeviceType):
if device_type == PlaatoDeviceType.Airlock:
return "no_api_method"
return "no_auth_token"
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return PlaatoOptionsFlowHandler(config_entry)
class PlaatoOptionsFlowHandler(config_entries.OptionsFlow):
"""Handle Plaato options."""
def __init__(self, config_entry: ConfigEntry):
"""Initialize domain options flow."""
super().__init__()
self._config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Manage the options."""
use_webhook = self._config_entry.data.get(CONF_USE_WEBHOOK, False)
if use_webhook:
return await self.async_step_webhook()
return await self.async_step_user()
async def async_step_user(self, user_input=None):
"""Manage the options."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Optional(
CONF_SCAN_INTERVAL,
default=self._config_entry.options.get(
CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL
),
): cv.positive_int
}
),
)
async def async_step_webhook(self, user_input=None):
"""Manage the options for webhook device."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
webhook_id = self._config_entry.data.get(CONF_WEBHOOK_ID, None)
webhook_url = (
""
if webhook_id is None
else self.hass.components.webhook.async_generate_url(webhook_id)
)
return self.async_show_form(
step_id="webhook",
description_placeholders={PLACEHOLDER_WEBHOOK_URL: webhook_url},
)
| mit | 1,609,999,554,006,146,000 | 32.35426 | 85 | 0.583625 | false |
v-legoff/pa-poc1 | dc/yaml/connector.py | 1 | 8550 | # Copyright (c) 2012 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Module defining the YAMLConnector class."""
import os
driver = True
try:
import yaml
except ImportError:
driver = False
from dc.connector import DataConnector
from dc import exceptions
from model import exceptions as mod_exceptions
from model.functions import *
class YAMLConnector(DataConnector):
"""Data connector for YAML.
    This data connector should read and write data in YAML format, using
the yaml library.
A very short example:
# Table: users
- id: 1
username: admin
email_address: [email protected]
"""
name = "yaml"
def __init__(self):
"""Check the driver presence.
If not found, raise a DriverNotFound exception.
"""
if not driver:
raise exceptions.DriverNotFound(
"the yaml library can not be found")
self.location = None
self.auto_increments = {}
self.to_update = set()
def setup(self, location=None):
"""Setup the data connector."""
if location is None:
raise exceptions.InsufficientConfiguration(
"the location for storing datas was not specified for " \
"the YAML data connector")
location = location.replace("\\", "/")
if location.startswith("~"):
location = os.path.expanduser("~") + location[1:]
if location.endswith("/"):
location = location[:-1]
if not os.path.exists(location):
# Try to create it
os.makedirs(location)
if not os.access(location, os.R_OK):
raise exceptions.DriverInitializationError(
"cannot read in {}".format(location))
if not os.access(location, os.W_OK):
raise exceptions.DriverInitializationError(
"cannot write in {}".format(location))
DataConnector.__init__(self)
self.location = location
self.files = {}
def close(self):
"""Close the data connector (nothing to be done)."""
pass
def destroy(self):
"""Erase EVERY stored data."""
for file in os.listdir(self.location):
os.remove(self.location + "/" + file)
self.clear_cache()
def record_model(self, model):
"""Record the given model."""
name = DataConnector.record_model(self, model)
filename = self.location + "/" + name + ".yml"
if os.path.exists(filename):
with open(filename, "r") as file:
self.read_table(name, file)
self.files[name] = filename
def read_table(self, table_name, file):
"""Read a whoe table contained in a file.
This file is supposed to be formatted as a YAML file. Furthermore,
the 'yaml.load' function should return a list of dictionaries.
        The first dictionary describes some table information, such as
        the status of the autoincrement fields. Each following dictionary
        is a line of data which should describe a model object.
"""
name = table_name
content = file.read()
datas = yaml.load(content)
if not isinstance(datas, list):
raise exceptions.DataFormattingError(
"the file {} must contain a YAML formatted list".format(
self.files[name]))
class_table = self.models[name]
class_datas = datas[0]
if not isinstance(class_datas, dict):
raise exceptions.DataFormattingError(
"the table informations are not stored in a YAML " \
"dictionary in the file {}".format(self.files[name]))
self.read_table_header(name, class_datas)
objects = {}
for line in datas[1:]:
object = class_table.build(**line)
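            # A single-field primary key is stored as a scalar, a composite
            # key as a tuple.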
pkey = get_pkey_values(object)
if len(pkey) == 1:
pkey = pkey[0]
objects[pkey] = object
self.objects_tree[name] = objects
def read_table_header(self, name, datas):
"""Read the table header.
        This header should describe some information concerning the
        table (such as the autoincrement fields).
"""
auto_increments = datas.get("auto_increments", [])
self.auto_increments[name] = auto_increments
def loop(self):
"""Write the YAML tables."""
for table in self.to_update:
self.write_table(table)
self.to_update.clear()
def write_table(self, name):
"""Write the table in a file."""
# First, we get the header
header = {}
if name in self.auto_increments:
header["auto_increments"] = self.auto_increments[name]
# Next we browse the object
objects = []
for object in self.objects_tree[name].values():
objects.append(object.__dict__)
objects.insert(0, header)
content = yaml.dump(objects, default_flow_style=False)
with open(self.location + "/" + name + ".yml", "w") as file:
file.write(content)
def get_all_objects(self, model):
"""Return all the model's object in a list."""
name = get_name(model)
return list(self.objects_tree.get(name, {}).values())
def find_object(self, model, pkey_values):
"""Return, if found, the selected object.
Raise a model.exceptions.ObjectNotFound if not found.
"""
# Look for the object in the cached tree
object = self.get_from_cache(model, pkey_values)
if object:
return object
raise mod_exceptions.ObjectNotFound(model, pkey_values)
def add_object(self, object):
"""Save the object, issued from a model."""
name = get_name(type(object))
fields = get_fields(type(object))
auto_increments = self.auto_increments.get(name, {})
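        # Give each auto-increment field its next value and bump the
        # stored counter for the table.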
for field in fields:
if not field.auto_increment:
continue
value = auto_increments.get(field.field_name, 1)
update_attr(object, field.field_name, value)
auto_increments[field.field_name] = value + 1
self.cache_object(object)
self.auto_increments[name] = auto_increments
self.to_update.add(name)
def update_object(self, object, attribute):
"""Update an object."""
self.check_update(object)
name = get_name(type(object))
self.to_update.add(name)
def remove_object(self, object):
"""Delete the object."""
# Delete from cache only
self.uncache_object(object)
name = get_name(type(object))
self.to_update.add(name)
| bsd-3-clause | -1,075,166,643,516,734,300 | 34.185185 | 79 | 0.598713 | false |
tuzonghua/CloudBot | plugins/youtube.py | 1 | 5598 | import re
import time
import isodate
import requests
from cloudbot import hook
from cloudbot.util import timeformat
from cloudbot.util.formatting import pluralize
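# Matches youtube.com watch/v URLs, youtu.be short links and yooouuutuuube
# links, capturing the video ID.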
youtube_re = re.compile(r'(?:youtube.*?(?:v=|/v/)|youtu\.be/|yooouuutuuube.*?id=)([-_a-zA-Z0-9]+)', re.I)
base_url = 'https://www.googleapis.com/youtube/v3/'
api_url = base_url + 'videos?part=contentDetails%2C+snippet%2C+statistics&id={}&key={}'
search_api_url = base_url + 'search?part=id&maxResults=1'
playlist_api_url = base_url + 'playlists?part=snippet%2CcontentDetails%2Cstatus'
video_url = "http://youtu.be/%s"
err_no_api = "The YouTube API is off in the Google Developers Console."
def get_video_description(video_id):
json = requests.get(api_url.format(video_id, dev_key)).json()
if json.get('error'):
if json['error']['code'] == 403:
return err_no_api
else:
return
data = json['items']
snippet = data[0]['snippet']
statistics = data[0]['statistics']
content_details = data[0]['contentDetails']
out = '\x02{}\x02'.format(snippet['title'])
if not content_details.get('duration'):
return out
length = isodate.parse_duration(content_details['duration'])
out += ' - length \x02{}\x02'.format(timeformat.format_time(int(length.total_seconds()), simple=True))
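    # Compute the like/dislike breakdown; skipped when the video has no
    # ratings.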
total_votes = float(statistics.get('likeCount', 0)) + float(statistics.get('dislikeCount', 0))
if total_votes != 0:
        # Format the like/dislike counts and percentage for display.
likes = pluralize(int(statistics.get('likeCount', 0)), "like")
dislikes = pluralize(int(statistics.get('dislikeCount', 0)), "dislike")
percent = 100 * float(statistics.get('likeCount', 0)) / total_votes
out += ' - {}, {} (\x02{:.1f}\x02%)'.format(likes,
dislikes, percent)
if 'viewCount' in statistics:
views = int(statistics['viewCount'])
out += ' - \x02{:,}\x02 view{}'.format(views, "s"[views == 1:])
uploader = snippet['channelTitle']
upload_time = time.strptime(snippet['publishedAt'], "%Y-%m-%dT%H:%M:%S.000Z")
out += ' - \x02{}\x02 on \x02{}\x02'.format(uploader,
time.strftime("%Y.%m.%d", upload_time))
if 'contentRating' in content_details:
out += ' - \x034NSFW\x02'
return out
@hook.on_start()
def load_key(bot):
global dev_key
dev_key = bot.config.get("api_keys", {}).get("google_dev_key", None)
@hook.regex(youtube_re)
def youtube_url(match):
return get_video_description(match.group(1))
@hook.command("youtube", "you", "yt", "y")
def youtube(text):
"""youtube <query> -- Returns the first YouTube search result for <query>."""
if not dev_key:
return "This command requires a Google Developers Console API key."
json = requests.get(search_api_url, params={"q": text, "key": dev_key, "type": "video"}).json()
if json.get('error'):
if json['error']['code'] == 403:
return err_no_api
else:
return 'Error performing search.'
if json['pageInfo']['totalResults'] == 0:
return 'No results found.'
video_id = json['items'][0]['id']['videoId']
return get_video_description(video_id) + " - " + video_url % video_id
@hook.command("youtime", "ytime")
def youtime(text):
"""youtime <query> -- Gets the total run time of the first YouTube search result for <query>."""
if not dev_key:
return "This command requires a Google Developers Console API key."
json = requests.get(search_api_url, params={"q": text, "key": dev_key, "type": "video"}).json()
if json.get('error'):
if json['error']['code'] == 403:
return err_no_api
else:
return 'Error performing search.'
if json['pageInfo']['totalResults'] == 0:
return 'No results found.'
video_id = json['items'][0]['id']['videoId']
json = requests.get(api_url.format(video_id, dev_key)).json()
if json.get('error'):
return
data = json['items']
snippet = data[0]['snippet']
content_details = data[0]['contentDetails']
statistics = data[0]['statistics']
if not content_details.get('duration'):
return
length = isodate.parse_duration(content_details['duration'])
l_sec = int(length.total_seconds())
views = int(statistics['viewCount'])
total = int(l_sec * views)
length_text = timeformat.format_time(l_sec, simple=True)
total_text = timeformat.format_time(total, accuracy=8)
return 'The video \x02{}\x02 has a length of {} and has been viewed {:,} times for ' \
'a total run time of {}!'.format(snippet['title'], length_text, views,
total_text)
ytpl_re = re.compile(r'(.*:)//(www.youtube.com/playlist|youtube.com/playlist)(:[0-9]+)?(.*)', re.I)
@hook.regex(ytpl_re)
def ytplaylist_url(match):
location = match.group(4).split("=")[-1]
json = requests.get(playlist_api_url, params={"id": location, "key": dev_key}).json()
if json.get('error'):
if json['error']['code'] == 403:
return err_no_api
else:
return 'Error looking up playlist.'
data = json['items']
snippet = data[0]['snippet']
content_details = data[0]['contentDetails']
title = snippet['title']
author = snippet['channelTitle']
num_videos = int(content_details['itemCount'])
count_videos = ' - \x02{:,}\x02 video{}'.format(num_videos, "s"[num_videos == 1:])
return "\x02{}\x02 {} - \x02{}\x02".format(title, count_videos, author)
| gpl-3.0 | -2,821,733,049,324,311,000 | 32.321429 | 106 | 0.602358 | false |
brennie/reviewboard | reviewboard/reviews/tests/test_entries.py | 1 | 31486 | """Unit tests for review request page entries."""
from __future__ import unicode_literals
from datetime import timedelta
from django.contrib.auth.models import AnonymousUser
from django.test.client import RequestFactory
from django.utils import six
from djblets.testing.decorators import add_fixtures
from reviewboard.changedescs.models import ChangeDescription
from reviewboard.reviews.detail import (ChangeEntry,
InitialStatusUpdatesEntry,
ReviewEntry,
ReviewRequestPageData,
StatusUpdatesEntryMixin)
from reviewboard.reviews.models import GeneralComment, StatusUpdate
from reviewboard.testing import TestCase
class StatusUpdatesEntryMixinTests(TestCase):
"""Unit tests for StatusUpdatesEntryMixin."""
def test_add_update_with_done_failure(self):
"""Testing StatusUpdatesEntryMixin.add_update with DONE_FAILURE"""
status_update = StatusUpdate(state=StatusUpdate.DONE_FAILURE)
entry = StatusUpdatesEntryMixin()
entry.add_update(status_update)
self.assertEqual(entry.status_updates, [status_update])
self.assertEqual(status_update.header_class,
'status-update-state-failure')
def test_add_update_with_error(self):
"""Testing StatusUpdatesEntryMixin.add_update with ERROR"""
status_update = StatusUpdate(state=StatusUpdate.ERROR)
entry = StatusUpdatesEntryMixin()
entry.add_update(status_update)
self.assertEqual(entry.status_updates, [status_update])
self.assertEqual(status_update.header_class,
'status-update-state-failure')
def test_add_update_with_timeout(self):
"""Testing StatusUpdatesEntryMixin.add_update with TIMEOUT"""
status_update = StatusUpdate(state=StatusUpdate.TIMEOUT)
entry = StatusUpdatesEntryMixin()
entry.add_update(status_update)
self.assertEqual(entry.status_updates, [status_update])
self.assertEqual(status_update.header_class,
'status-update-state-failure')
def test_add_update_with_pending(self):
"""Testing StatusUpdatesEntryMixin.add_update with PENDING"""
status_update = StatusUpdate(state=StatusUpdate.PENDING)
entry = StatusUpdatesEntryMixin()
entry.add_update(status_update)
self.assertEqual(entry.status_updates, [status_update])
self.assertEqual(status_update.header_class,
'status-update-state-pending')
def test_add_update_with_done_success(self):
"""Testing StatusUpdatesEntryMixin.add_update with DONE_SUCCESS"""
status_update = StatusUpdate(state=StatusUpdate.DONE_SUCCESS)
entry = StatusUpdatesEntryMixin()
entry.add_update(status_update)
self.assertEqual(entry.status_updates, [status_update])
self.assertEqual(status_update.header_class,
'status-update-state-success')
def test_add_update_html_rendering(self):
"""Testing StatusUpdatesEntryMixin.add_update HTML rendering"""
status_update = StatusUpdate(state=StatusUpdate.DONE_SUCCESS,
description='My description.',
summary='My summary.')
entry = StatusUpdatesEntryMixin()
entry.add_update(status_update)
self.assertHTMLEqual(
status_update.summary_html,
('<div class="status-update-summary-entry'
' status-update-state-success">\n'
' <span class="summary">My summary.</span>\n'
' My description.\n'
'</div>'))
def test_add_update_html_rendering_with_url(self):
"""Testing StatusUpdatesEntryMixin.add_update HTML rendering with URL
"""
status_update = StatusUpdate(state=StatusUpdate.DONE_SUCCESS,
description='My description.',
summary='My summary.',
url='https://example.com/')
entry = StatusUpdatesEntryMixin()
entry.add_update(status_update)
self.assertHTMLEqual(
status_update.summary_html,
('<div class="status-update-summary-entry'
' status-update-state-success">\n'
' <span class="summary">My summary.</span>\n'
' My description.\n'
' <a href="https://example.com/">https://example.com/</a>'
'</div>'))
def test_add_update_html_rendering_with_url_and_text(self):
"""Testing StatusUpdatesEntryMixin.add_update HTML rendering with URL
and URL text
"""
status_update = StatusUpdate(state=StatusUpdate.DONE_SUCCESS,
description='My description.',
summary='My summary.',
url='https://example.com/',
url_text='My URL')
entry = StatusUpdatesEntryMixin()
entry.add_update(status_update)
self.assertHTMLEqual(
status_update.summary_html,
('<div class="status-update-summary-entry'
' status-update-state-success">\n'
' <span class="summary">My summary.</span>\n'
' My description.\n'
' <a href="https://example.com/">My URL</a>'
'</div>'))
def test_add_update_html_rendering_with_timeout(self):
"""Testing StatusUpdatesEntryMixin.add_update HTML rendering with
timeout
"""
status_update = StatusUpdate(state=StatusUpdate.TIMEOUT,
description='My description.',
summary='My summary.')
entry = StatusUpdatesEntryMixin()
entry.add_update(status_update)
self.assertHTMLEqual(
status_update.summary_html,
('<div class="status-update-summary-entry'
' status-update-state-failure">\n'
' <span class="summary">My summary.</span>\n'
' timed out.\n'
'</div>'))
@add_fixtures(['test_users'])
def test_add_comment(self):
"""Testing StatusUpdatesEntryMixin.add_comment"""
review_request = self.create_review_request()
review = self.create_review(review_request)
comment = self.create_general_comment(review)
# This is needed by the entry's add_comment(). It's normally built when
# creating the entries and their data.
comment.review_obj = review
status_update = self.create_status_update(
review_request=review_request,
review=review)
entry = StatusUpdatesEntryMixin()
entry.add_update(status_update)
entry.add_comment('general_comments', comment)
self.assertEqual(status_update.comments['general_comments'], [comment])
def test_finalize_with_all_states(self):
"""Testing StatusUpdatesEntryMixin.finalize with all states"""
entry = StatusUpdatesEntryMixin()
entry.add_update(StatusUpdate(state=StatusUpdate.DONE_FAILURE))
for i in range(2):
entry.add_update(StatusUpdate(state=StatusUpdate.DONE_SUCCESS))
for i in range(3):
entry.add_update(StatusUpdate(state=StatusUpdate.PENDING))
for i in range(4):
entry.add_update(StatusUpdate(state=StatusUpdate.ERROR))
for i in range(5):
entry.add_update(StatusUpdate(state=StatusUpdate.TIMEOUT))
entry.finalize()
self.assertEqual(
entry.state_summary,
'1 failed, 2 succeeded, 3 pending, 4 failed with error, '
'5 timed out')
def test_finalize_with_done_failure(self):
"""Testing StatusUpdatesEntryMixin.finalize with DONE_FAILURE"""
entry = StatusUpdatesEntryMixin()
entry.add_update(StatusUpdate(state=StatusUpdate.DONE_FAILURE))
entry.finalize()
self.assertEqual(entry.state_summary, '1 failed')
self.assertEqual(entry.state_summary_class,
'status-update-state-failure')
def test_finalize_with_error(self):
"""Testing StatusUpdatesEntryMixin.finalize with ERROR"""
entry = StatusUpdatesEntryMixin()
entry.add_update(StatusUpdate(state=StatusUpdate.ERROR))
entry.finalize()
self.assertEqual(entry.state_summary, '1 failed with error')
self.assertEqual(entry.state_summary_class,
'status-update-state-failure')
def test_finalize_with_timeout(self):
"""Testing StatusUpdatesEntryMixin.finalize with TIMEOUT"""
entry = StatusUpdatesEntryMixin()
entry.add_update(StatusUpdate(state=StatusUpdate.TIMEOUT))
entry.finalize()
self.assertEqual(entry.state_summary, '1 timed out')
self.assertEqual(entry.state_summary_class,
'status-update-state-failure')
def test_finalize_with_pending(self):
"""Testing StatusUpdatesEntryMixin.finalize with PENDING"""
entry = StatusUpdatesEntryMixin()
entry.add_update(StatusUpdate(state=StatusUpdate.PENDING))
entry.finalize()
self.assertEqual(entry.state_summary, '1 pending')
self.assertEqual(entry.state_summary_class,
'status-update-state-pending')
def test_finalize_with_done_success(self):
"""Testing StatusUpdatesEntryMixin.finalize with DONE_SUCCESS"""
entry = StatusUpdatesEntryMixin()
entry.add_update(StatusUpdate(state=StatusUpdate.DONE_SUCCESS))
entry.finalize()
self.assertEqual(entry.state_summary, '1 succeeded')
self.assertEqual(entry.state_summary_class,
'status-update-state-success')
def test_finalize_with_failures_take_precedence(self):
"""Testing StatusUpdatesEntryMixin.finalize with failures taking
precedence over PENDING and DONE_SUCCESS
"""
entry = StatusUpdatesEntryMixin()
entry.add_update(StatusUpdate(state=StatusUpdate.DONE_FAILURE))
entry.add_update(StatusUpdate(state=StatusUpdate.PENDING))
entry.add_update(StatusUpdate(state=StatusUpdate.DONE_SUCCESS))
entry.finalize()
self.assertEqual(entry.state_summary,
'1 failed, 1 succeeded, 1 pending')
self.assertEqual(entry.state_summary_class,
'status-update-state-failure')
def test_finalize_with_pending_take_precedence(self):
"""Testing StatusUpdatesEntryMixin.finalize with PENDING taking
        precedence over DONE_SUCCESS
"""
entry = StatusUpdatesEntryMixin()
entry.add_update(StatusUpdate(state=StatusUpdate.PENDING))
entry.add_update(StatusUpdate(state=StatusUpdate.DONE_SUCCESS))
entry.finalize()
self.assertEqual(entry.state_summary, '1 succeeded, 1 pending')
self.assertEqual(entry.state_summary_class,
'status-update-state-pending')
@add_fixtures(['test_users'])
def test_populate_status_updates(self):
"""Testing StatusUpdatesEntryMixin.populate_status_updates"""
review_request = self.create_review_request()
review = self.create_review(review_request, public=True)
comment = self.create_general_comment(review)
# This state is normally set in ReviewRequestPageData.
comment._type = 'general_comments'
comment.review_obj = review
status_updates = [
StatusUpdate(state=StatusUpdate.PENDING),
StatusUpdate(state=StatusUpdate.DONE_FAILURE,
review=review)
]
request = RequestFactory().get('/r/1/')
request.user = AnonymousUser()
data = ReviewRequestPageData(review_request=review_request,
request=request)
data.review_comments[review.pk] = [comment]
entry = StatusUpdatesEntryMixin()
entry.collapsed = True
entry.populate_status_updates(status_updates, data)
self.assertTrue(entry.collapsed)
self.assertEqual(entry.status_updates, status_updates)
status_update = entry.status_updates[0]
self.assertIsNone(status_update.review)
self.assertEqual(
status_update.comments,
{
'diff_comments': [],
'screenshot_comments': [],
'file_attachment_comments': [],
'general_comments': [],
})
status_update = entry.status_updates[1]
self.assertEqual(status_update.review, review)
self.assertEqual(
status_update.comments,
{
'diff_comments': [],
'screenshot_comments': [],
'file_attachment_comments': [],
'general_comments': [comment],
})
@add_fixtures(['test_users'])
def test_populate_status_updates_with_draft_replies(self):
"""Testing StatusUpdatesEntryMixin.populate_status_updates with
draft replies
"""
review_request = self.create_review_request()
review = self.create_review(review_request, public=True)
comment = self.create_general_comment(review)
reply = self.create_reply(review)
reply_comment = self.create_general_comment(reply, reply_to=comment)
# This state is normally set in ReviewRequestPageData.
comment._type = 'general_comments'
comment.review_obj = review
status_updates = [
StatusUpdate(state=StatusUpdate.PENDING),
StatusUpdate(state=StatusUpdate.DONE_FAILURE,
review=review)
]
request = RequestFactory().get('/r/1/')
request.user = AnonymousUser()
data = ReviewRequestPageData(review_request=review_request,
request=request)
data.review_comments[review.pk] = [comment]
data.draft_reply_comments[review.pk] = [reply_comment]
entry = StatusUpdatesEntryMixin()
entry.collapsed = True
entry.populate_status_updates(status_updates, data)
self.assertFalse(entry.collapsed)
self.assertEqual(entry.status_updates, status_updates)
status_update = entry.status_updates[0]
self.assertIsNone(status_update.review)
self.assertEqual(
status_update.comments,
{
'diff_comments': [],
'screenshot_comments': [],
'file_attachment_comments': [],
'general_comments': [],
})
status_update = entry.status_updates[1]
self.assertEqual(status_update.review, review)
self.assertEqual(
status_update.comments,
{
'diff_comments': [],
'screenshot_comments': [],
'file_attachment_comments': [],
'general_comments': [comment],
})
class InitialStatusUpdatesEntryTests(TestCase):
"""Unit tests for InitialStatusUpdatesEntry."""
fixtures = ['test_users']
def setUp(self):
super(InitialStatusUpdatesEntryTests, self).setUp()
self.request = RequestFactory().get('/r/1/')
self.request.user = AnonymousUser()
self.review_request = self.create_review_request()
self.review = self.create_review(self.review_request, public=True)
self.general_comment = self.create_general_comment(self.review,
issue_opened=False)
self.status_update = self.create_status_update(self.review_request,
review=self.review)
self.data = ReviewRequestPageData(review_request=self.review_request,
request=self.request)
def test_build_entries(self):
"""Testing InitialStatusUpdatesEntry.build_entries"""
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
entries = list(InitialStatusUpdatesEntry.build_entries(self.data))
self.assertEqual(len(entries), 1)
entry = entries[0]
self.assertFalse(entry.collapsed)
self.assertEqual(entry.status_updates, [self.status_update])
self.assertEqual(
entry.status_updates_by_review,
{
self.review.pk: self.status_update,
})
self.assertEqual(
entry.status_updates[0].comments,
{
'diff_comments': [],
'screenshot_comments': [],
'file_attachment_comments': [],
'general_comments': [self.general_comment],
})
def test_build_entries_with_changedesc(self):
"""Testing InitialStatusUpdatesEntry.build_entries with
ChangeDescription following this entry
"""
self.review_request.changedescs.create(public=True)
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
entries = list(InitialStatusUpdatesEntry.build_entries(self.data))
self.assertEqual(len(entries), 1)
entry = entries[0]
self.assertTrue(entry.collapsed)
self.assertEqual(entry.status_updates, [self.status_update])
self.assertEqual(
entry.status_updates_by_review,
{
self.review.pk: self.status_update,
})
status_update = entry.status_updates[0]
self.assertEqual(status_update.review, self.review)
self.assertIsNone(status_update.change_description)
self.assertEqual(
status_update.comments,
{
'diff_comments': [],
'screenshot_comments': [],
'file_attachment_comments': [],
'general_comments': [self.general_comment],
})
class ReviewEntryTests(TestCase):
"""Unit tests for ReviewEntry."""
fixtures = ['test_users']
def setUp(self):
super(ReviewEntryTests, self).setUp()
self.request = RequestFactory().get('/r/1/')
self.request.user = AnonymousUser()
self.review_request = self.create_review_request()
self.review = self.create_review(self.review_request,
id=123,
public=True)
self.data = ReviewRequestPageData(review_request=self.review_request,
request=self.request)
def test_get_dom_element_id(self):
"""Testing ReviewEntry.get_dom_element_id"""
entry = ReviewEntry(request=self.request,
review_request=self.review_request,
review=self.review,
collapsed=False,
data=self.data)
self.assertEqual(entry.get_dom_element_id(), 'review123')
def test_get_js_model_data(self):
"""Testing ReviewEntry.get_js_model_data"""
self.review.ship_it = True
self.review.publish()
entry = ReviewEntry(request=self.request,
review_request=self.review_request,
review=self.review,
collapsed=False,
data=self.data)
self.assertEqual(entry.get_js_model_data(), {
'reviewData': {
'id': self.review.pk,
'bodyTop': 'Test Body Top',
'bodyBottom': 'Test Body Bottom',
'public': True,
'shipIt': True,
},
})
@add_fixtures(['test_scmtools'])
def test_get_js_model_data_with_diff_comments(self):
"""Testing ReviewEntry.get_js_model_data with diff comments"""
self.review_request.repository = self.create_repository()
diffset = self.create_diffset(self.review_request)
filediff = self.create_filediff(diffset)
comment1 = self.create_diff_comment(self.review, filediff)
comment2 = self.create_diff_comment(self.review, filediff)
self.review.publish()
# This is needed by the entry's add_comment(). It's normally built when
# creating the entries and their data.
comment1.review_obj = self.review
comment2.review_obj = self.review
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
entry = ReviewEntry(request=self.request,
review_request=self.review_request,
review=self.review,
collapsed=False,
data=self.data)
entry.add_comment('diff_comments', comment1)
entry.add_comment('diff_comments', comment2)
self.assertEqual(entry.get_js_model_data(), {
'reviewData': {
'id': self.review.pk,
'bodyTop': 'Test Body Top',
'bodyBottom': 'Test Body Bottom',
'public': True,
'shipIt': False,
},
'diffCommentsData': [
(six.text_type(comment1.pk), six.text_type(filediff.pk)),
(six.text_type(comment2.pk), six.text_type(filediff.pk)),
],
})
def test_add_comment_with_no_open_issues(self):
"""Testing ReviewEntry.add_comment with comment not opening an issue"""
self.request.user = self.review_request.submitter
entry = ReviewEntry(request=self.request,
review_request=self.review_request,
review=self.review,
collapsed=True,
data=self.data)
self.assertFalse(entry.has_issues)
self.assertEqual(entry.issue_open_count, 0)
entry.add_comment('general_comments', GeneralComment())
self.assertFalse(entry.has_issues)
self.assertEqual(entry.issue_open_count, 0)
self.assertTrue(entry.collapsed)
def test_add_comment_with_open_issues(self):
"""Testing ReviewEntry.add_comment with comment opening an issue"""
entry = ReviewEntry(request=self.request,
review_request=self.review_request,
review=self.review,
collapsed=True,
data=self.data)
self.assertFalse(entry.has_issues)
self.assertEqual(entry.issue_open_count, 0)
entry.add_comment('general_comments',
GeneralComment(issue_opened=True,
issue_status=GeneralComment.OPEN))
self.assertTrue(entry.has_issues)
self.assertEqual(entry.issue_open_count, 1)
self.assertTrue(entry.collapsed)
def test_add_comment_with_open_issues_and_viewer_is_owner(self):
"""Testing ReviewEntry.add_comment with comment opening an issue and
the review request owner is viewing the page
"""
self.request.user = self.review_request.submitter
entry = ReviewEntry(request=self.request,
review_request=self.review_request,
review=self.review,
collapsed=True,
data=self.data)
self.assertFalse(entry.has_issues)
self.assertEqual(entry.issue_open_count, 0)
entry.add_comment('general_comments',
GeneralComment(issue_opened=True,
issue_status=GeneralComment.OPEN))
self.assertTrue(entry.has_issues)
self.assertEqual(entry.issue_open_count, 1)
self.assertFalse(entry.collapsed)
def test_build_entries(self):
"""Testing ReviewEntry.build_entries"""
review1 = self.create_review(
self.review_request,
timestamp=self.review.timestamp - timedelta(days=2),
public=True)
review2 = self.review
comment = self.create_general_comment(review1)
# These shouldn't show up in the results.
self.create_review(
self.review_request,
timestamp=self.review.timestamp - timedelta(days=1),
public=False)
self.create_reply(review1)
status_update_review = self.create_review(self.review_request,
public=True)
self.create_general_comment(status_update_review)
self.create_status_update(self.review_request,
review=status_update_review)
# Create a change description to test collapsing.
self.review_request.changedescs.create(
timestamp=review2.timestamp - timedelta(days=1),
public=True)
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
entries = list(ReviewEntry.build_entries(self.data))
self.assertEqual(len(entries), 2)
# These will actually be in database query order (newest to oldest),
# not the order shown on the page.
entry = entries[0]
self.assertEqual(entry.review, review2)
self.assertFalse(entry.collapsed)
self.assertEqual(
entry.comments,
{
'diff_comments': [],
'screenshot_comments': [],
'file_attachment_comments': [],
'general_comments': [],
})
entry = entries[1]
self.assertEqual(entry.review, review1)
self.assertTrue(entry.collapsed)
self.assertEqual(
entry.comments,
{
'diff_comments': [],
'screenshot_comments': [],
'file_attachment_comments': [],
'general_comments': [comment],
})
class ChangeEntryTests(TestCase):
"""Unit tests for ChangeEntry."""
fixtures = ['test_users']
def setUp(self):
super(ChangeEntryTests, self).setUp()
self.request = RequestFactory().get('/r/1/')
self.request.user = AnonymousUser()
self.review_request = self.create_review_request()
self.changedesc = ChangeDescription.objects.create(id=123,
public=True)
self.review_request.changedescs.add(self.changedesc)
self.data = ReviewRequestPageData(review_request=self.review_request,
request=self.request)
def test_get_dom_element_id(self):
"""Testing ChangeEntry.get_dom_element_id"""
entry = ChangeEntry(request=self.request,
review_request=self.review_request,
changedesc=self.changedesc,
collapsed=False,
data=self.data)
self.assertEqual(entry.get_dom_element_id(), 'changedesc123')
def test_get_js_model_data(self):
"""Testing ChangeEntry.get_js_model_data for standard ChangeDescription
"""
entry = ChangeEntry(request=self.request,
review_request=self.review_request,
changedesc=self.changedesc,
collapsed=False,
data=self.data)
self.assertEqual(entry.get_js_model_data(), {
'pendingStatusUpdates': False,
})
@add_fixtures(['test_scmtools'])
def test_get_js_model_data_with_status_updates(self):
"""Testing ChangeEntry.get_js_model_data for ChangeDescription with
status updates
"""
self.review_request.repository = self.create_repository()
diffset = self.create_diffset(self.review_request)
filediff = self.create_filediff(diffset)
review = self.create_review(self.review_request,
body_top='Body top',
body_bottom='Body bottom',
ship_it=True)
comment1 = self.create_diff_comment(review, filediff)
comment2 = self.create_diff_comment(review, filediff)
review.publish()
# This is needed by the entry's add_comment(). It's normally built when
# creating the entries and their data.
comment1.review_obj = review
comment2.review_obj = review
status_update = self.create_status_update(
self.review_request,
review=review,
change_description=self.changedesc)
entry = ChangeEntry(request=self.request,
review_request=self.review_request,
changedesc=self.changedesc,
collapsed=False,
data=self.data)
entry.add_update(status_update)
entry.add_comment('diff_comments', comment1)
entry.add_comment('diff_comments', comment2)
self.assertEqual(entry.get_js_model_data(), {
'reviewsData': [
{
'id': review.pk,
'bodyTop': 'Body top',
'bodyBottom': 'Body bottom',
'public': True,
'shipIt': True,
},
],
'diffCommentsData': [
(six.text_type(comment1.pk), six.text_type(filediff.pk)),
(six.text_type(comment2.pk), six.text_type(filediff.pk)),
],
'pendingStatusUpdates': False,
})
def test_build_entries(self):
"""Testing ChangeEntry.build_entries"""
changedesc1 = self.changedesc
changedesc2 = self.review_request.changedescs.create(
timestamp=changedesc1.timestamp + timedelta(days=1),
public=True)
review = self.create_review(self.review_request, public=True)
comment = self.create_general_comment(review)
status_update = self.create_status_update(
self.review_request,
review=review,
change_description=changedesc2)
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
entries = list(ChangeEntry.build_entries(self.data))
# These will actually be in database query order (newest to oldest),
# not the order shown on the page.
entry = entries[0]
self.assertEqual(entry.changedesc, changedesc2)
self.assertFalse(entry.collapsed)
self.assertEqual(entry.status_updates, [status_update])
self.assertEqual(
entry.status_updates_by_review,
{
review.pk: status_update,
})
self.assertEqual(
entry.status_updates[0].comments,
{
'diff_comments': [],
'screenshot_comments': [],
'file_attachment_comments': [],
'general_comments': [comment],
})
entry = entries[1]
self.assertEqual(entry.changedesc, changedesc1)
self.assertTrue(entry.collapsed)
self.assertEqual(entry.status_updates, [])
| mit | 1,292,732,422,292,569,000 | 37.728167 | 79 | 0.580194 | false |
yhpeng-git/mxnet | docs/mxdoc.py | 1 | 9901 | """A sphinx-doc plugin to build mxnet docs"""
import subprocess
import re
import os
import json
from recommonmark import transform
import pypandoc
# start or end of a code block
_CODE_MARK = re.compile('^([ ]*)```([\w]*)')
# language names and the according file extensions and comment symbol
_LANGS = {'python' : ('py', '#'),
'r' : ('R','#'),
'scala' : ('scala', '#'),
'julia' : ('jl', '#'),
'perl' : ('pl', '#'),
'cpp' : ('cc', '//'),
'bash' : ('sh', '#')}
_LANG_SELECTION_MARK = 'INSERT SELECTION BUTTONS'
_SRC_DOWNLOAD_MARK = 'INSERT SOURCE DOWNLOAD BUTTONS'
def _run_cmd(cmds):
"""Run commands, raise exception if failed"""
if not isinstance(cmds, str):
        # Assumption: multiple commands are meant to run sequentially in one shell call.
        cmds = " && ".join(cmds)
print("Execute \"%s\"" % cmds)
try:
subprocess.check_call(cmds, shell=True)
except subprocess.CalledProcessError as err:
print(err)
raise err
def generate_doxygen(app):
"""Run the doxygen make commands"""
_run_cmd("cd %s/.. && make doxygen" % app.builder.srcdir)
_run_cmd("cp -rf doxygen/html %s/doxygen" % app.builder.outdir)
def build_mxnet(app):
"""Build mxnet .so lib"""
_run_cmd("cd %s/.. && cp make/config.mk config.mk && make -j$(nproc) DEBUG=1" %
app.builder.srcdir)
def build_r_docs(app):
"""build r pdf"""
r_root = app.builder.srcdir + '/../R-package'
    # Assumption: the repository root sits one level above the Sphinx source dir.
    root_path = app.builder.srcdir + '/..'
    pdf_path = root_path + '/docs/api/r/mxnet-r-reference-manual.pdf'
_run_cmd('cd ' + r_root +
'; R -e "roxygen2::roxygenize()"; R CMD Rd2pdf . --no-preview -o ' + pdf_path)
dest_path = app.builder.outdir + '/api/r/'
_run_cmd('mkdir -p ' + dest_path + '; mv ' + pdf_path + ' ' + dest_path)
def build_scala_docs(app):
"""build scala doc and then move the outdir"""
scala_path = app.builder.srcdir + '/../scala-package/core/src/main/scala/ml/dmlc/mxnet'
    # scaladoc fails on some APIs, so exit 0 to pass the check
_run_cmd('cd ' + scala_path + '; scaladoc `find . | grep .*scala`; exit 0')
dest_path = app.builder.outdir + '/api/scala/docs'
_run_cmd('rm -rf ' + dest_path)
_run_cmd('mkdir -p ' + dest_path)
scaladocs = ['index', 'index.html', 'ml', 'lib', 'index.js', 'package.html']
for doc_file in scaladocs:
_run_cmd('cd ' + scala_path + ' && mv -f ' + doc_file + ' ' + dest_path)
def _convert_md_table_to_rst(table):
"""Convert a markdown table to rst format"""
if len(table) < 3:
return ''
out = '```eval_rst\n.. list-table::\n :header-rows: 1\n\n'
for i,l in enumerate(table):
cols = l.split('|')[1:-1]
if i == 0:
ncol = len(cols)
else:
if len(cols) != ncol:
return ''
if i == 1:
for c in cols:
                if len(c) != 0 and '---' not in c:
return ''
else:
for j,c in enumerate(cols):
out += ' * - ' if j == 0 else ' - '
out += pypandoc.convert_text(
c, 'rst', format='md').replace('\n', ' ').replace('\r', '') + '\n'
out += '```\n'
return out
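# Illustrative note (not part of the original module): a Markdown table such as
#
#   | Name | Value |
#   | ---  | ---   |
#   | foo  | 1     |
#
# is rewritten by _convert_md_table_to_rst() into a ```eval_rst fenced block
# containing a ``.. list-table::`` directive with ``:header-rows: 1``, where
# each cell is converted to reStructuredText through pypandoc.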
def convert_table(app, docname, source):
"""Find tables in a markdown and then convert them into the rst format"""
num_tables = 0
for i,j in enumerate(source):
table = []
output = ''
in_table = False
for l in j.split('\n'):
r = l.strip()
if r.startswith('|'):
table.append(r)
in_table = True
else:
                if in_table:
                    converted = _convert_md_table_to_rst(table)
                    if converted == '':
print("Failed to convert the markdown table")
print(table)
else:
num_tables += 1
output += converted
in_table = False
table = []
output += l + '\n'
source[i] = output
if num_tables > 0:
print('Converted %d tables in %s' % (num_tables, docname))
def _parse_code_lines(lines):
    """An iterator that reports whether each line is within a code block

    Returns
    -------
    iterator of (str, bool, str, int)
      - line: the line
      - in_code: whether this line is in a code block
      - lang: the code block language
      - indent: the code indent
    """
in_code = False
lang = None
indent = None
for l in lines:
m = _CODE_MARK.match(l)
if m is not None:
if not in_code:
if m.groups()[1].lower() in _LANGS:
lang = m.groups()[1].lower()
indent = len(m.groups()[0])
in_code = True
yield (l, in_code, lang, indent)
else:
yield (l, in_code, lang, indent)
lang = None
indent = None
in_code = False
else:
yield (l, in_code, lang, indent)
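# Illustrative helper (an assumption, not part of the original module): one way
# to use _parse_code_lines() is to collect the set of supported languages that
# appear in a document's fenced blocks, mirroring how add_buttons() uses it below.
def _collect_langs(lines):
    """Return the set of supported languages used in ``lines``' code blocks."""
    return set(lang for (_, _, lang, _) in _parse_code_lines(lines)
               if lang is not None and lang in _LANGS)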
def _get_lang_selection_btn(langs):
active = True
btngroup = '<div class="text-center">\n<div class="btn-group opt-group" role="group">'
for l in langs:
btngroup += '<button type="button" class="btn btn-default opt %s">%s</button>\n' % (
'active' if active else '', l[0].upper()+l[1:].lower())
active = False
btngroup += '</div>\n</div> <script type="text/javascript" src="../../_static/js/options.js"></script>'
return btngroup
def _get_blocks(lang, lines):
cur_block = []
pre_in_code = None
for (l, in_code, cur_lang, _) in _parse_code_lines(lines):
if in_code and cur_lang != lang:
in_code = False
if in_code != pre_in_code:
if pre_in_code and len(cur_block) >= 2:
cur_block = cur_block[1:-1] # remove ```
# remove empty lines at head
while len(cur_block) > 0:
if len(cur_block[0]) == 0:
cur_block.pop(0)
else:
break
# remove empty lines at tail
while len(cur_block) > 0:
if len(cur_block[-1]) == 0:
cur_block.pop()
else:
break
if len(cur_block):
yield (pre_in_code, cur_block)
cur_block = []
cur_block.append(l)
pre_in_code = in_code
if len(cur_block):
yield (pre_in_code, cur_block)
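# Note (illustration, not part of the original module): _get_blocks() yields
# (in_code, lines) chunks for the requested language. The ``` fences and any
# leading or trailing blank lines are stripped from each code chunk, and fenced
# blocks written in other languages are passed through as plain markdown.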
def _get_jupyter_notebook(lang, lines):
cells = []
for in_code, lines in _get_blocks(lang, lines):
cell = {
"cell_type": "code" if in_code else "markdown",
"metadata": {},
"source": '\n'.join(lines)
}
if in_code:
cell.update({
"outputs": [],
"execution_count": None,
})
cells.append(cell)
ipynb = {"nbformat" : 4,
"nbformat_minor" : 2,
"metadata" : {"language":lang, "display_name":'', "name":''},
"cells" : cells}
return ipynb
def _get_source(lang, lines):
cmt = _LANGS[lang][1] + ' '
out = []
for in_code, lines in _get_blocks(lang, lines):
if in_code:
out.append('')
for l in lines:
if in_code:
if '%matplotlib' not in l:
out.append(l)
else:
if ('<div>' in l or '</div>' in l or
'<script>' in l or '</script>' in l or
'<!--' in l or '-->' in l or
'%matplotlib' in l ):
continue
out.append(cmt+l)
if in_code:
out.append('')
return out
def _get_src_download_btn(out_prefix, langs, lines):
btn = '<div class="btn-group" role="group">\n'
for lang in langs:
ipynb = out_prefix + '_' + lang + '.ipynb'
with open(ipynb, 'w') as f:
json.dump(_get_jupyter_notebook(lang, lines), f)
src = out_prefix + '.' + _LANGS[lang][0]
with open(src, 'w') as f:
f.write('\n'.join(_get_source(lang, lines)))
for f in [ipynb, src]:
f = f.split('/')[-1]
btn += '<button type="button" class="btn btn-default download" '
btn += 'onclick="window.location=\'%s\'"><span class="glyphicon glyphicon-download-alt"></span> %s </button>\n' % (f, f)
btn += '</div>\n'
return btn
def add_buttons(app, docname, source):
out_prefix = app.builder.outdir + '/' + docname
dirname = os.path.dirname(out_prefix)
if not os.path.exists(dirname):
os.makedirs(dirname)
for i,j in enumerate(source):
lines = j.split('\n')
langs = set([l for (_, _, l, _) in _parse_code_lines(lines)
if l is not None and l in _LANGS])
# first convert
for k,l in enumerate(lines):
if _SRC_DOWNLOAD_MARK in l:
lines[k] = _get_src_download_btn(
out_prefix, langs, lines)
# then add lang buttons
for k,l in enumerate(lines):
if _LANG_SELECTION_MARK in l:
lines[k] = _get_lang_selection_btn(langs)
source[i] = '\n'.join(lines)
def setup(app):
app.connect("builder-inited", build_mxnet)
app.connect("builder-inited", generate_doxygen)
app.connect("builder-inited", build_scala_docs)
    # Skip building the R docs: they require a LaTeX installation, which is too heavy a dependency here.
# app.connect("builder-inited", build_r_docs)
app.connect('source-read', convert_table)
app.connect('source-read', add_buttons)
app.add_config_value('recommonmark_config', {
'url_resolver': lambda url: 'http://mxnet.io/' + url,
'enable_eval_rst': True,
}, True)
app.add_transform(transform.AutoStructify)
| apache-2.0 | 6,479,518,435,546,730,000 | 34.360714 | 132 | 0.503888 | false |
adampresley/trackathon | model/DateHelper.py | 1 | 2129 | from Service import Service
from datetime import tzinfo, timedelta, datetime
from dateutil import tz
class DateHelper(Service):
utc = tz.gettz("UTC")
pyToJsFormatMapping = {
"%m/%d/%Y": "MM/dd/yyyy",
"%d/%m/%Y": "dd/MM/yyyy",
"%Y-%m-%d": "yyyy-MM-dd"
}
def __init__(self, db, timezone = "UTC", dateFormat = "%m/%d/%Y", timeFormat = "%I:%M %p"):
self.db = db
self._timezone = timezone
self._dateFormat = dateFormat
self._timeFormat = timeFormat
def addDays(self, d, numDays = 1, format = "%Y-%m-%d"):
if not self.isDateType(d):
d = datetime.strptime(d, format)
newDate = d + timedelta(days = numDays)
return newDate
def dateFormat(self, d):
return self.utcToTimezone(d, self._timezone).strftime(self._dateFormat)
def dateTimeFormat(self, d):
return self.utcToTimezone(d, self._timezone).strftime("%s %s" % (self._dateFormat, self._timeFormat))
def isDateType(self, d):
result = True
try:
d.today()
except AttributeError as e:
result = False
return result
def localNow(self):
return self.utcToTimezone(datetime.now(self.utc), self._timezone)
def now(self):
return datetime.now(self.utc)
def pyToJsDateFormat(self, pyDateFormat):
return self.pyToJsFormatMapping[pyDateFormat]
def restDateFormat(self, d):
return d.strftime("%Y-%m-%d")
def restDateTime(self, d):
return d.strftime("%Y-%m-%d %H:%M")
def timeFormat(self, d):
return self.utcToTimezone(d, self._timezone).strftime(self._timeFormat)
def utcToTimezone(self, d, timezone):
targetTZ = tz.gettz(timezone)
d = d.replace(tzinfo = self.utc)
return d.astimezone(targetTZ)
def validateDateRange(self, start, end, format = "%Y-%m-%d"):
#
        # If the range between start and end spans more than 91 days,
        # kick it back and default both dates to today.
#
parsedStart = datetime.strptime(start, format)
parsedEnd = datetime.strptime(end, format)
delta = parsedEnd - parsedStart
newStart = start
newEnd = end
if delta.days > 91:
newStart = self.restDateFormat(self.localNow())
newEnd = self.restDateFormat(self.localNow())
return (newStart, newEnd)
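if __name__ == "__main__":
    # Illustrative smoke test (not part of the original file). The db handle is
    # only stored by __init__ and never used by these helpers, so None is fine.
    helper = DateHelper(None, timezone="America/Chicago")
    print(helper.dateTimeFormat(helper.now()))
    # A 180-day span exceeds the 91-day limit, so both dates fall back to today.
    print(helper.validateDateRange("2023-01-01", "2023-06-30"))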
| mit | 6,435,613,590,564,856,000 | 24.058824 | 103 | 0.685298 | false |
JmeHsieh/issue_aggregator | generate_url_list.py | 1 | 1184 | #!/usr/bin/env python3
import json
from subprocess import PIPE, Popen
from requests import get
repo_master_raw = 'https://raw.githubusercontent.com/g0v/awesome-g0v/master/'
readme = 'readme.md'
parser = 'parse.ls'
awesome_g0v = 'awesome-g0v.json'
outfile = 'url_list.json'
def get_source():
readme_url = repo_master_raw + readme
parser_url = repo_master_raw + parser
with open('./data/{}'.format(readme), 'wb+') as f:
response = get(readme_url)
f.write(response.content)
with open('./data/{}'.format(parser), 'wb+') as f:
response = get(parser_url)
f.write(response.content)
def run_parser():
try:
with Popen(['lsc', parser], cwd='./data/', stdout=PIPE) as p:
print(p.stdout.read().decode('utf-8'))
except Exception as e:
print(e)
def output_url_list():
with open('./data/{}'.format(awesome_g0v), 'r') as f:
js = json.load(f)
rs = [j['repository'] for j in js if 'github.com' in j['repository']]
with open('./data/{}'.format(outfile), 'w+') as f:
f.write(json.dumps(rs))
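# Illustration (assumed output shape, not part of the original script): the
# resulting ./data/url_list.json is a flat JSON array of GitHub repository URLs,
# e.g. ["https://github.com/g0v/awesome-g0v", ...].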
if __name__ == "__main__":
get_source()
run_parser()
output_url_list()
| mit | -4,816,565,584,934,220,000 | 23.666667 | 77 | 0.597128 | false |
Pistachitos/Sick-Beard | sickbeard/history.py | 1 | 2727 | # Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import db
import datetime
from sickbeard.common import SNATCHED, SUBTITLED, Quality
dateFormat = "%Y%m%d%H%M%S"
def _logHistoryItem(action, showid, season, episode, quality, resource, provider):
logDate = datetime.datetime.today().strftime(dateFormat)
myDB = db.DBConnection()
myDB.action("INSERT INTO history (action, date, showid, season, episode, quality, resource, provider) VALUES (?,?,?,?,?,?,?,?)",
[action, logDate, showid, season, episode, quality, resource, provider])
def logSnatch(searchResult):
for curEpObj in searchResult.episodes:
showid = int(curEpObj.show.tvdbid)
season = int(curEpObj.season)
episode = int(curEpObj.episode)
quality = searchResult.quality
providerClass = searchResult.provider
        if providerClass is not None:
provider = providerClass.name
else:
provider = "unknown"
action = Quality.compositeStatus(SNATCHED, searchResult.quality)
resource = searchResult.name
_logHistoryItem(action, showid, season, episode, quality, resource, provider)
def logDownload(episode, filename, new_ep_quality, release_group=None):
showid = int(episode.show.tvdbid)
season = int(episode.season)
epNum = int(episode.episode)
quality = new_ep_quality
# store the release group as the provider if possible
if release_group:
provider = release_group
else:
provider = -1
action = episode.status
_logHistoryItem(action, showid, season, epNum, quality, filename, provider)
def logSubtitle(showid, season, episode, status, subtitleResult):
resource = subtitleResult.release if subtitleResult.release else ''
provider = subtitleResult.service
status, quality = Quality.splitCompositeStatus(status)
action = Quality.compositeStatus(SUBTITLED, quality)
    _logHistoryItem(action, showid, season, episode, quality, resource, provider)
| gpl-3.0 | 3,868,721,296,955,677,700 | 32.268293 | 132 | 0.703337 | false |
meine-stadt-transparent/meine-stadt-transparent | mainapp/models/person.py | 1 | 1634 | from django.db import models
from django.urls import reverse
from django.utils.translation import gettext as _
from .helper import DefaultFields, DummyInterface
from .location import Location
class Person(DefaultFields, DummyInterface):
name = models.CharField(max_length=100)
given_name = models.CharField(max_length=100)
family_name = models.CharField(max_length=100)
location = models.ForeignKey(
Location, null=True, blank=True, on_delete=models.CASCADE
)
def __str__(self):
return self.name
    def name_autocomplete(self):
        """A workaround to prevent empty values in the Elasticsearch
        autocomplete field, which would otherwise raise an error."""
        return self.name if len(self.name) > 0 else " "
def get_default_link(self):
return reverse("person", args=[self.id])
def organization_ids(self):
return [organization.id for organization in self.membership_set.all()]
def sort_date(self):
if hasattr(self, "sort_date_prefetch"):
if self.sort_date_prefetch:
return self.sort_date_prefetch[0].start
else:
return self.created
# The most recent time this person joined a new organization
latest = (
self.membership_set.filter(start__isnull=False).order_by("-start").first()
)
if latest:
return latest.start
else:
return self.created
@classmethod
def dummy(cls, oparl_id: str) -> "Person":
return Person(
name=_("Missing Person"), given_name=_("Missing"), family_name=_("Missing")
)
        )
| mit | 1,775,969,022,176,492,000 | 31.68 | 116 | 0.638311 | false |