ext (stringclasses, 9 values) | sha (stringlengths 40–40) | content (stringlengths 3–1.04M)
---|---|---|
py | b402736fe41a1923f5e1f2be2b9ac727b56303ec | from controller import Robot
from controller import Motor, PositionSensor, DistanceSensor, GPS, Camera, Receiver, Emitter
import cv2
import numpy as np
import math
import time
robot = Robot()
timeStep = 32
tile_size = 0.12
speed = 6.28
media_baldoza = 0.06
estado = 1
start = 0
global r
global g
global b
r = 0
g = 0
b = 0
# start = robot.getTime()
# Camera initialization
camera = robot.getDevice("camera3")
camera.enable(timeStep)
# Colour sensor initialization
colour_sensor = robot.getDevice("colour_sensor")
colour_sensor.enable(timeStep)
# Distance sensor initialization
distancia_sensor1 = robot.getDevice("distance sensor1")
distancia_sensor1.enable(timeStep)
# Motor initialization
ruedaIzquierda = robot.getDevice("wheel1 motor")
ruedaDerecha = robot.getDevice("wheel2 motor")
ruedaIzquierda.setPosition(float('inf'))
ruedaDerecha.setPosition(float('inf'))
rIzq_encoder = ruedaIzquierda.getPositionSensor()
rDer_encoder = ruedaDerecha.getPositionSensor()
rIzq_encoder.enable(timeStep)
rDer_encoder.enable(timeStep)
# Functions
def leer_sensores():
global r
global g
global b
# Color sensor
image = colour_sensor.getImage()
r = colour_sensor.imageGetRed(image, 1, 0, 0)
g = colour_sensor.imageGetGreen(image, 1, 0, 0)
b = colour_sensor.imageGetBlue(image, 1, 0, 0)
# blue: r=65 g=65 b=252
# red: r=252 g=65 b=65
# print("r: " + str(r) + " g: " + str(g) + " b: " + str(b))
"""
# Camera
image = camera.getImage()
imagen = np.frombuffer(image, np.uint8).reshape((camera.getHeight(), camera.getWidth(), 4))
frame = cv2.cvtColor(imagen, cv2.COLOR_BGRA2BGR)
cv2.imshow("frame", frame)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Grayscale
cv2.imshow("grayScale", frame)
cv2.threshold(frame, 80, 255, cv2.THRESH_BINARY) # Threshold
cv2.imshow("thresh", frame)
cv2.waitKey(1)
# Distance sensor
print("Distancia: " + str(distancia_sensor1.getValue()))
"""
def avanzar(vel):
ruedaIzquierda.setVelocity(vel)
ruedaDerecha.setVelocity(vel)
def retroceder(vel):
ruedaIzquierda.setVelocity(-vel)
ruedaDerecha.setVelocity(-vel)
def girar_der(vel):
ruedaIzquierda.setVelocity(-vel)
ruedaDerecha.setVelocity(vel)
def girar_izq(vel):
ruedaIzquierda.setVelocity(vel)
ruedaDerecha.setVelocity(-vel)
gyro = robot.getDevice("gyro")
gyro.enable(timeStep)
def rotar(angulo):
global angulo_actual
global tiempo_anterior
# start the rotation
if angulo > 0:
girar_der(0.5)
else:
girar_izq(0.5)
# Keep turning while the requested angle has not been reached
if (abs(abs(angulo) - angulo_actual) > 1):
tiempo_actual = robot.getTime()
# print("Inicio rotacion angulo", angulo, "Angulo actual:",angulo_actual)
tiempo_transcurrido = tiempo_actual - \
tiempo_anterior # tiempo que paso en cada timestep
# rad/seg * mseg * 1000
radsIntimestep = abs(gyro.getValues()[1]) * tiempo_transcurrido
degsIntimestep = radsIntimestep * 180 / math.pi
# print("rads: " + str(radsIntimestep) +
# " | degs: " + str(degsIntimestep))
angulo_actual += degsIntimestep
# If it exceeds 360 degrees, wrap the rotation back around starting from 0 degrees
angulo_actual = angulo_actual % 360
# If it is below 0 degrees, wrap it back into range by adding 360
if angulo_actual < 0:
angulo_actual += 360
tiempo_anterior = tiempo_actual
# print("Angulo actual:", angulo_actual)
return False
#print("Rotacion finalizada.")
angulo_actual = 0
return True
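# Illustrative note (assuming gyro.getValues()[1] is this robot's yaw axis): rotar()
# integrates |angular velocity| * elapsed time into angulo_actual on each timestep and
# returns True once the accumulated angle is within 1 degree of the request, so a
# caller such as rotar_enclavado(90) keeps stepping until roughly a quarter turn.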
def delay(ms):
initTime = robot.getTime() # Store starting time (in seconds)
while robot.step(timeStep) != -1:
print("delay")
if (robot.getTime() - initTime) * 1000.0 > ms: # If time elapsed (converted into ms) is greater than value passed in
avanzar(0)
break
def rotar_enclavado(angulo):
while robot.step(timeStep) != -1:
leer_sensores()
# print("r: " + str(r) + " g: " + str(g) + " b: " + str(b))
if rotar(angulo) == True: # If time elapsed (converted into ms) is greater than value passed in
avanzar(0)
break
def avance(tipo_avance):
start = rDer_encoder.getValue()
velocidad = 0
avance = 0
if tipo_avance == "medio":
velocidad = 3
avance = 2.9
elif tipo_avance == "largo":
avance = 5.9
velocidad = 5.96
elif tipo_avance == "esquina":
avance = 4.1
velocidad = 6.28
while robot.step(timeStep) != -1:
avanzar(velocidad)
leer_sensores()
tipo_pizza()
# print("r: " + str(r) + " g: " + str(g) + " b: " + str(b))
if rDer_encoder.getValue() >= start + avance:
avanzar(0)
break
def retroceso(tipo_retroceso):
start = rDer_encoder.getValue()
velocidad = 0
retroceso = 0
if tipo_retroceso == "medio":
velocidad = 6.28
retroceso = 2.9
elif tipo_retroceso == "largo":
retroceso = 5.9
velocidad = 5.96
elif tipo_retroceso == "esquina":
retroceso = 4.1
velocidad = 6.28
elif tipo_retroceso == "poquito":
retroceso = 1.9
velocidad = 6.28
while robot.step(timeStep) != -1:
retroceder(velocidad)
leer_sensores()
# print("r: " + str(r) + " g: " + str(g) + " b: " + str(b))
if start - retroceso >= rDer_encoder.getValue():
avanzar(0)
break
def tipo_pizza():
#print("valores(1): r:" + str(r) + " , g:" + str(g) + " , b:" + str(b))
if 255 >= r >= 240 and 60 <= b <= 75 and 60 <= g <= 75:
print("(Red)pasaje zona 3 a 1")
elif 150 >= r >= 100 and 210 <= b <= 230 and 60 <= g <= 75:
print("(Vaiolet)pasaje zona 2 a 3")
elif 60 <= r <= 75 and 255 >= b >= 245 and 60 <= g <= 75:
print("(Blue)pasaje zona 1 a 2")
elif 200 <= r <= 220 and 110 >= b >= 100 and 175 <= g <= 180:
print("Entered swamp")
return "swamp"
elif 250 >= r >= 230 and 250 >= b >= 235 and 250 >= g >= 235:
print("Found Checkpoint")
elif r == 233 and b == 233 and g == 233:
print("Azulejo normal")
elif 30 <= r <= 50 :
print("un agujero negro we")
retroceso("medio")
rotar_enclavado(90)
else:
return "prueba"
angulo_actual = 0
tiempo_anterior = robot.getTime()
contador = 0
while robot.step(timeStep) != -1:
avance("medio")
|
py | b40274448f1e8621ef7e78bce899e892e5532ead | """Quick and Dirty, a high level file access wrapper.
The QnD interface looks like this::
f = format_specific_open(filename, mode) # e.g. openh5
var = f.varname # read var
var = f['varname']
var = f.get('varname', default)
f.var = var_value # declare and write var
f['var'] = something
f.var = dtype, shape # declare var without writing
f.update({...vars...}, another_var=value, ...)
f.grpname = {...vars...} # declare a subgroup and some members
if name in f: do_something
varnames = list(f)
for name in f: do_something
for name, var in f.items(): do_something
g = f.grpname
f = g.root() # Use the root method to get the top-level QGroup.
f.close() # important if you have written to f
Generally, a QnD QGroup like `f` in the example behaves like a dict.
However, you may also reference variables or subgroups as if they were
attributes. Use attributes to access variables when you know the
variable name. In short, use square brackets when the variable name
is the value of an expression. (QnD will remove a single trailing
underscore from any attribute reference, so you can use ``f.yield_``
for ``f['yield']`` or ``f.items_`` for ``f['items']``.) The adict
module has an `ADict` class and a `redict` function to produce ordinary
in-memory dict objects with their items accessible as attributes with
the same rules. You can read a whole file (or a whole subgroup) like
this::
ff = f(2)
The optional `2` argument is the auto-read mode flag. By default, the
auto-read mode flag is set to 1, which causes ``f.varname`` to read an
array variable and return its value, but to simply return a QGroup
object (like `f`) if the name refers to a subgroup. When the `auto`
flag equals `2`, any subgroups are read recursively, and their values
become ADict instances. (QnD also supports QList variables, and
``auto=2`` mode returns those as python list instances.)
The ``items()`` method also accepts an optional `auto` argument to
temporarily change auto-read mode used for the iteration.
You can turn auto-read mode off by setting the `auto` flag to `0`. In
this mode, referencing a variable returns a QLeaf instance without
reading it. This enables you to query a variable without reading it.
You can also do that by retrieving the attributes object::
with f.push(): # Use f as a context manager to temporarily change modes.
f.auto(0) # Turn off auto-read mode.
v = f.varname
value = v() # Read a QLeaf by calling it...
value = v[:] # ...or by indexing it.
v(value) # Write a QLeaf by calling it with an argument...
v[:] = value # ...or by setting a slice.
v.dtype, v.shape, v.size, v.ndim # properties of the QLeaf v
# An alternate method which pays no attention to auto mode:
va = f.attrs.varname # Get attributes of varname.
va.dtype, va.shape, va.size, va.ndim # Built-in pseudo-attributes.
# You can use va to get or set real attributes of varname as well:
units = va.units # retrieve units attribute
va.centering = 1 # set centering attribute
When you call a QGroup like `f` as a function, you may also pass it a
list of variable names to read only that subset of variables. With
auto-read mode turned off, this results in a sort of "casual subgroup"::
g = f(0, 'vname1', 'vname2', ...)
h = f(1, 'vname1', 'vname2', ...)
ff = f(2, 'vname1', 'vname2', ...)
Here, g is an ADict containing QLeaf and QGroup objects, with nothing at
all read from the file; h is an ADict containing ndarray and QGroup
objects; and ff is an ADict containing ndarray and ADict objects, with
no references at all to `f`.
If you want to use `f` as a context manager in the manner of other
python file handles, so that the file is closed when you exit the with
statement, just do it::
with openh5(filename, "a") as f:
do_something(f)
# f has been properly flushed and closed on exit from the with.
------
QnD also supports old netCDF style UNLIMITED dimensions, and their
equivalents in HDF5. Unlike the netCDF or HDF5 interface, in QnD the
first (slowest varying) dimension of these arrays maps to a python
list, so we regard the entire collected variable as a list of
ndarrays. The netCDF record number is the index into the list, while
any faster varying dimensions are real ndarray dimensions. This
subtle difference in approach is more consistent with the way these
variables are stored, and also generalizes to the fairly common case
that the array dimensions -- often mesh dimensions -- change from one
record to the next.
To write records using QnD, turn on "recording mode"::
f.recording(1) # 0 for off, 2 for generalized records
f.time = 0.
f.x = x = arange(10)
f.time = 0.5
f.x = x**2
Ordinarily, when you set the value of ``f.time`` or ``f.x``, any
previous value will be overwritten. But in recording mode, each time
you write a variable, you create a new record, saving the new value
without overwriting the previous value. If you want all record
variables to have the same number of records, you need to be sure
you write them each the same number of times. One way to do that is
to use the update function rather than setting them one at a time::
record = ADict()
record.time, record.x = 0., arange(10)
f.recording(1)
f.update(record)
record.time, record.x = 0.5, record.x**2
f.update(record)
You cannot change a variable from not having records to having records
(or from recording mode 1 to recording mode 2); the recording mode in
force when a variable was first declared determines if and how all
future write operations behave.
Reading back record variables introduces "goto mode". Initially, goto
mode is off or None, so that reading a record variable gets the whole
collection of values as a QList, or as an ordinary python list if
auto mode is on::
f.goto(None) # explicitly turn off goto mode
f.auto(2)
times = f.time # python list of f.time values
xs = f.x # python list of f.x arrays
f.auto(0)
time = f.time # QList for the collection of time values
nrecords = len(time)
On the other hand, with goto mode turned on, the fact that `time` and `x`
are record variables disappears, so that your view of ``f.time`` and
``f.x`` matches what it was when you recorded them. You use the goto
function to set the record::
f.goto(0) # first record is 0, like any python list
t = f.time # == 0.
f.goto(1) # set to second record
t = f.time # == 0.5
x = f.x # == arange(10)**2
f.goto(-1) # final record, negative index works like any python list
# You can also pass a keyword to goto, which can be the name of any
# scalar record variable, to go to the record nearest that value.
f.goto(time=0.1) # will select record 0 here
current_record = f.goto() # goto() returns current record number
for r in f.gotoit(): do_something # f.goto(r) is set automatically
Note the ``gotoit()`` method returns an iterator over all records,
yielding the record number for each pass, and setting the goto record
for each pass automatically. You can use ``f.push()`` in a with
statement to temporarily move to a different record.
If you set the recording mode to `2`, the record variables need not
have the same shape or same type from one record to the next (indeed,
they can be a subgroup on one record and an array on another). This
cannot be represented as an UNLIMITED array dimension in an HDF5 or
netCDF file, so the QList variable in QnD will become an HDF5 group in
this case, where variable names in the group are _0, _1, _2, and so on
for QList element 0, 1, 2, and so on (plus a hidden element _ which
identifies this group as a list when it is empty). You can create a
QList of this general type without using recording or goto mode at
all::
f.recording(0) # Turn off recording and goto modes.
f.goto(None)
f.varname = list # Make an empty QList
ql = f.varname
ql.append(value0)
ql.extend([value1, value2, ...])
var = ql[1] # retrieves value1
nelements = len(ql) # current number of elements (also works for QGroup)
ql.auto(0) # a QList has auto mode just like a QGroup
for var in ql: do_something # var depends on ql auto mode setting
------
"""
from __future__ import absolute_import
# The three kinds of objects we support here are:
# 1. QGroup --> dict with str keys
# 2. QList --> list
# 3. QLeaf --> ndarray with dtype.kind in buifcS, with U encoded as S
# and V handled as recarray.
# We attempt to store arbitrary python objects as a QGroup with member
# __class__ = 'modulename.classname' (or just classname for builtin)
# remainder of QGroup is the instance __dict__, unless __class__ has a
# __setstate__, in which case argument to that method stored in the
# __setstate__ variable.
# If __class__ has a __getnewargs__, result is written sub-QGroup with
# _0, _1, ..., which will be passed to the class constructor -- otherwise
# the class starts empty and neither __new__ nor __init__ is called.
# List or tuple objects not distinguished, becoming QList items.
# Dict objects with non-text keys stored with __class__ = 'dict' and
# members _0, _1, _2, etc., where even item is key and following odd item
# is corresponding value.
# We ask the backend to support the variable value None as a QLeaf in
# addition to the arrays, if possible. Arrays with zero length dimensions
# should also be supported if possible.
#
# This qnd module also provides a low level QnDList implementation of the
# QList in terms of the backend QGroup (recording=2) and QLeaf (recording=1)
# implementations, for backends which do not support a native list type.
# The convention is that a generic QList is a QGroup with a blank member _
# and members _0, _1, _2, etc. If the backend supports QLeaf arrays with an
# UNLIMITED leading dimension, these can also be presented as QList
# variables by the QnD API.
# Backend object methods assumed here:
# qnd_group methods: close(), flush(), root()
# isgroup() -> 1, islist() -> 0, isleaf() -> 0
# __len__, __iter__ returns names
# lookup(name) -> None if not found
# declare(name, dtype, shape, unlim) dtype can be dict, list, or None
# attget(vname) --> variable attributes, vname='' for group attributes
# attset(vname, aname, dtype, shape, value) --> variable attributes
# qnd_list methods: root()
# isgroup() -> 0, isleaf() -> 0
# islist() -> 1 if this is UNLIMITED dimension, -> 2 if anonymous group
# __len__, __iter__ returns unread elements
# index(i) -> None if i out of range
# declare(dtype, shape) dtype can be dict, list, or None
# qnd_leaf methods: root()
# isgroup() -> 0, islist() -> 0, isleaf() -> 1
# query() -> dtype, shape, sshape (None, (), () for None)
# read(args)
# write(value, args)
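# Illustrative sketch (hypothetical, not part of QnD): a dict-backed qnd_group
# implementing the protocol above might start out like the commented skeleton
# below; a real backend wraps file I/O and returns backend leaf/list/group
# objects from lookup() and declare().
#
#   class ExampleBackendGroup:
#       def __init__(self):
#           self._items = {}        # name -> backend leaf, list, or group object
#           self._attrs = {"": {}}  # vname -> {aname: value}; "" is the group itself
#       def isgroup(self): return 1
#       def islist(self): return 0
#       def isleaf(self): return 0
#       def root(self): return self
#       def close(self): pass
#       def flush(self): pass
#       def __len__(self): return len(self._items)
#       def __iter__(self): return iter(self._items)
#       def lookup(self, name): return self._items.get(name)  # None if not found
#       def declare(self, name, dtype, shape, unlim=None):
#           ...  # allocate and return a backend leaf/list/group object here
#       def attget(self, vname): return self._attrs.setdefault(vname, {})
#       def attset(self, vname, aname, dtype, shape, value):
#           self._attrs.setdefault(vname, {})[aname] = value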
import sys
from weakref import proxy, ProxyTypes
from importlib import import_module
import re
# Major change in array(x) function semantics when x is a list of ragged
# arrays: This now generates a VisibleDeprecationWarning as of 1.19, and
# presumably an exception for some future numpy (beyond 1.21). See the
# _categorize function below for the workaround.
from warnings import catch_warnings, simplefilter
from numpy import VisibleDeprecationWarning
from numpy import (dtype, asfarray, asanyarray, arange, interp, where, prod,
ndarray)
from numpy.core.defchararray import encode as npencode, decode as npdecode
from .adict import ItemsAreAttrs, ADict
PY2 = sys.version_info < (3,)
if PY2:
range = xrange
else:
basestring = str
_NOT_PRESENT_ = object()
_us_digits = re.compile(r"^_\d*$")
class QGroup(ItemsAreAttrs):
"""Group of subgroups, lists, and ndarrays.
You reference QGroup items by name, either as ``qg['name']`` like a
dict item, or equivalently as ``qg.name`` like an object attribute.
Use ``[]`` when the item name is an expression or the contents of
a variable; use ``.`` when you know the name of the item. You can
use ``[]`` or ``.`` to both get and set items in the QGroup. To
read the entire group into an ADict, call it like a function, ``qg()``;
you may supply a list of names to read only a subset of items.
A QGroup acts like a dict in many ways::
if 'name' in qg: do_something
for name in qg: do_something
item_names = list(qg) # qg.keys() exists but is never necessary
for name, item in qg.items(): do_something
qg.update({name0: val0, ...}, [(name1, val1), ...], name2=val2, ...)
value = qg.get('name', default)
A QGroup has several possible states or modes:
1. Recording mode, turned on by ``qg.recording(1)`` and off by
``qg.recording(0)``, affects what happens when you set group items.
With recording mode off, setting an item to an array creates the
item as an array if its name has not been used, or otherwise writes
its new value, requiring it be compatible with the dtype and shape
of the previous declaration. With recording mode on, setting an
item for the first time creates a QList and sets its first element
to the given value, and subsequently setting that item appends the
given value to the existing QList. There is also a recording mode
``qg.recording(2)`` in which subsequent values need not match the
dtype or shape of the first item. You may not switch recording
modes for a given item; the mode in effect when an item is first
created governs the behavior of that item.
2. Goto mode, in which you set a current record with ``qg.goto(rec)``.
Any item you retrieve or query which is a QList retrieves or queries
the element with 0-origin index ``rec`` instead of the whole QList.
You turn off goto mode with ``qg.goto(None)``. There is also a
``qg.gotoit()`` function which returns an iterator over all the
records (generally the longest QList in ``qg``).
3. Auto mode, turned on by ``qg.auto(1)`` and off by ``qg.auto(0)``,
in which getting any item reads and returns its value, rather than
a QLeaf object. There is also a ``qg.auto(2)`` mode in which
the auto-read feature applies to any QGroup or QList (if goto mode
is off) items recursively.
A QGroup has `push` and `drop` methods which can be used to save and
restore all its modes. The `drop` method is called implicitly upon
exit from a with statement, so you can use the QGroup as a context
manager::
with openh5('myfile.h5', 'a') as qg:
do_something(qg)
with qg.push():
qg.goto(rec)
do_something_else(qg)
# qg restored to goto mode state before with.
do_even_more(qg)
# qg flushed and closed upon exit from a with clause that has
# no corresponding push
Attributes
----------
islist
isleaf
Always 0.
isgroup
Always 1.
dtype
Always ``dict``, the builtin python type.
shape
ndim
size
sshape
Always None.
"""
__slots__ = "_qnd_group", "_qnd_state", "_qnd_cache", "__weakref__"
isgroup = 1
islist = isleaf = 0
dtype, shape, ndim, size, sshape = dict, None, None, None, None
def __init__(self, item=None, state=None, auto=None, recording=None,
goto=None):
object.__setattr__(self, "_qnd_group", item)
object.__setattr__(self, "_qnd_state",
QState() if state is None else QState(state))
object.__setattr__(self, "_qnd_cache", None)
state = self._qnd_state
if auto is not None:
state.auto = int(auto)
if recording is not None:
state.recording = int(recording)
if goto is not None:
state.goto = int(goto)
def recording(self, flag):
"""Change recording mode for this QGroup.
With recording mode off, writing to a variable overwrites that
variable. With recording mode on, new variables are declared as
a QList and subsequent write operations append a new element to
this QList instead of overwriting any previously stored values.
In netCDF parlance, variables declared in recording mode are
record variables. Writing to a variable declared when recording
mode was off will always overwrite it; once declared, you cannot
convert a variable to a QList simply by turning on recording mode.
See goto mode for handling record variable read operations.
A `flag` value of 0 turns off recording mode. A `flag` of 1 turns
on recording mode, utilizing a trailing UNLIMITED array dimension
in netCDF or HDF5 parlance, which promises that all values written
will have the same dtype and shape. A `flag` of 2 places no
restrictions on the dtype or shape of the QList elements; such
an unrestricted QList resembles an anonymous QGroup.
"""
self._qnd_state.recording = int(flag)
def goto(self, record=_NOT_PRESENT_, **kwargs):
"""Set the current record for this QGroup, or turn off goto mode.
Pass `record` of None to turn off goto mode, so that QList
variables appear as the whole QList. Setting an integer `record`
makes any QList variable appear to be the specified single
element. A `record` value may be negative, with the usual python
interpretation for a negative sequence index. If different QList
variables have different lengths, the current `record` may be
out of range for some variables but not for others. (Hence using
goto mode may be confusing in such situations.)
Note that you can temporarily set goto mode using a with clause.
This `goto` method also accepts a keyword argument instead of a
`record` number. The keyword name must match the name of a
QList variable in this QGroup, whose values are scalars. This
will set `record` to the record where that variable is nearest
the keyword value. Thus, ``goto(time=t)`` selects the record
nearest `time` t.
As a special case, you can get the current record number by calling
`goto` with neither a `record` nor a keyword::
current_record = qg.goto()
"""
if kwargs:
if record is not _NOT_PRESENT_:
raise TypeError("either use keyword or record index")
if len(kwargs) != 1:
raise TypeError("only one keyword argument accepted")
name, val = list(kwargs.items())[0]
records, values = self._qnd_goto_recs(name)
val = float(val)
n = values.size
if n > 1:
# result of interp is float scalar in older numpy versions
# rather than numpy.float64, cannot use astype
i = int(interp(val, values, arange(n) + 0.5))
else:
i = 0
record = records[min(i, n-1)]
elif record is _NOT_PRESENT_:
return self._qnd_state.goto
elif record is not None:
record = int(record)
self._qnd_state.goto = record
def _qnd_goto_recs(self, name):
cache = self._qnd_cache
values = cache.get(name) if cache else None
if values is None:
item = self._qnd_group.lookup(name)
if item is not None and item.islist():
with self.push():
self.goto(None)
self.auto(2)
values = self[name]
values = asfarray(values)
if values.ndim != 1 or values.size < 1:
values = None
if values is None:
raise TypeError("{} is not scalar record variable"
"".format(name))
values = _monotonize(values)
if not cache:
cache = {}
object.__setattr__(self, "_qnd_cache", cache)
cache[name] = values
return values # returned by _monotonize
def gotoit(self, name=None):
"""Iterate over goto records, yielding current record.
Optional `name` argument is the name of a `goto` method keyword,
which may implicitly remove records corresponding to non-monotonic
changes of that variable. If `name` is a decreasing variable,
the record order will be reversed.
As a side effect, the current record of this QGroup will be set
during each pass. If the loop completes, the original goto state
will be restored, but breaking out of the loop will leave the
goto record set.
"""
if name is not None:
records, _ = self._qnd_goto_recs(name)
else:
# scan through all variables to find the largest record count
nrecords = 0
for name in self._qnd_group:
item = self._qnd_group.lookup(name)
if item.islist():
n = len(item)
if n > nrecords:
nrecords = n
records = arange(nrecords)
r0 = self._qnd_state.goto
for r in records:
self._qnd_state.goto = r
yield r
self._qnd_state.goto = r0
def auto(self, recurse):
"""Set the auto-read mode for this QGroup.
In auto-read mode, getting an item returns its value, rather than a
QLeaf. If the item is a QGroup or QList, that is returned if
the `recurse` value is 1, whereas if `recurse` is 2, the QGroup
or QList variables will be read recursively. Setting `recurse` to
0 turns off auto-read mode entirely.
Note that you can temporarily set auto mode using a with clause.
"""
self._qnd_state.auto = int(recurse)
def push(self):
"""Push current recording, goto, and auto mode onto state stack."""
self._qnd_state.push()
return self
def drop(self, nlevels=None, close=False):
"""Restore previous recording, goto, and auto mode settings.
Default ``drop()`` drops one pushed state, ``drop(n)`` drops n,
``drop('all')`` drops all pushed states. By default, `drop` is
a no-op if there are no pushed states to drop, while ``drop(close=1)``
closes the file in that case; the latter is what happens implicitly
on exit from a with suite.
"""
if nlevels is None:
nlevels = 1
elif nlevels == "all":
nlevels = len(self._qnd_state) - 3
while nlevels >= 0:
if self._qnd_state.drop() and close:
self.close()
nlevels -= 1
def close(self):
"""Close associated file."""
this = self._qnd_group
if this is not None:
for nm in ["_qnd_group", "_qnd_state", "_qnd_cache"]:
object.__setattr__(self, nm, None)
this.close()
def flush(self):
"""Flush associated file."""
this = self._qnd_group
if this is not None:
this.flush()
def root(self):
"""Return root QGroup for this item."""
qgroup = self._qnd_group
root = qgroup.root()
if root is qgroup:
return self
state = QState(self._qnd_state) # copy
return QGroup(root, state)
def attrs(self):
"""Return attribute tree for variables in this group."""
return QAttributes(self._qnd_group)
def get(self, key, default=None):
"""like dict.get method"""
try:
return self[key]
except KeyError:
return default
def items(self, auto=None):
"""like dict.items method (iteritems in python2)"""
if auto == self._qnd_state.auto:
auto = None
for name in self._qnd_group:
if auto is None:
value = self[name]
else:
with self.push():
self.auto(auto)
value = self[name]
yield name, value
def __repr__(self):
this = self._qnd_group
if this is not None:
return "<QGroup with {} items>".format(len(this))
else:
return "<closed QGroup>"
def __len__(self):
return len(self._qnd_group)
def __contains__(self, name):
return self._qnd_group.lookup(name) is not None
def __iter__(self):
return iter(self._qnd_group)
keys = __iter__
__enter__ = push
def __exit__(self, etype, evalue, etrace):
self.drop(close=1)
def __call__(self, auto=None, *args):
# Make qg() shorthand for qg[()], returning whole group.
if auto == self._qnd_state.auto:
auto = None
if auto is None:
value = self[args]
else:
with self.push():
self.auto(auto)
value = self[args]
return value
def __getitem__(self, key):
if not isinstance(key, tuple):
key = (key,)
if not key: # qg[()] retrieves entire group
key = (list(self._qnd_group),)
name, args = key[0], key[1:]
if isinstance(name, basestring):
if "/" in name:
if name.startswith("/"):
return self.root()[(name[1:],) + args]
name = name.split("/")
name, args = name[0], tuple(name[1:]) + args
else:
# qg[["name1", "name2", ...], slice0, ...]
# returns [qg.name1[slice0, ...], qg.name2[slice0, ...], ...]
items = []
for key in name:
if not isinstance(key, basestring):
# Prevent recursive name lists inside name lists.
raise KeyError("expecting item name or list of item names")
items.append((key, self[(key,)+args]))
return ADict(items)
item = self._qnd_group.lookup(name)
if item is None:
raise KeyError("no such item in QGroup as {}".format(name))
state = self._qnd_state
auto, recording = state.auto, state.recording
record = state.goto
if item.islist():
item = QList(item, auto)
if record is None:
if not args and auto <= 1:
return item
else:
args = (record,) + args
return item[args] if args else item[:]
if item.isleaf():
return _reader(item, args) if args or auto else QLeaf(item)
# Item must be a group, set up inherited part of state.
# Note that goto record was not used, so subgroup inherits it.
cls = item.lookup("__class__")
if cls is not None and auto:
return _load_object(item, cls)
item = QGroup(item, auto=auto, recording=recording, goto=record)
return item() if auto > 1 else item
def __setitem__(self, key, value):
name, args = (key[0], key[1:]) if isinstance(key, tuple) else (key, ())
if not isinstance(name, basestring):
name = "/".join(name)
if "/" in name:
if name.startswith("/"):
item = self.root()
name = name[1:]
else:
path, name = name.rsplit("/", 1)
with self.push():
self.auto(0)  # turn off auto-read while resolving the parent group
item = self[path]
item[(name,) + args] = value
return
dtype, shape, value = _categorize(value)
state = self._qnd_state
recording, record = state.recording, state.goto
this = self._qnd_group
item = this.lookup(name)
if item is None:
# Declare item now.
if args:
raise KeyError("partial write during declaration of {}"
"".format(name))
if recording:
# numpy (1.16.4) misfeature dtype('f8') tests == None
# (other dtypes are != None as expected), so cannot
# ask if (list, dict, object, None) contains dtype
if recording != 1 or (dtype is None or
dtype in (list, dict, object)):
# Declare an anonymous-group-style list.
item = this.declare(name, list, None)
else: # Declare item with UNLIMITED dimension.
item = this.declare(name, dtype, shape, 1)
# item now an empty list
elif dtype == dict:
item = this.declare(name, dict, None)
if value:
QGroup(item).update(value)
return
elif dtype == list:
item = this.declare(name, list, None)
if value:
QList(item).extend(value)
return
elif dtype == object:
item = this.declare(name, dict, None)
_dump_object(item, value)
return
else:
item = this.declare(name, dtype, shape)
if value is None:
return
while item.islist():
if recording:
if args:
raise KeyError("partial write while recording {}"
"".format(name))
record = len(item) # index of next record
item = item.declare(dict if dtype == object else dtype,
shape) # declare the next item
state.goto = record
if dtype is None:
return
if dtype == list:
if value:
QList(item).extend(value)
return
if dtype == dict:
if value:
QGroup(item).update(value)
return
if dtype == object:
_dump_object(item, value)
return
break
if record is None:
if args:
record, args = args[0], args[1:]
elif dtype == list and not value:
# qg.lst = list is no-op for existing QList
return
else:
raise ValueError("cannot set existing QList {}, use "
"append, goto, or recording".format(name))
item = item.index(record)
if item is None:
raise KeyError("no such item in QList as {}".format(record))
record = None # makes no sense to use record recursively
recording = 0
if item.isgroup():
if args:
QGroup(item)[args] = value
return
if dtype == dict and not value:
# qg.grp = {} is no-op for existing QGroup
return
raise ValueError("cannot set existing QGroup {}, use update"
"".format(name))
# item is a leaf (neither a list nor a group)
if dtype in (dict, list, object):
raise TypeError("type mismatch in QLeaf {}".format(name))
elif item.query()[0] is None:
# None QLeaf objects need not support write() method.
if dtype is None and not args:
return
raise TypeError("QLeaf {} declared as None".format(name))
item.write(value, args)
def _monotonize(values):
# Reduce values to a strictly increasing subsequence by scanning backwards and
# masking out entries that are not below the next retained value; an overall
# decreasing input is negated first so the same scan applies, then reversed back.
decreasing = values[-1] < values[0]
if decreasing:
values = -values
mask = values == values
vnext = values[-1]
for i in range(-2, -values.size-1, -1):
v = values[i]
if v >= vnext:
mask[i] = False
else:
vnext = v
records, values = where(mask)[0], values[mask]
if decreasing:
# Reverse both records and values so latter is strictly increasing.
# In this way, values can always be used as x in the interp function.
records = records[::-1]
values = -values[::-1]
return records, values
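# Worked example (illustration only): _monotonize(asfarray([0., 1., 0.5, 2.]))
# drops index 1 because 1.0 >= 0.5, the next value retained in the backward scan,
# and returns records = array([0, 2, 3]) with values = array([0., 0.5, 2.]), a
# strictly increasing sequence suitable as the x argument of interp() in goto().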
def _categorize(value, attrib=False):
# This function defines the various sorts of values QnD recognizes:
# 1. None dtype = shape = value = None
# 2. list [, seq] dtype = list, shape = None, value = [] or seq
# 3. {...} dtype = dict, shape = None, value = {...}
# 4. type|dtype [, shape] dtype = dtype(type), shape, value = None
# 5. array_like value.dtype, value.shape, value = asanyarray(...)
# 6. object or dtype('O') dtype = object, shape = None, value
if value is None:
dtype = shape = None
elif isinstance(value, (type, _dtype)):
if value == list:
dtype, shape, value = list, None, []
else:
dtype, shape, value = _dtype(value), (), None
elif isinstance(value, dict):
if all(isinstance(key, basestring) for key in value):
dtype = dict
else:
dtype = object
shape = None
elif (isinstance(value, tuple) and len(value) == 2+bool(attrib) and
isinstance(value[0], (type, _dtype))):
dtype = value[0]
if dtype is not None and dtype not in (list, dict, object):
dtype = _dtype(dtype) # no-op if already a dtype
if not attrib:
if dtype == list:
value, shape = value[1], None
else:
value, shape = None, tuple(value[1])
else:
shape, value = value[1:]
else:
# The array(a) constructor used to accept essentially any argument a.
# At numpy 1.19 it began issuing a VisibleDeprecationWarning when a
# was a list whose items were of differing lengths (or shapes).
# Prior to that, it simply produced an ndarray of dtype object whose
# items were the python entities in the original list. This is the
# behavior we want in QnD, so we do not want to print a warning.
# Moreover, when the feature is eventually removed, this case will
# throw a (currently unknown) exception, which we need to avoid.
# Passing the dtype=object keyword to the array() constructor
# produces the pre-1.19 behavior (as far as I can tell), but of
# course we cannot do that here.
# The following code must work in three cases: (1) pre-1.19 numpy,
# (2) numpy 1.19-1.21 (at least) which print unwanted warnings without
# special treatment, and (3) future numpy which throws an error
# without the dtype=object keyword. Since QnD must always run in
# all three cases, there is no way to remove the protection against
# the deprecation warning, even after numpy moves past it.
with catch_warnings():
# Make case 2 (numpy 1.19) behave like case 3 (future numpy)
simplefilter("error", VisibleDeprecationWarning)
try:
v = asanyarray(value)
except Exception:
# As far as I can tell, the original numpy array() constructor
# would accept any argument whatsoever, returning either a
# scalar or 1D array of type object if its argument could not
# be interpreted. Therefore I believe only a ragged array
# argument reaches this point, and we can return to the
# original behavior by specifying dtype explicitly.
# Nevertheless, we protect against a possible exception.
simplefilter("ignore", VisibleDeprecationWarning)
try:
v = asanyarray(value, dtype=object)
except Exception:
return object, None, value
dtype, shape = v.dtype, v.shape
if dtype.kind == "O":
if not shape:
dtype, shape = object, None
else:
# Note that this does not work as expected when the contents
# of the list were themselves lists (not ndarrays) of numbers
# of varying lengths, since the asanyarray function will not
# convert those inner lists to ndarrays. Hence v.tolist() is
# really the same as the original value here.
# A QnD user must ensure that the inner lists are ndarrays if
# that is what they intended.
dtype, shape, value = list, None, v.tolist()
else:
value = v
if isinstance(dtype, _dtype):
kind = dtype.kind
if kind == "U":
if value is not None:
value = npencode(value, "utf8") # convert to 'S'
dtype = value.dtype
elif kind == "O":
raise ValueError("numpy dtype.kind 'O' not supported")
return dtype, shape, value
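# A few illustrative results (exact dtypes depend on platform numpy defaults):
#   _categorize(None)            -> (None, None, None)
#   _categorize(list)            -> (list, None, [])
#   _categorize({"a": 1})        -> (dict, None, {"a": 1})
#   _categorize((float, (3, 2))) -> (dtype('float64'), (3, 2), None)   # declaration only
#   _categorize(arange(3))       -> (dtype('int64'), (3,), array([0, 1, 2]))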
def _reader(item, args):
value = item.read(args)
dtyp = getattr(value, "dtype", None)
if dtyp is not None:
kind = dtyp.kind
if kind == "V":
if dtyp.names:
# The recarray has some significant misfeatures. The worst
# is that it will not print (repr or str) if it is aligned,
# or simply if the itemsize does not match what it expects.
# value = value.view(recarray)
pass
elif kind in "SU":
if not PY2:
if dtyp.kind == "S":
try:
value = npdecode(value, "utf8")
except UnicodeDecodeError:
value = npdecode(value, "latin1")
if isinstance(value, ndarray) and not value.shape:
value = value[()]
return value
_dtype = dtype # to allow access in methods using local name dtype
_builtin_module = str.__class__.__module__
def _dump_object(item, value):
# item.isgroup() is true, as yet empty, value is an object
item = QGroup(item)
if isinstance(value, dict):
# special case for dict with non-text keys
item["__class__"] = "dict"
items = value.iteritems if PY2 else value.items
for i, (k, v) in enumerate(items()):
item["_" + str(2*i)] = k
item["_" + str(2*i+1)] = v
else:
cls = value.__class__
cname, module = cls.__name__, cls.__module__
if module is not None and module != _builtin_module:
cname = ".".join((module, cname))
item["__class__"] = cname
# Note that __getnewargs_ex__ is python3 only, so we skip
# it here. The recommendation in the python3 docs is to use
# the _ex version only if __new__ requires keyword arguments.
# Similarly, we do not support the python2-only __getinitargs__.
mydict = getattr(value, "__dict__", None)
getnew = getattr(value, "__getnewargs__", None)
setter = hasattr(value, "__setstate__")
getter = getattr(value, "__getstate__", None)
if getnew:
args = getnew()
elif not getter and mydict is None:
# We cannot handle the intricacies of the full
# pickle/copyreg protocol, but by handling one simple
# case of __reduce__ we can pick up both slice() and set()
# objects, which is worthwhile.
# Virtually all objects have a __reduce__ method, which
# will often raise a TypeError. Go ahead and blow up here.
getnew = value.__reduce__()
if getnew[0] != cls or any(v is not None for v in getnew[2:]):
raise TypeError("QnD cannot dump class {}".format(cname))
args = getnew[1]
if getnew:
item["__getnewargs__"] = {}
subdir = item["__getnewargs__"]
for i, arg in enumerate(args):
subdir["_" + str(i)] = arg
value = getter() if getter else mydict
if setter:
# __setstate__ only called if __getstate__ not false
# Never convert lists or tuples to ndarrays here. (??)
if value:
if isinstance(value, (list, tuple)):
value = list, value
item["__setstate__"] = value
elif value:
item.update(value)
def _load_object(qgroup, cls):
# If you fail here, you can still read the group with ADict(qgroup)
# which avoids this special treatment.
cls = cls.read() # assume QLeaf yields a text string
if not isinstance(cls, basestring):
raise TypeError("Expecting __class__ member of QGroup to be text.")
qgroup = QGroup(qgroup, auto=2)
if cls == "dict":
obj = {}
names = list(name for name in qgroup if name != "__class__")
if len(names) & 1:
names[0] = "" # die in first pass
key = None
for i, n in enumerate(sorted(names)):
if "_{}".format(i) != n:
raise TypeError("QGroup with __class__ dict error")
value = qgroup[n]
if i & 1:
obj[key] = value
else:
key = value
else:
cls = cls.rsplit(".", 1)
try:
module = (import_module(cls[0]) if len(cls) > 1 else
sys.modules[_builtin_module])
cls = getattr(module, cls[-1])
except (ImportError, AttributeError):
# If the named module does not exist or does not have
# the specified class, just return an ADict.
return ADict(qgroup)
args = qgroup.get("__getnewargs__")
if args is not None:
args = [args["_" + str(i)] for i in range(len(args))]
obj = cls(*args)
else:
obj = object.__new__(cls)
args = qgroup.get("__setstate__")
if args is not None:
obj.__setstate__(args)
else:
names = list(name for name in qgroup
if name not in ["__class__", "__getnewargs__"])
if names:
obj.__dict__.update(qgroup(2, names))
return obj
class QState(list):
"""State information for a QGroup."""
__slots__ = ()
def __init__(self, recording=0, goto=None, auto=0):
if hasattr(recording, "__iter__"):
seq = tuple(recording)[:3]
else:
if goto is not None:
goto = int(goto)
recording, auto = int(recording), int(auto)
seq = recording, goto, auto
super(QState, self).__init__(seq)
@property
def recording(self):
return self[0]
@recording.setter
def recording(self, value):
self[0] = int(value)
@property
def goto(self):
return self[1]
@goto.setter
def goto(self, value):
self[1] = None if value is None else int(value)
@property
def auto(self):
return self[2]
@auto.setter
def auto(self, value):
self[2] = int(value)
def push(self):
state = self[:3]
self.append(state)
def drop(self):
if len(self) < 4:
return 1
self[:3] = super(QState, self).pop()
return 0
class QList(object):
"""List of subgroups, lists, and ndarrays.
You reference QList elements by index or slice, like ordinary list
elements, including the python convention for negative index values.
To read the entire list, call it like a function, ``ql()``, which is
equivalent to ``ql[:]``. A QList has __iter__, append, and extend::
for element in ql: do_something
ql.append(value)
ql.extend(iterable)
In general, the elements of a QList are unrelated to one another;
it's like an anonymous QGroup. However, a common use case is to
represent a so-called UNLIMITED dimension in netCDF or HDF5. In
this case, every element will have the same dtype and shape. The
`islist` method returns 1 for this special restricted case, while
it returns 2 for an unrestricted QList. Whether this makes any
difference depends on the underlying file format. The QGroup
`recording` and `goto` methods allow you to access QList items in
the group transparently, as if they were individual elements at
a current record or index.
Attributes
----------
isgroup
isleaf
Always 0.
islist
This is 1 if this QList is a record array declared in recording
mode 1, and 2 if it was declared in any other way (including as a
record array in recording mode 2).
dtype
Always ``list``, the builtin python type.
shape
ndim
size
sshape
Always None.
"""
__slots__ = "_qnd_list", "_qnd_auto"
isgroup = isleaf = 0
dtype, shape, ndim, size, sshape = list, None, None, None, None
def __init__(self, item=None, auto=0):
object.__setattr__(self, "_qnd_list", item)
self.auto(auto)
def auto(self, recurse):
"""Set auto read mode, analogous to QGroup.auto method."""
object.__setattr__(self, "_qnd_auto", int(recurse))
def root(self):
"""Return root QGroup for this item."""
return QGroup(self._qnd_list.root(), QState(auto=self._qnd_auto))
@property
def islist(self):
return self._qnd_list.islist()
def extend(self, iterable):
"""append multiple new elements to this QList"""
for value in iterable:
self.append(value)
def append(self, value):
"""append a new element to this QList"""
dtype, shape, value = _categorize(value)
item = self._qnd_list.declare(dtype, shape)
if dtype is None:
return
if dtype == list:
if value:
QList(item).extend(value)
return
if dtype == dict:
if value:
QGroup(item).update(value)
return
if dtype == object:
_dump_object(item, value)
return
if value is not None:
item.write(value, ())
# Being unable to do partial write on declaration is consistent
# with behavior of QGroup __setitem__. The way to get it is to
# make a declaration with value = (type, shape) instead of an
# actual value in both cases.
def __repr__(self):
return "<QList with {} items>".format(len(self))
def __len__(self):
return len(self._qnd_list)
def __iter__(self):
auto = self._qnd_auto
recurse = auto > 1
for item in self._qnd_list:
if item.isgroup():
cls = item.lookup("__class__") if auto else None
if cls is None:
item, readit = QGroup(item), recurse
else:
item, readit = _load_object(item, cls), 0
elif item.islist():
item, readit = QList(item), recurse
else:
item, readit = QLeaf(item), auto
yield item() if readit else item
def __call__(self):
return self[:]
def __getitem__(self, key):
if not isinstance(key, tuple):
key = (key,)
index, args = key[0], key[1:]
this = self._qnd_list
if isinstance(index, slice):
index = range(*index.indices(len(this)))
if hasattr(index, "__iter__"):
return [self[(i,) + args] for i in index]
item = this.index(index)
if item is None:
raise IndexError("QList index {} out of range".format(index))
auto = self._qnd_auto
if item.islist():
item = QList(item, auto)
if args:
return item[args]
return item[:] if auto > 1 else item
if item.isleaf():
return _reader(item, args) if args or auto else QLeaf(item)
# Item must be a group, set up inherited part of state.
# Note that goto record was not used, so subgroup inherits it.
cls = item.lookup("__class__")
if cls is not None and auto:
return _load_object(item, cls)
item = QGroup(item, auto=auto)
return item() if auto > 1 else item
def __setitem__(self, key, value):
if not isinstance(key, tuple):
key = (key,)
index, args = key[0], key[1:]
if isinstance(index, slice) or hasattr(index, "__iter__"):
raise TypeError("QList does not support multi-element setitem")
dtype, shape, value = _categorize(value)
item = self._qnd_list.index(index)
if item is None:
raise IndexError("QList index {} out of range".format(index))
if item.islist() or item.isgroup():
idtype = list if item.islist() else dict
if idtype == dtype and not value:
return
raise TypeError("cannot set existing QGroup or QList")
# item is a QLeaf
if item.query()[0] is None:
if dtype is None and not args:
return
raise TypeError("QLeaf {} declared as None".format(index))
# Work around numpy (1.16.4) misfeature dtype('f8') tests == None:
if dtype is None or dtype in (list, dict, object):
raise TypeError("type mismatch setting QLeaf {}".format(index))
item.write(value, args)
class QLeaf(object):
"""An ndarray or None stored in a file.
You can read the data by calling the leaf instance ``ql()``, or by
indexing it ``ql[:]``, which also provides a means for partial reads.
A QLeaf has `dtype`, `shape`, `ndim`, and `size` properties with the
same meanings as an ndarray (except None has all these properties
equal None). Additionally, the `sshape` property may return a symbolic
shape with optional strings in the tuple representing dimension names.
You can write data by calling ``ql(value)``, or by setting a slice,
which provides a means for partial writes.
Attributes
----------
isgroup
islist
Always 0.
isleaf
Always 1.
dtype
The numpy dtype of this ndarray, or None if this leaf is None.
This is the dtype in memory, not necessarily as stored.
shape
ndim
size
The numpy ndarray properties, or None if this leaf is None.
sshape
A symbolic shape tuple, like shape except dimension lengths may be
type str instead of int.
"""
__slots__ = "_qnd_leaf",
isgroup = islist = 0
isleaf = 1
def __init__(self, item):
object.__setattr__(self, "_qnd_leaf", item)
def root(self):
"""Return root QGroup for this item."""
return QGroup(self._qnd_leaf.root())
def __call__(self, value=_NOT_PRESENT_):
if value is _NOT_PRESENT_:
return self[()]
else:
self[()] = value
def __getitem__(self, key):
return _reader(self._qnd_leaf,
key if isinstance(key, tuple) else (key,))
def __setitem__(self, key, value):
self._qnd_leaf.write(value, key if isinstance(key, tuple) else (key,))
@property
def dtype(self):
return self._qnd_leaf.query()[0]
@property
def shape(self):
return self._qnd_leaf.query()[1]
@property
def ndim(self):
return len(self._qnd_leaf.query()[1])
@property
def size(self):
shape = self._qnd_leaf.query()[1]
return prod(shape) if shape else 1
@property
def sshape(self):
_, s, ss = self._qnd_leaf.query()
return ss if ss else s
class QAttributes(ItemsAreAttrs):
"""Attributes for a QGroup and its members.
Usage::
qa = qgroup.attrs()
qa0 = qa.vname # for variables in this group, or qa['vname']
qa1 = qa._ # or qa[''] for attributes of this group
value = qa0.aname # or qa0['aname'], None if no such attribute
qa0.aname = value # or qa0['aname'] = value
qa0.aname = dtype, shape, value
if 'aname' in qa0: do_something
for aname in qa0: do_something
for aname, value in qa0.items(): do_something
"""
__slots__ = "_qnd_parent", "_qnd_vname", "__weakref__"
def __init__(self, parent, vname=None):
if not isinstance(parent, ProxyTypes):
parent = proxy(parent)
object.__setattr__(self, "_qnd_parent", parent)
object.__setattr__(self, "_qnd_vname", vname)
def __repr__(self):
vname = self._qnd_vname
if vname is None:
return "<QAttributes accessor for QGroup items>"
elif not vname:
return "<QAttributes for whole QGroup>"
return "<QAttributes for item {}>".format(vname)
def get(self, key, default=None):
parent, vname = self._qnd_parent, self._qnd_vname
if vname is None:
# Get group attribute, even though that is inconsistent...
# Should we implement matching set() or just let it go?
vname = ""
else:
parent = parent._qnd_parent
return parent.attget(vname).get(key, default)
def keys(self):
group, vname = self._qnd_group_vname()
return iter(group.attget(vname))
def items(self):
group, vname = self._qnd_group_vname()
return group.attget(vname).items()
def _qnd_group_vname(self):
parent, vname = self._qnd_parent, self._qnd_vname
if vname is None:
raise TypeError("need to specify QGroup item name")
return parent._qnd_parent, vname
def __getattr__(self, name):
vname = self._qnd_vname
if vname is None or name not in self._qnd_builtins_:
return super(QAttributes, self).__getattr__(name)
# Handle builtin pseudo-attributes here; they do not show up
# in the actual attribute dict referenced by [key].
# Can use dtype_, shape_, etc. attributes if real attributes
# have these names.
item = self._qnd_parent._qnd_parent.lookup(vname)
if item.isgroup():
return dict if name == "dtype" else None
if item.islist():
return list if name == "dtype" else None
dsss = item.query()
if dsss[0] is None:
return None
if name == "ndim":
return len(dsss[1])
if name == "size":
return prod(dsss[1])
return dsss[self._qnd_builtins_.index(name)]
_qnd_builtins_ = ["dtype", "shape", "sshape", "size", "ndim"]
def __getitem__(self, key):
parent, vname = self._qnd_parent, self._qnd_vname
if vname is None:
# key is vname
item = parent.lookup(key) if key else True
if item is None:
raise KeyError("no such item in QGroup as {}".format(key))
return QAttributes(self, key)
return parent._qnd_parent.attget(vname).get(key)
def __setitem__(self, key, value):
group, vname = self._qnd_group_vname()
# Note that value can be (dtype, shape, value) to be explicit.
dtype, shape, value = _categorize(value, 1)
if dtype in (list, dict, object):
raise TypeError("an attribute cannot be a dict or list")
group.attset(vname, key, dtype, shape, value)
def __iter__(self):
group, vname = self._qnd_group_vname()
return iter(group.attget(vname))
def __contains__(self, name):
group, vname = self._qnd_group_vname()
return name in group.attget(vname)
def __len__(self):
group, vname = self._qnd_group_vname()
return len(group.attget(vname))
class QnDList(object):
"""Implmentation of a low level QList type using QGroup.
A backend which has no direct support for QList objects can use
this to produce a pseudo-list, which is a group with member names
_ (None or a single signed or unsigned byte, value never read) and
names _0, _1, _2, etc.
This implementation will handle both UNLIMITED index-style lists
made with recording = 1 (that is group.declare with unlim flag)
and general lists. If UNLIMITED dimensions are supported, pass the
QnDLeaf to this constructor::
item = QnDList(QnDLeaf) # if at least one record exists
item = QnDList(QnDLeaf, 1) # if no records yet exist
Use the fromgroup constructor to check if a QnDGroup is a pseudo-list::
item = QnDList.fromgroup(QnDGroup)
"""
__slots__ = "_qnd_parent", "_qnd_current",
def __init__(self, parent, empty=None):
self._qnd_parent = parent
current = empty
if empty is not None:
if parent.isgroup():
parent.declare("_", None, ())
elif not isinstance(parent, QnDList):
current = -1
self._qnd_current = current
@staticmethod
def fromgroup(parent):
item = parent.lookup("_")
if item is not None:
if all(_us_digits.match(name) for name in parent):
return QnDList(parent) # parent is a pseudo-list
return parent
def parent(self):
parent = self._qnd_parent
return parent._qnd_parent if isinstance(parent, QnDList) else parent
@staticmethod
def isgroup():
return 0
def isleaf(self):
return int(isinstance(self._qnd_parent, QnDList))
def islist(self):
if self._qnd_parent.isgroup():
return 2
return int(not isinstance(self._qnd_parent, QnDList))
def root(self):
return self._qnd_parent.root()
# len, iter, index, declare are list methods, assume isleaf() false
def __len__(self):
if self._qnd_parent.isgroup():
return len(self._qnd_parent) - 1 # subtract _ item
if self._qnd_current is not None and self._qnd_current < 0:
return 0 # leaf.query() probably returns 1
return self._qnd_parent.query()[1][0]
def __iter__(self):
parent = self._qnd_parent
if parent.isgroup():
for i in range(len(self)):
yield parent.lookup("_" + str(i))
else:
for i in range(len(self)):
yield QnDList(self, i)
def index(self, ndx):
nrecs = max(len(self), 1)
if ndx < 0:
ndx = ndx + nrecs
if ndx < 0 or ndx >= nrecs:
return None # out of range, let caller raise any exception
parent = self._qnd_parent
if parent.isgroup():
return parent.lookup("_" + str(ndx))
return QnDList(self, ndx)
def declare(self, dtype, shape):
parent = self._qnd_parent
nrecs = len(self)
if parent.isgroup():
return parent.declare("_" + str(nrecs), dtype, shape)
return QnDList(self, nrecs)
# query, read, write are leaf methods, assume isleaf() true
def query(self):
qndlist = self._qnd_parent
dtype, shape, sshape = qndlist._qnd_parent.query()
shape = shape[1:]
if sshape:
sshape = sshape[1:]
return dtype, shape, sshape
def read(self, args=()):
current = self._qnd_current
qndlist = self._qnd_parent
check = qndlist._qnd_current
if check is not None and check < 0:
raise TypeError("attempt to read from empty UNLIMITED array")
return qndlist._qnd_parent.read((current,) + args)
def write(self, value, args=()):
qndlist = self._qnd_parent
qndlist._qnd_parent.write(value, (self._qnd_current,) + args)
# Turn off special empty list state (if on):
qndlist._qnd_current = None
|
py | b402745b3667a29214a0f64613439f4161def820 | from abc import ABC, abstractmethod
from typing import Any, Union, TypeVar
KeysSet = TypeVar('KeysSet', tuple[Union[str, int], ...], list[Union[str, int]])


class Base(ABC):
    _data: Any

    @abstractmethod
    async def setdefault(self, cog: str, keys: KeysSet, value: Any = None) -> None:
        """Initialize a sub_key if not already initialized"""
        ...

    @abstractmethod
    async def set(self, cog: str, keys: KeysSet, value: Any) -> None:
        """Set a value to a sub_key"""
        ...

    @abstractmethod
    async def get(self, cog: str, keys: KeysSet) -> Any:
        """Get the value knowing its location"""
        ...

    @abstractmethod
    async def delete(self, cog: str, keys: KeysSet) -> None:
        """Delete a sub_key and its value"""
        ...

    @abstractmethod
    def register(self, cog: str, *, schema: dict, override_schema: bool) -> None:
        """Initialize a main_key in data if not already initialized"""
        ...

    @abstractmethod
    async def unregister(self, cog: str) -> None:
        """Delete main_key and its sub_keys from data"""
        ...

    @abstractmethod
    async def dump(self) -> None:
        """Save data"""
        ...

    async def _load(self) -> None:
        """Load data"""
        raise NotImplementedError
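

# Hypothetical illustration (not part of this module): a minimal in-memory driver
# built on the Base ABC above. The class name InMemoryDriver and the _resolve
# helper are invented here; a real driver would persist self._data in dump() and
# _load() instead of keeping it only in RAM. Assumes `keys` is never empty.
class InMemoryDriver(Base):
    def __init__(self) -> None:
        self._data: dict = {}

    def _resolve(self, cog: str, keys: KeysSet) -> tuple:
        # Walk nested dicts down to the parent of the final sub_key.
        node = self._data.setdefault(cog, {})
        for key in keys[:-1]:
            node = node.setdefault(key, {})
        return node, keys[-1]

    async def setdefault(self, cog: str, keys: KeysSet, value: Any = None) -> None:
        node, last = self._resolve(cog, keys)
        node.setdefault(last, value)

    async def set(self, cog: str, keys: KeysSet, value: Any) -> None:
        node, last = self._resolve(cog, keys)
        node[last] = value

    async def get(self, cog: str, keys: KeysSet) -> Any:
        node, last = self._resolve(cog, keys)
        return node[last]

    async def delete(self, cog: str, keys: KeysSet) -> None:
        node, last = self._resolve(cog, keys)
        del node[last]

    def register(self, cog: str, *, schema: dict, override_schema: bool = False) -> None:
        if override_schema or cog not in self._data:
            self._data[cog] = dict(schema)

    async def unregister(self, cog: str) -> None:
        self._data.pop(cog, None)

    async def dump(self) -> None:
        pass  # nothing to persist for the in-memory sketch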
|
py | b40275b583a0fb554bc642b719edbff525679253 | """JciHitachi integration."""
import asyncio
import async_timeout
import logging
from datetime import timedelta
from queue import Queue
from typing import NamedTuple
import voluptuous as vol
from homeassistant.const import CONF_DEVICES, CONF_EMAIL, CONF_PASSWORD
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed
)
from JciHitachi.api import JciHitachiAPI
_LOGGER = logging.getLogger(__name__)
DOMAIN = "jcihitachi_tw"
API = "api"
COORDINATOR = "coordinator"
UPDATE_DATA = "update_data"
UPDATED_DATA = "updated_data"
CONF_RETRY = "retry"
DEFAULT_RETRY = 5
DEFAULT_DEVICES = []
PLATFORMS = ["climate", "sensor"]
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_EMAIL): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_RETRY, default=DEFAULT_RETRY): cv.positive_int,
vol.Optional(CONF_DEVICES, default=DEFAULT_DEVICES): vol.All(cv.ensure_list, list),
}
)
},
extra=vol.ALLOW_EXTRA,
)
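# Illustrative only (not part of the integration): CONFIG_SCHEMA can be applied
# to a plain dict, which is what Home Assistant does with the parsed YAML
# configuration. The e-mail, password, and device name below are placeholders.
def _example_validate_config() -> dict:
    # The returned dict also carries CONF_RETRY == DEFAULT_RETRY, filled in by
    # the voluptuous default for the optional key.
    return CONFIG_SCHEMA(
        {
            DOMAIN: {
                CONF_EMAIL: "user@example.com",
                CONF_PASSWORD: "hunter2",
                CONF_DEVICES: ["Living room AC"],
            }
        }
    )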
async def async_setup(hass, config):
_LOGGER.debug(
f"CONF_EMAIL: {config[DOMAIN].get(CONF_EMAIL)}, \
        CONF_PASSWORD: {''.join(['*']*len(config[DOMAIN].get(CONF_PASSWORD)))}, \
CONF_RETRY: {config[DOMAIN].get(CONF_RETRY)}, \
CONF_DEVICES: {config[DOMAIN].get(CONF_DEVICES)}"
)
if len(config[DOMAIN].get(CONF_DEVICES)) == 0:
config[DOMAIN][CONF_DEVICES] = None
api = JciHitachiAPI(
email=config[DOMAIN].get(CONF_EMAIL),
password=config[DOMAIN].get(CONF_PASSWORD),
device_names=config[DOMAIN].get(CONF_DEVICES),
max_retries=config[DOMAIN].get(CONF_RETRY),
)
hass.data[API] = api
hass.data[UPDATE_DATA] = Queue()
hass.data[UPDATED_DATA] = dict()
hass.data[COORDINATOR] = None
try:
await hass.async_add_executor_job(api.login)
except AssertionError as err:
_LOGGER.error(f"Encountered assertion error:{err}")
return False
except RuntimeError as err:
_LOGGER.error(f"Failed to login API: {err}")
return False
_LOGGER.debug(
f"Peripheral info: {[peripheral for peripheral in api._peripherals.values()]}")
async def async_update_data():
"""Fetch data from API endpoint.
This is the place to pre-process the data to lookup tables
so entities can quickly look up their data.
"""
try:
# Note: asyncio.TimeoutError and aiohttp.ClientError are already
# handled by the data update coordinator.
async with async_timeout.timeout(10):
await hass.async_add_executor_job(api.refresh_status)
hass.data[UPDATED_DATA] = api.get_status()
_LOGGER.debug(
f"Latest data: {[(name, value.status) for name, value in hass.data[UPDATED_DATA].items()]}")
        except asyncio.TimeoutError:
            raise UpdateFailed("Command execution timed out while regularly fetching data.")
except Exception as err:
raise UpdateFailed(f"Error communicating with API: {err}")
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
# Name of the data. For logging purposes.
name=DOMAIN,
update_method=async_update_data,
# Polling interval. Will only be polled if there are subscribers.
update_interval=timedelta(seconds=30),
)
await coordinator.async_refresh()
hass.data[COORDINATOR] = coordinator
# Start jcihitachi components
if hass.data[API]:
_LOGGER.debug("Starting jcihitachi components.")
for platform in PLATFORMS:
discovery.load_platform(hass, platform, DOMAIN, {}, config)
# Return boolean to indicate that initialization was successful.
return True
class JciHitachiEntity(CoordinatorEntity):
def __init__(self, peripheral, coordinator):
super().__init__(coordinator)
self._peripheral = peripheral
@property
def name(self):
"""Return the peripheral's name."""
return self._peripheral.name
@property
def unique_id(self):
"""Return the peripheral's unique id."""
raise NotImplementedError
def put_queue(self, command, value, device_name):
"""Put data into the queue to update status"""
self.hass.data[UPDATE_DATA].put(
UpdateData(command, value, device_name)
)
def update(self):
"""Update latest status."""
api = self.hass.data[API]
while self.hass.data[UPDATE_DATA].qsize() > 0:
data = self.hass.data[UPDATE_DATA].get()
_LOGGER.debug(f"Updating data: {data}")
result = api.set_status(*data)
if result is True:
_LOGGER.debug("Data updated successfully.")
api.refresh_status()
self.hass.data[UPDATED_DATA] = api.get_status()
_LOGGER.debug(
f"Latest data: {[(name, value.status) for name, value in self.hass.data[UPDATED_DATA].items()]}")
# Important: We have to reset the update scheduler to prevent old status from wrongly being loaded.
self.coordinator.async_set_updated_data(None)
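# Illustrative sketch (not part of this module): how a platform entity built on
# JciHitachiEntity might queue a command and push it to the device. The class
# name, the "target_temp" command, and the integer value are assumptions made
# for this example only.
class _ExampleJciHitachiThermostat(JciHitachiEntity):
    @property
    def unique_id(self):
        return f"{self._peripheral.name}_example"
    def set_example_temperature(self, temperature: int) -> None:
        # Queue the change; update() drains the queue, sends it through the API
        # and refreshes the shared status in hass.data[UPDATED_DATA].
        self.put_queue(command="target_temp", value=temperature, device_name=self.name)
        self.update()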
class UpdateData(NamedTuple):
command : str
value : int
device_name : str |
py | b402762ee86914d9f14cc798272a07ed4f273266 | import sys
import os
import argparse
from ChainRepository import ChainRepository
from ChainHierarchyPrinter import ChainHierarchyPrinter
from GitCommandPreviewer import GitCommandPreviewer
from pygit2 import Repository
from BranchFilters.BranchFilterer import BranchFilterer
from BranchFilters.BasicBranchFilterer import BasicBranchFilterer
from LegendPrinter import LegendPrinter
def __main__():
    parser = argparse.ArgumentParser(description='Preview the effect of a git command on the branch hierarchy.')
parser.add_argument("-f", "--full", help="Replay all commits during preview", action="store_true")
parser.add_argument("-r", "--reduce", help="Filter to only the branches in the command", action="store_true")
args, unknown_args = parser.parse_known_args()
command = "git " + ' '.join(unknown_args)
validate()
    repo_name = os.path.join(os.getcwd(), ".git")
branch_filterer = BranchFilterer()
if args.reduce:
branch_filterer = BasicBranchFilterer(unknown_args)
repository = Repository(repo_name)
chain_repo = ChainRepository(repository, branch_filterer)
commands = ["git checkout " + chain_repo.head_name, command]
LegendPrinter().print_legend()
print("\nBefore `%s`" % command)
ChainHierarchyPrinter(chain_repo.tree, chain_repo.head_name).print()
print("\nAfter `%s`" % command)
GitCommandPreviewer(chain_repo, not args.full, branch_filterer).preview_commands(commands)
print()
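# Illustrative sketch (not part of the original script): previewing a command
# programmatically rather than through the CLI. The repository path and the
# rebase target "main" are placeholders for this example only.
def _example_preview(repo_path):
    branch_filterer = BranchFilterer()
    chain_repo = ChainRepository(Repository(os.path.join(repo_path, ".git")), branch_filterer)
    commands = ["git checkout " + chain_repo.head_name, "git rebase main"]
    GitCommandPreviewer(chain_repo, True, branch_filterer).preview_commands(commands)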
def validate():
    if not os.path.exists("./.git"):
        print("Must be run inside a repository")
        sys.exit(1)
if __name__ == "__main__":
    __main__() |
py | b402764b26090141ba8e9cd51fe46205bb7b553d | from app.core.routers.generic import router as server_health
from app.core.routers.publication import publications_router
from app.core.routers.author import authors_router
from app.utils.api.router import TypedAPIRouter
generic_router = TypedAPIRouter(router=server_health, tags=["Server health"])
publications_router = TypedAPIRouter(router=publications_router, tags=["Publications"])
author_router = TypedAPIRouter(router=authors_router, tags=["Authors"])
|
py | b40276826ca50731969c52d324e44e871d350684 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.mgmt.core import ARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional
from azure.core.credentials import TokenCredential
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from ._configuration import AuthorizationManagementClientConfiguration
from .operations import PermissionsOperations
from .operations import RoleDefinitionsOperations
from .operations import ProviderOperationsMetadataOperations
from .operations import GlobalAdministratorOperations
from .operations import RoleAssignmentsOperations
from .operations import ClassicAdministratorsOperations
from . import models
class AuthorizationManagementClient(object):
"""Role based access control provides you a way to apply granular level policy administration down to individual resources or resource groups. These operations enable you to manage role definitions and role assignments. A role definition describes the set of actions that can be performed on resources. A role assignment grants access to Azure Active Directory users.
:ivar permissions: PermissionsOperations operations
:vartype permissions: azure.mgmt.authorization.v2015_07_01.operations.PermissionsOperations
:ivar role_definitions: RoleDefinitionsOperations operations
:vartype role_definitions: azure.mgmt.authorization.v2015_07_01.operations.RoleDefinitionsOperations
:ivar provider_operations_metadata: ProviderOperationsMetadataOperations operations
:vartype provider_operations_metadata: azure.mgmt.authorization.v2015_07_01.operations.ProviderOperationsMetadataOperations
:ivar global_administrator: GlobalAdministratorOperations operations
:vartype global_administrator: azure.mgmt.authorization.v2015_07_01.operations.GlobalAdministratorOperations
:ivar role_assignments: RoleAssignmentsOperations operations
:vartype role_assignments: azure.mgmt.authorization.v2015_07_01.operations.RoleAssignmentsOperations
:ivar classic_administrators: ClassicAdministratorsOperations operations
:vartype classic_administrators: azure.mgmt.authorization.v2015_07_01.operations.ClassicAdministratorsOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:param str base_url: Service URL
"""
def __init__(
self,
credential, # type: "TokenCredential"
subscription_id, # type: str
base_url=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
if not base_url:
base_url = 'https://management.azure.com'
self._config = AuthorizationManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.permissions = PermissionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.role_definitions = RoleDefinitionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.provider_operations_metadata = ProviderOperationsMetadataOperations(
self._client, self._config, self._serialize, self._deserialize)
self.global_administrator = GlobalAdministratorOperations(
self._client, self._config, self._serialize, self._deserialize)
self.role_assignments = RoleAssignmentsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.classic_administrators = ClassicAdministratorsOperations(
self._client, self._config, self._serialize, self._deserialize)
def _send_request(self, http_request, **kwargs):
# type: (HttpRequest, Any) -> HttpResponse
"""Runs the network request through the client's chained policies.
:param http_request: The network request you want to make. Required.
:type http_request: ~azure.core.pipeline.transport.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to True.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.pipeline.transport.HttpResponse
"""
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
stream = kwargs.pop("stream", True)
pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs)
return pipeline_response.http_response
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> AuthorizationManagementClient
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
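# Illustrative sketch (not part of the generated client): constructing the
# client and issuing a raw request through _send_request. DefaultAzureCredential,
# the placeholder subscription id, and the example URL are assumptions for this
# sketch only; any TokenCredential and any ARM path would work the same way.
def _example_send_request():
    from azure.identity import DefaultAzureCredential
    from azure.core.pipeline.transport import HttpRequest
    client = AuthorizationManagementClient(DefaultAzureCredential(), "<subscription-id>")
    request = HttpRequest("GET", "/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/roleDefinitions?api-version=2015-07-01")
    with client:
        return client._send_request(request)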
|
py | b40276c638ffadda1d5dd6250928339a4d86606d | import graphene
from graphene_sqlalchemy import SQLAlchemyObjectType
from database import StudentsTable, TeachersTable, ClassesTable, GradesTable
class Students(SQLAlchemyObjectType):
class Meta:
model = StudentsTable
class StudentFields:
id = graphene.Int()
name = graphene.String()
age = graphene.Int()
country = graphene.String()
class AddStudentFields(graphene.InputObjectType, StudentFields):
pass
class Teachers(SQLAlchemyObjectType):
class Meta:
model = TeachersTable
class Classes(SQLAlchemyObjectType):
class Meta:
model = ClassesTable
class Grades(SQLAlchemyObjectType):
class Meta:
model = GradesTable
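# Illustrative sketch (not part of the original schema module): a minimal Query
# exposing the Students type. The field name "all_students" is an assumption
# for this example; get_query() is supplied by SQLAlchemyObjectType and pulls
# the SQLAlchemy session from the request context.
class ExampleQuery(graphene.ObjectType):
    all_students = graphene.List(Students)
    def resolve_all_students(self, info):
        return Students.get_query(info).all()
example_schema = graphene.Schema(query=ExampleQuery)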
|
py | b40276cd5a84142ec3194c9be78234d4849d843b | # Copyright 2018 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unit_tests.utils as ut_utils
import zaza.utilities.file_assertions as file_assertions
class TestFileAssertionUtils(ut_utils.BaseTestCase):
def setUp(self):
super(TestFileAssertionUtils, self).setUp()
# Patch all run_on_unit calls
self.patch(
'zaza.utilities.file_assertions.model.run_on_unit',
new_callable=mock.MagicMock(),
name='run_on_unit'
)
self._assert = mock.MagicMock()
self._assert.assertEqual = mock.MagicMock()
def test_path_glob(self):
self.run_on_unit.return_value = {
'Stdout': 'file-name root root 600'
}
file_details = {'path': '*'}
file_assertions.assert_path_glob(
self._assert, 'test/0', file_details)
self.run_on_unit.assert_called_once_with(
'test/0', 'bash -c "shopt -s -q globstar;'
' stat -c "%n %U %G %a" *"')
def test_single_path(self):
self.run_on_unit.return_value = {
'Stdout': 'root root 600'
}
file_details = {'path': 'test'}
file_assertions.assert_single_file(
self._assert, 'test/0', file_details)
self.run_on_unit.assert_called_once_with(
'test/0', 'stat -c "%U %G %a" test')
def test_error_message_glob(self):
message = file_assertions._error_message(
"Owner", "test/0", "root", "/path/to/something")
self.assertEqual(
message,
"Owner is incorrect for /path/to/something on test/0: root")
def test_error_message_single(self):
message = file_assertions._error_message(
"Owner", "test/0", "root")
self.assertEqual(message, "Owner is incorrect on test/0: root")
|
py | b4027821d3d1d65a4f54bf5f8c1f93a3092590b7 | # Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Python
import copy
import json
import logging
import re
import six
import urllib
from collections import OrderedDict
from datetime import timedelta
# OAuth2
from oauthlib.common import generate_token
from oauth2_provider.settings import oauth2_settings
# Django
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist, ValidationError as DjangoValidationError
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import force_text
from django.utils.text import capfirst
from django.utils.timezone import now
from django.utils.functional import cached_property
# Django REST Framework
from rest_framework.exceptions import ValidationError, PermissionDenied
from rest_framework import fields
from rest_framework import serializers
from rest_framework import validators
from rest_framework.utils.serializer_helpers import ReturnList
# Django-Polymorphic
from polymorphic.models import PolymorphicModel
# AWX
from awx.main.constants import SCHEDULEABLE_PROVIDERS, ANSI_SGR_PATTERN
from awx.main.models import * # noqa
from awx.main.models.unified_jobs import ACTIVE_STATES
from awx.main.models.base import NEW_JOB_TYPE_CHOICES
from awx.main.access import get_user_capabilities
from awx.main.fields import ImplicitRoleField
from awx.main.utils import (
get_type_for_model, get_model_for_type, timestamp_apiformat,
camelcase_to_underscore, getattrd, parse_yaml_or_json,
has_model_field_prefetched, extract_ansible_vars, encrypt_dict)
from awx.main.utils.filters import SmartFilter
from awx.main.redact import REPLACE_STR
from awx.main.validators import vars_validate_or_raise
from awx.conf.license import feature_enabled
from awx.api.versioning import reverse, get_request_version
from awx.api.fields import BooleanNullField, CharNullField, ChoiceNullField, VerbatimField
logger = logging.getLogger('awx.api.serializers')
DEPRECATED = 'This resource has been deprecated and will be removed in a future release'
# Fields that should be summarized regardless of object type.
DEFAULT_SUMMARY_FIELDS = ('id', 'name', 'description')# , 'created_by', 'modified_by')#, 'type')
# Keys are fields (foreign keys) where, if found on an instance, summary info
# should be added to the serialized data. Values are a tuple of field names on
# the related object to include in the summary data (if the field is present on
# the related object).
SUMMARIZABLE_FK_FIELDS = {
'organization': DEFAULT_SUMMARY_FIELDS,
'user': ('id', 'username', 'first_name', 'last_name'),
'application': ('id', 'name', 'client_id'),
'team': DEFAULT_SUMMARY_FIELDS,
'inventory': DEFAULT_SUMMARY_FIELDS + ('has_active_failures',
'total_hosts',
'hosts_with_active_failures',
'total_groups',
'groups_with_active_failures',
'has_inventory_sources',
'total_inventory_sources',
'inventory_sources_with_failures',
'organization_id',
'kind',
'insights_credential_id',),
'host': DEFAULT_SUMMARY_FIELDS + ('has_active_failures',
'has_inventory_sources'),
'group': DEFAULT_SUMMARY_FIELDS + ('has_active_failures',
'total_hosts',
'hosts_with_active_failures',
'total_groups',
'groups_with_active_failures',
'has_inventory_sources'),
'project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'),
'source_project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'),
'project_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed',),
'credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
'vault_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
'job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'elapsed'),
'job_template': DEFAULT_SUMMARY_FIELDS,
'workflow_job_template': DEFAULT_SUMMARY_FIELDS,
'workflow_job': DEFAULT_SUMMARY_FIELDS,
'schedule': DEFAULT_SUMMARY_FIELDS + ('next_run',),
'unified_job_template': DEFAULT_SUMMARY_FIELDS + ('unified_job_type',),
'last_job': DEFAULT_SUMMARY_FIELDS + ('finished', 'status', 'failed', 'license_error'),
'last_job_host_summary': DEFAULT_SUMMARY_FIELDS + ('failed',),
'last_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'license_error'),
'current_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'license_error'),
'current_job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'license_error'),
'inventory_source': ('source', 'last_updated', 'status'),
'custom_inventory_script': DEFAULT_SUMMARY_FIELDS,
'source_script': ('name', 'description'),
'role': ('id', 'role_field'),
'notification_template': DEFAULT_SUMMARY_FIELDS,
'instance_group': {'id', 'name', 'controller_id'},
'insights_credential': DEFAULT_SUMMARY_FIELDS,
}
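# Illustrative example (values are made up): for an object with an `organization`
# foreign key, get_summary_fields() below would contribute something like
#     {'organization': {'id': 1, 'name': 'Default', 'description': ''}}
# i.e. only the fields listed in SUMMARIZABLE_FK_FIELDS['organization'] that are
# actually present on the related object.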
def reverse_gfk(content_object, request):
'''
Computes a reverse for a GenericForeignKey field.
Returns a dictionary of the form
{ '<type>': reverse(<type detail>) }
for example
{ 'organization': '/api/v1/organizations/1/' }
'''
if content_object is None or not hasattr(content_object, 'get_absolute_url'):
return {}
return {
camelcase_to_underscore(content_object.__class__.__name__): content_object.get_absolute_url(request=request)
}
class CopySerializer(serializers.Serializer):
name = serializers.CharField()
def validate(self, attrs):
name = attrs.get('name')
view = self.context.get('view', None)
obj = view.get_object()
if name == obj.name:
raise serializers.ValidationError(_(
'The original object is already named {}, a copy from'
' it cannot have the same name.'.format(name)
))
return attrs
class BaseSerializerMetaclass(serializers.SerializerMetaclass):
'''
Custom metaclass to enable attribute inheritance from Meta objects on
serializer base classes.
Also allows for inheriting or updating field lists from base class(es):
class Meta:
# Inherit all fields from base class.
fields = ('*',)
# Inherit all fields from base class and add 'foo'.
fields = ('*', 'foo')
# Inherit all fields from base class except 'bar'.
fields = ('*', '-bar')
# Define fields as 'foo' and 'bar'; ignore base class fields.
fields = ('foo', 'bar')
# Extra field kwargs dicts are also merged from base classes.
extra_kwargs = {
'foo': {'required': True},
'bar': {'read_only': True},
}
# If a subclass were to define extra_kwargs as:
extra_kwargs = {
'foo': {'required': False, 'default': ''},
'bar': {'label': 'New Label for Bar'},
}
# The resulting value of extra_kwargs would be:
extra_kwargs = {
'foo': {'required': False, 'default': ''},
'bar': {'read_only': True, 'label': 'New Label for Bar'},
}
# Extra field kwargs cannot be removed in subclasses, only replaced.
'''
@staticmethod
def _is_list_of_strings(x):
return isinstance(x, (list, tuple)) and all([isinstance(y, basestring) for y in x])
@staticmethod
def _is_extra_kwargs(x):
return isinstance(x, dict) and all([isinstance(k, basestring) and isinstance(v, dict) for k,v in x.items()])
@classmethod
def _update_meta(cls, base, meta, other=None):
for attr in dir(other):
if attr.startswith('_'):
continue
val = getattr(other, attr)
meta_val = getattr(meta, attr, None)
# Special handling for lists/tuples of strings (field names).
if cls._is_list_of_strings(val) and cls._is_list_of_strings(meta_val or []):
meta_val = meta_val or []
new_vals = []
except_vals = []
if base: # Merge values from all bases.
new_vals.extend([x for x in meta_val])
for v in val:
if not base and v == '*': # Inherit all values from previous base(es).
new_vals.extend([x for x in meta_val])
elif not base and v.startswith('-'): # Except these values.
except_vals.append(v[1:])
else:
new_vals.append(v)
val = []
for v in new_vals:
if v not in except_vals and v not in val:
val.append(v)
val = tuple(val)
# Merge extra_kwargs dicts from base classes.
elif cls._is_extra_kwargs(val) and cls._is_extra_kwargs(meta_val or {}):
meta_val = meta_val or {}
new_val = {}
if base:
for k,v in meta_val.items():
new_val[k] = copy.deepcopy(v)
for k,v in val.items():
new_val.setdefault(k, {}).update(copy.deepcopy(v))
val = new_val
# Any other values are copied in case they are mutable objects.
else:
val = copy.deepcopy(val)
setattr(meta, attr, val)
def __new__(cls, name, bases, attrs):
meta = type('Meta', (object,), {})
for base in bases[::-1]:
cls._update_meta(base, meta, getattr(base, 'Meta', None))
cls._update_meta(None, meta, attrs.get('Meta', meta))
attrs['Meta'] = meta
return super(BaseSerializerMetaclass, cls).__new__(cls, name, bases, attrs)
class BaseSerializer(serializers.ModelSerializer):
__metaclass__ = BaseSerializerMetaclass
class Meta:
fields = ('id', 'type', 'url', 'related', 'summary_fields', 'created',
'modified', 'name', 'description')
summary_fields = ()
summarizable_fields = ()
# add the URL and related resources
type = serializers.SerializerMethodField()
url = serializers.SerializerMethodField()
related = serializers.SerializerMethodField('_get_related')
summary_fields = serializers.SerializerMethodField('_get_summary_fields')
# make certain fields read only
created = serializers.SerializerMethodField()
modified = serializers.SerializerMethodField()
@property
def version(self):
"""
The request version component of the URL as an integer i.e., 1 or 2
"""
return get_request_version(self.context.get('request'))
def get_type(self, obj):
return get_type_for_model(self.Meta.model)
def get_types(self):
return [self.get_type(None)]
def get_type_choices(self):
type_name_map = {
'job': _('Playbook Run'),
'ad_hoc_command': _('Command'),
'project_update': _('SCM Update'),
'inventory_update': _('Inventory Sync'),
'system_job': _('Management Job'),
'workflow_job': _('Workflow Job'),
'workflow_job_template': _('Workflow Template'),
}
choices = []
for t in self.get_types():
name = type_name_map.get(t, force_text(get_model_for_type(t)._meta.verbose_name).title())
choices.append((t, name))
return choices
def get_url(self, obj):
if obj is None or not hasattr(obj, 'get_absolute_url'):
return ''
elif isinstance(obj, User):
return self.reverse('api:user_detail', kwargs={'pk': obj.pk})
else:
return obj.get_absolute_url(request=self.context.get('request'))
def filter_field_metadata(self, fields, method):
"""
Filter field metadata based on the request method.
This it intended to be extended by subclasses.
"""
return fields
def _get_related(self, obj):
return {} if obj is None else self.get_related(obj)
def _generate_named_url(self, url_path, obj, node):
url_units = url_path.split('/')
named_url = node.generate_named_url(obj)
url_units[4] = named_url
return '/'.join(url_units)
def get_related(self, obj):
res = OrderedDict()
view = self.context.get('view', None)
if view and (hasattr(view, 'retrieve') or view.request.method == 'POST') and \
type(obj) in settings.NAMED_URL_GRAPH:
original_url = self.get_url(obj)
if not original_url.startswith('/api/v1'):
res['named_url'] = self._generate_named_url(
original_url, obj, settings.NAMED_URL_GRAPH[type(obj)]
)
if getattr(obj, 'created_by', None):
res['created_by'] = self.reverse('api:user_detail', kwargs={'pk': obj.created_by.pk})
if getattr(obj, 'modified_by', None):
res['modified_by'] = self.reverse('api:user_detail', kwargs={'pk': obj.modified_by.pk})
return res
def _get_summary_fields(self, obj):
return {} if obj is None else self.get_summary_fields(obj)
def get_summary_fields(self, obj):
# Return values for certain fields on related objects, to simplify
# displaying lists of items without additional API requests.
summary_fields = OrderedDict()
for fk, related_fields in SUMMARIZABLE_FK_FIELDS.items():
try:
# A few special cases where we don't want to access the field
# because it results in additional queries.
if fk == 'job' and isinstance(obj, UnifiedJob):
continue
if fk == 'project' and (isinstance(obj, InventorySource) or
isinstance(obj, Project)):
continue
fkval = getattr(obj, fk, None)
if fkval is None:
continue
if fkval == obj:
continue
summary_fields[fk] = OrderedDict()
for field in related_fields:
if (
self.version < 2 and field == 'credential_type_id' and
fk in ['credential', 'vault_credential']): # TODO: remove version check in 3.3
continue
fval = getattr(fkval, field, None)
if fval is None and field == 'type':
if isinstance(fkval, PolymorphicModel):
fkval = fkval.get_real_instance()
fval = get_type_for_model(fkval)
elif fval is None and field == 'unified_job_type' and isinstance(fkval, UnifiedJobTemplate):
fkval = fkval.get_real_instance()
fval = get_type_for_model(fkval._get_unified_job_class())
if fval is not None:
summary_fields[fk][field] = fval
# Can be raised by the reverse accessor for a OneToOneField.
except ObjectDoesNotExist:
pass
if getattr(obj, 'created_by', None):
summary_fields['created_by'] = OrderedDict()
for field in SUMMARIZABLE_FK_FIELDS['user']:
summary_fields['created_by'][field] = getattr(obj.created_by, field)
if getattr(obj, 'modified_by', None):
summary_fields['modified_by'] = OrderedDict()
for field in SUMMARIZABLE_FK_FIELDS['user']:
summary_fields['modified_by'][field] = getattr(obj.modified_by, field)
# RBAC summary fields
roles = {}
for field in obj._meta.get_fields():
if type(field) is ImplicitRoleField:
roles[field.name] = role_summary_fields_generator(obj, field.name)
if len(roles) > 0:
summary_fields['object_roles'] = roles
# Advance display of RBAC capabilities
if hasattr(self, 'show_capabilities'):
view = self.context.get('view', None)
parent_obj = None
if view and hasattr(view, 'parent_model') and hasattr(view, 'get_parent_object'):
parent_obj = view.get_parent_object()
if view and view.request and view.request.user:
user_capabilities = get_user_capabilities(
view.request.user, obj, method_list=self.show_capabilities, parent_obj=parent_obj)
if user_capabilities:
summary_fields['user_capabilities'] = user_capabilities
return summary_fields
def get_created(self, obj):
if obj is None:
return None
elif isinstance(obj, User):
return obj.date_joined
elif hasattr(obj, 'created'):
return obj.created
return None
def get_modified(self, obj):
if obj is None:
return None
elif isinstance(obj, User):
return obj.last_login # Not actually exposed for User.
elif hasattr(obj, 'modified'):
return obj.modified
return None
def get_extra_kwargs(self):
extra_kwargs = super(BaseSerializer, self).get_extra_kwargs()
if self.instance:
read_only_on_update_fields = getattr(self.Meta, 'read_only_on_update_fields', tuple())
for field_name in read_only_on_update_fields:
kwargs = extra_kwargs.get(field_name, {})
kwargs['read_only'] = True
extra_kwargs[field_name] = kwargs
return extra_kwargs
def build_standard_field(self, field_name, model_field):
# DRF 3.3 serializers.py::build_standard_field() -> utils/field_mapping.py::get_field_kwargs() short circuits
# when a Model's editable field is set to False. The short circuit skips choice rendering.
#
# This logic is to force rendering choice's on an uneditable field.
# Note: Consider expanding this rendering for more than just choices fields
# Note: This logic works in conjuction with
if hasattr(model_field, 'choices') and model_field.choices:
was_editable = model_field.editable
model_field.editable = True
field_class, field_kwargs = super(BaseSerializer, self).build_standard_field(field_name, model_field)
if hasattr(model_field, 'choices') and model_field.choices:
model_field.editable = was_editable
if was_editable is False:
field_kwargs['read_only'] = True
# Pass model field default onto the serializer field if field is not read-only.
if model_field.has_default() and not field_kwargs.get('read_only', False):
field_kwargs['default'] = field_kwargs['initial'] = model_field.get_default()
# Enforce minimum value of 0 for PositiveIntegerFields.
if isinstance(model_field, (models.PositiveIntegerField, models.PositiveSmallIntegerField)) and 'choices' not in field_kwargs:
field_kwargs['min_value'] = 0
# Use custom boolean field that allows null and empty string as False values.
if isinstance(model_field, models.BooleanField) and not field_kwargs.get('read_only', False):
field_class = BooleanNullField
# Use custom char or choice field that coerces null to an empty string.
if isinstance(model_field, (models.CharField, models.TextField)) and not field_kwargs.get('read_only', False):
if 'choices' in field_kwargs:
field_class = ChoiceNullField
else:
field_class = CharNullField
# Update the message used for the unique validator to use capitalized
# verbose name; keeps unique message the same as with DRF 2.x.
opts = self.Meta.model._meta.concrete_model._meta
for validator in field_kwargs.get('validators', []):
if isinstance(validator, validators.UniqueValidator):
unique_error_message = model_field.error_messages.get('unique', None)
if unique_error_message:
unique_error_message = unique_error_message % {
'model_name': capfirst(opts.verbose_name),
'field_label': capfirst(model_field.verbose_name),
}
validator.message = unique_error_message
return field_class, field_kwargs
def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(BaseSerializer, self).build_relational_field(field_name, relation_info)
# Don't include choices for foreign key fields.
field_kwargs.pop('choices', None)
return field_class, field_kwargs
def get_unique_together_validators(self):
# Allow the model's full_clean method to handle the unique together validation.
return []
def run_validation(self, data=fields.empty):
try:
return super(BaseSerializer, self).run_validation(data)
except ValidationError as exc:
# Avoid bug? in DRF if exc.detail happens to be a list instead of a dict.
raise ValidationError(detail=serializers.as_serializer_error(exc))
def get_validation_exclusions(self, obj=None):
# Borrowed from DRF 2.x - return model fields that should be excluded
# from model validation.
cls = self.Meta.model
opts = cls._meta.concrete_model._meta
exclusions = [field.name for field in opts.fields]
for field_name, field in self.fields.items():
field_name = field.source or field_name
if field_name not in exclusions:
continue
if field.read_only:
continue
if isinstance(field, serializers.Serializer):
continue
exclusions.remove(field_name)
# The clean_ methods cannot be ran on many-to-many models
exclusions.extend([field.name for field in opts.many_to_many])
return exclusions
def validate(self, attrs):
attrs = super(BaseSerializer, self).validate(attrs)
try:
# Create/update a model instance and run it's full_clean() method to
# do any validation implemented on the model class.
exclusions = self.get_validation_exclusions(self.instance)
obj = self.instance or self.Meta.model()
for k,v in attrs.items():
if k not in exclusions:
setattr(obj, k, v)
obj.full_clean(exclude=exclusions)
# full_clean may modify values on the instance; copy those changes
# back to attrs so they are saved.
for k in attrs.keys():
if k not in exclusions:
attrs[k] = getattr(obj, k)
except DjangoValidationError as exc:
# DjangoValidationError may contain a list or dict; normalize into a
# dict where the keys are the field name and the values are a list
# of error messages, then raise as a DRF ValidationError. DRF would
# normally convert any DjangoValidationError to a non-field specific
# error message; here we preserve field-specific errors raised from
# the model's full_clean method.
d = exc.update_error_dict({})
for k,v in d.items():
v = v if isinstance(v, list) else [v]
v2 = []
for e in v:
if isinstance(e, DjangoValidationError):
v2.extend(list(e))
elif isinstance(e, list):
v2.extend(e)
else:
v2.append(e)
d[k] = map(force_text, v2)
raise ValidationError(d)
return attrs
def reverse(self, *args, **kwargs):
kwargs['request'] = self.context.get('request')
return reverse(*args, **kwargs)
@property
def is_detail_view(self):
if 'view' in self.context:
if 'pk' in self.context['view'].kwargs:
return True
return False
class EmptySerializer(serializers.Serializer):
pass
class BaseFactSerializer(BaseSerializer):
__metaclass__ = BaseSerializerMetaclass
def get_fields(self):
ret = super(BaseFactSerializer, self).get_fields()
if 'module' in ret:
# TODO: the values_list may pull in a LOT of entries before the distinct is called
modules = Fact.objects.all().values_list('module', flat=True).distinct()
choices = [(o, o.title()) for o in modules]
ret['module'] = serializers.ChoiceField(choices=choices, read_only=True, required=False)
return ret
class UnifiedJobTemplateSerializer(BaseSerializer):
class Meta:
model = UnifiedJobTemplate
fields = ('*', 'last_job_run', 'last_job_failed',
'next_job_run', 'status')
def get_related(self, obj):
res = super(UnifiedJobTemplateSerializer, self).get_related(obj)
if obj.current_job:
res['current_job'] = obj.current_job.get_absolute_url(request=self.context.get('request'))
if obj.last_job:
res['last_job'] = obj.last_job.get_absolute_url(request=self.context.get('request'))
if obj.next_schedule:
res['next_schedule'] = obj.next_schedule.get_absolute_url(request=self.context.get('request'))
return res
def get_types(self):
if type(self) is UnifiedJobTemplateSerializer:
return ['project', 'inventory_source', 'job_template', 'system_job_template', 'workflow_job_template',]
else:
return super(UnifiedJobTemplateSerializer, self).get_types()
def to_representation(self, obj):
serializer_class = None
if type(self) is UnifiedJobTemplateSerializer:
if isinstance(obj, Project):
serializer_class = ProjectSerializer
elif isinstance(obj, InventorySource):
serializer_class = InventorySourceSerializer
elif isinstance(obj, JobTemplate):
serializer_class = JobTemplateSerializer
elif isinstance(obj, SystemJobTemplate):
serializer_class = SystemJobTemplateSerializer
elif isinstance(obj, WorkflowJobTemplate):
serializer_class = WorkflowJobTemplateSerializer
if serializer_class:
serializer = serializer_class(instance=obj, context=self.context)
return serializer.to_representation(obj)
else:
return super(UnifiedJobTemplateSerializer, self).to_representation(obj)
class UnifiedJobSerializer(BaseSerializer):
show_capabilities = ['start', 'delete']
class Meta:
model = UnifiedJob
fields = ('*', 'unified_job_template', 'launch_type', 'status',
'failed', 'started', 'finished', 'elapsed', 'job_args',
'job_cwd', 'job_env', 'job_explanation', 'execution_node',
'result_traceback')
extra_kwargs = {
'unified_job_template': {
'source': 'unified_job_template_id',
'label': 'unified job template',
},
'job_env': {
'read_only': True,
'label': 'job_env',
}
}
def get_types(self):
if type(self) is UnifiedJobSerializer:
return ['project_update', 'inventory_update', 'job', 'ad_hoc_command', 'system_job', 'workflow_job',]
else:
return super(UnifiedJobSerializer, self).get_types()
def get_related(self, obj):
res = super(UnifiedJobSerializer, self).get_related(obj)
if obj.unified_job_template:
res['unified_job_template'] = obj.unified_job_template.get_absolute_url(request=self.context.get('request'))
if obj.schedule:
res['schedule'] = obj.schedule.get_absolute_url(request=self.context.get('request'))
if isinstance(obj, ProjectUpdate):
res['stdout'] = self.reverse('api:project_update_stdout', kwargs={'pk': obj.pk})
elif isinstance(obj, InventoryUpdate):
res['stdout'] = self.reverse('api:inventory_update_stdout', kwargs={'pk': obj.pk})
elif isinstance(obj, Job):
res['stdout'] = self.reverse('api:job_stdout', kwargs={'pk': obj.pk})
elif isinstance(obj, AdHocCommand):
res['stdout'] = self.reverse('api:ad_hoc_command_stdout', kwargs={'pk': obj.pk})
if obj.workflow_job_id:
res['source_workflow_job'] = self.reverse('api:workflow_job_detail', kwargs={'pk': obj.workflow_job_id})
return res
def get_summary_fields(self, obj):
summary_fields = super(UnifiedJobSerializer, self).get_summary_fields(obj)
if obj.spawned_by_workflow:
summary_fields['source_workflow_job'] = {}
try:
summary_obj = obj.unified_job_node.workflow_job
except UnifiedJob.unified_job_node.RelatedObjectDoesNotExist:
return summary_fields
for field in SUMMARIZABLE_FK_FIELDS['job']:
val = getattr(summary_obj, field, None)
if val is not None:
summary_fields['source_workflow_job'][field] = val
return summary_fields
def to_representation(self, obj):
serializer_class = None
if type(self) is UnifiedJobSerializer:
if isinstance(obj, ProjectUpdate):
serializer_class = ProjectUpdateSerializer
elif isinstance(obj, InventoryUpdate):
serializer_class = InventoryUpdateSerializer
elif isinstance(obj, Job):
serializer_class = JobSerializer
elif isinstance(obj, AdHocCommand):
serializer_class = AdHocCommandSerializer
elif isinstance(obj, SystemJob):
serializer_class = SystemJobSerializer
elif isinstance(obj, WorkflowJob):
serializer_class = WorkflowJobSerializer
if serializer_class:
serializer = serializer_class(instance=obj, context=self.context)
ret = serializer.to_representation(obj)
else:
ret = super(UnifiedJobSerializer, self).to_representation(obj)
if 'elapsed' in ret:
if obj and obj.pk and obj.started and not obj.finished:
td = now() - obj.started
ret['elapsed'] = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / (10 ** 6 * 1.0)
ret['elapsed'] = float(ret['elapsed'])
return ret
class UnifiedJobListSerializer(UnifiedJobSerializer):
class Meta:
fields = ('*', '-job_args', '-job_cwd', '-job_env', '-result_traceback')
def get_field_names(self, declared_fields, info):
field_names = super(UnifiedJobListSerializer, self).get_field_names(declared_fields, info)
# Meta multiple inheritance and -field_name options don't seem to be
# taking effect above, so remove the undesired fields here.
return tuple(x for x in field_names if x not in ('job_args', 'job_cwd', 'job_env', 'result_traceback'))
def get_types(self):
if type(self) is UnifiedJobListSerializer:
return ['project_update', 'inventory_update', 'job', 'ad_hoc_command', 'system_job', 'workflow_job']
else:
return super(UnifiedJobListSerializer, self).get_types()
def to_representation(self, obj):
serializer_class = None
if type(self) is UnifiedJobListSerializer:
if isinstance(obj, ProjectUpdate):
serializer_class = ProjectUpdateListSerializer
elif isinstance(obj, InventoryUpdate):
serializer_class = InventoryUpdateListSerializer
elif isinstance(obj, Job):
serializer_class = JobListSerializer
elif isinstance(obj, AdHocCommand):
serializer_class = AdHocCommandListSerializer
elif isinstance(obj, SystemJob):
serializer_class = SystemJobListSerializer
elif isinstance(obj, WorkflowJob):
serializer_class = WorkflowJobSerializer
if serializer_class:
serializer = serializer_class(instance=obj, context=self.context)
ret = serializer.to_representation(obj)
else:
ret = super(UnifiedJobListSerializer, self).to_representation(obj)
if 'elapsed' in ret:
ret['elapsed'] = float(ret['elapsed'])
return ret
class UnifiedJobStdoutSerializer(UnifiedJobSerializer):
result_stdout = serializers.SerializerMethodField()
class Meta:
fields = ('result_stdout',)
def get_types(self):
if type(self) is UnifiedJobStdoutSerializer:
return ['project_update', 'inventory_update', 'job', 'ad_hoc_command', 'system_job']
else:
return super(UnifiedJobStdoutSerializer, self).get_types()
class UserSerializer(BaseSerializer):
password = serializers.CharField(required=False, default='', write_only=True,
help_text=_('Write-only field used to change the password.'))
ldap_dn = serializers.CharField(source='profile.ldap_dn', read_only=True)
external_account = serializers.SerializerMethodField(help_text=_('Set if the account is managed by an external service'))
is_system_auditor = serializers.BooleanField(default=False)
show_capabilities = ['edit', 'delete']
class Meta:
model = User
fields = ('*', '-name', '-description', '-modified',
'username', 'first_name', 'last_name',
'email', 'is_superuser', 'is_system_auditor', 'password', 'ldap_dn', 'external_account')
def to_representation(self, obj): # TODO: Remove in 3.3
ret = super(UserSerializer, self).to_representation(obj)
ret.pop('password', None)
if obj and type(self) is UserSerializer or self.version == 1:
ret['auth'] = obj.social_auth.values('provider', 'uid')
return ret
def get_validation_exclusions(self, obj=None):
ret = super(UserSerializer, self).get_validation_exclusions(obj)
ret.append('password')
return ret
def validate_password(self, value):
if not self.instance and value in (None, ''):
raise serializers.ValidationError(_('Password required for new User.'))
return value
def _update_password(self, obj, new_password):
# For now we're not raising an error, just not saving password for
# users managed by LDAP who already have an unusable password set.
if getattr(settings, 'AUTH_LDAP_SERVER_URI', None) and feature_enabled('ldap'):
try:
if obj.pk and obj.profile.ldap_dn and not obj.has_usable_password():
new_password = None
except AttributeError:
pass
if (getattr(settings, 'SOCIAL_AUTH_GOOGLE_OAUTH2_KEY', None) or
getattr(settings, 'SOCIAL_AUTH_GITHUB_KEY', None) or
getattr(settings, 'SOCIAL_AUTH_GITHUB_ORG_KEY', None) or
getattr(settings, 'SOCIAL_AUTH_GITHUB_TEAM_KEY', None) or
getattr(settings, 'SOCIAL_AUTH_SAML_ENABLED_IDPS', None)) and obj.social_auth.all():
new_password = None
if (getattr(settings, 'RADIUS_SERVER', None) or
getattr(settings, 'TACACSPLUS_HOST', None)) and obj.enterprise_auth.all():
new_password = None
if new_password:
obj.set_password(new_password)
obj.save(update_fields=['password'])
UserSessionMembership.clear_session_for_user(obj)
elif not obj.password:
obj.set_unusable_password()
obj.save(update_fields=['password'])
def get_external_account(self, obj):
account_type = None
if getattr(settings, 'AUTH_LDAP_SERVER_URI', None) and feature_enabled('ldap'):
try:
if obj.pk and obj.profile.ldap_dn and not obj.has_usable_password():
account_type = "ldap"
except AttributeError:
pass
if (getattr(settings, 'SOCIAL_AUTH_GOOGLE_OAUTH2_KEY', None) or
getattr(settings, 'SOCIAL_AUTH_GITHUB_KEY', None) or
getattr(settings, 'SOCIAL_AUTH_GITHUB_ORG_KEY', None) or
getattr(settings, 'SOCIAL_AUTH_GITHUB_TEAM_KEY', None) or
getattr(settings, 'SOCIAL_AUTH_SAML_ENABLED_IDPS', None)) and obj.social_auth.all():
account_type = "social"
if (getattr(settings, 'RADIUS_SERVER', None) or
getattr(settings, 'TACACSPLUS_HOST', None)) and obj.enterprise_auth.all():
account_type = "enterprise"
return account_type
def create(self, validated_data):
new_password = validated_data.pop('password', None)
obj = super(UserSerializer, self).create(validated_data)
self._update_password(obj, new_password)
return obj
def update(self, obj, validated_data):
new_password = validated_data.pop('password', None)
obj = super(UserSerializer, self).update(obj, validated_data)
self._update_password(obj, new_password)
return obj
def get_related(self, obj):
res = super(UserSerializer, self).get_related(obj)
res.update(dict(
teams = self.reverse('api:user_teams_list', kwargs={'pk': obj.pk}),
organizations = self.reverse('api:user_organizations_list', kwargs={'pk': obj.pk}),
admin_of_organizations = self.reverse('api:user_admin_of_organizations_list', kwargs={'pk': obj.pk}),
projects = self.reverse('api:user_projects_list', kwargs={'pk': obj.pk}),
credentials = self.reverse('api:user_credentials_list', kwargs={'pk': obj.pk}),
roles = self.reverse('api:user_roles_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:user_activity_stream_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:user_access_list', kwargs={'pk': obj.pk}),
applications = self.reverse('api:o_auth2_application_list', kwargs={'pk': obj.pk}),
tokens = self.reverse('api:o_auth2_token_list', kwargs={'pk': obj.pk}),
authorized_tokens = self.reverse('api:user_authorized_token_list', kwargs={'pk': obj.pk}),
personal_tokens = self.reverse('api:o_auth2_personal_token_list', kwargs={'pk': obj.pk}),
))
return res
def _validate_ldap_managed_field(self, value, field_name):
if not getattr(settings, 'AUTH_LDAP_SERVER_URI', None) or not feature_enabled('ldap'):
return value
try:
is_ldap_user = bool(self.instance and self.instance.profile.ldap_dn)
except AttributeError:
is_ldap_user = False
if is_ldap_user:
ldap_managed_fields = ['username']
ldap_managed_fields.extend(getattr(settings, 'AUTH_LDAP_USER_ATTR_MAP', {}).keys())
ldap_managed_fields.extend(getattr(settings, 'AUTH_LDAP_USER_FLAGS_BY_GROUP', {}).keys())
if field_name in ldap_managed_fields:
if value != getattr(self.instance, field_name):
raise serializers.ValidationError(_('Unable to change %s on user managed by LDAP.') % field_name)
return value
def validate_username(self, value):
return self._validate_ldap_managed_field(value, 'username')
def validate_first_name(self, value):
return self._validate_ldap_managed_field(value, 'first_name')
def validate_last_name(self, value):
return self._validate_ldap_managed_field(value, 'last_name')
def validate_email(self, value):
return self._validate_ldap_managed_field(value, 'email')
def validate_is_superuser(self, value):
return self._validate_ldap_managed_field(value, 'is_superuser')
class UserAuthorizedTokenSerializer(BaseSerializer):
refresh_token = serializers.SerializerMethodField()
token = serializers.SerializerMethodField()
class Meta:
model = OAuth2AccessToken
fields = (
'*', '-name', 'description', 'user', 'token', 'refresh_token',
'expires', 'scope', 'application',
)
read_only_fields = ('user', 'token', 'expires')
def get_token(self, obj):
request = self.context.get('request', None)
try:
if request.method == 'POST':
return obj.token
else:
return '*************'
except ObjectDoesNotExist:
return ''
def get_refresh_token(self, obj):
request = self.context.get('request', None)
try:
if request.method == 'POST':
return getattr(obj.refresh_token, 'token', '')
else:
return '**************'
except ObjectDoesNotExist:
return ''
def create(self, validated_data):
validated_data['user'] = self.context['request'].user
validated_data['token'] = generate_token()
validated_data['expires'] = now() + timedelta(
seconds=oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS
)
        obj = super(UserAuthorizedTokenSerializer, self).create(validated_data)
obj.save()
if obj.application is not None:
RefreshToken.objects.create(
user=self.context['request'].user,
token=generate_token(),
application=obj.application,
access_token=obj
)
return obj
class OAuth2ApplicationSerializer(BaseSerializer):
show_capabilities = ['edit', 'delete']
class Meta:
model = OAuth2Application
fields = (
'*', 'description', 'user', 'client_id', 'client_secret', 'client_type',
'redirect_uris', 'authorization_grant_type', 'skip_authorization',
)
read_only_fields = ('client_id', 'client_secret')
read_only_on_update_fields = ('user', 'authorization_grant_type')
extra_kwargs = {
'user': {'allow_null': False, 'required': True},
'authorization_grant_type': {'allow_null': False}
}
def to_representation(self, obj):
ret = super(OAuth2ApplicationSerializer, self).to_representation(obj)
if obj.client_type == 'public':
ret.pop('client_secret', None)
return ret
def get_modified(self, obj):
if obj is None:
return None
return obj.updated
def get_related(self, obj):
ret = super(OAuth2ApplicationSerializer, self).get_related(obj)
if obj.user:
ret['user'] = self.reverse('api:user_detail', kwargs={'pk': obj.user.pk})
ret['tokens'] = self.reverse(
'api:o_auth2_application_token_list', kwargs={'pk': obj.pk}
)
ret['activity_stream'] = self.reverse(
'api:o_auth2_application_activity_stream_list', kwargs={'pk': obj.pk}
)
return ret
def _summary_field_tokens(self, obj):
token_list = [{'id': x.pk, 'token': '**************', 'scope': x.scope} for x in obj.oauth2accesstoken_set.all()[:10]]
if has_model_field_prefetched(obj, 'oauth2accesstoken_set'):
token_count = len(obj.oauth2accesstoken_set.all())
else:
if len(token_list) < 10:
token_count = len(token_list)
else:
token_count = obj.oauth2accesstoken_set.count()
return {'count': token_count, 'results': token_list}
def get_summary_fields(self, obj):
ret = super(OAuth2ApplicationSerializer, self).get_summary_fields(obj)
ret['tokens'] = self._summary_field_tokens(obj)
return ret
class OAuth2TokenSerializer(BaseSerializer):
refresh_token = serializers.SerializerMethodField()
token = serializers.SerializerMethodField()
class Meta:
model = OAuth2AccessToken
fields = (
'*', '-name', 'description', 'user', 'token', 'refresh_token',
'application', 'expires', 'scope',
)
read_only_fields = ('user', 'token', 'expires')
def get_modified(self, obj):
if obj is None:
return None
return obj.updated
def get_related(self, obj):
ret = super(OAuth2TokenSerializer, self).get_related(obj)
if obj.user:
ret['user'] = self.reverse('api:user_detail', kwargs={'pk': obj.user.pk})
if obj.application:
ret['application'] = self.reverse(
'api:o_auth2_application_detail', kwargs={'pk': obj.application.pk}
)
ret['activity_stream'] = self.reverse(
'api:o_auth2_token_activity_stream_list', kwargs={'pk': obj.pk}
)
return ret
def get_token(self, obj):
request = self.context.get('request', None)
try:
if request.method == 'POST':
return obj.token
else:
return '*************'
except ObjectDoesNotExist:
return ''
def get_refresh_token(self, obj):
request = self.context.get('request', None)
try:
if request.method == 'POST':
return getattr(obj.refresh_token, 'token', '')
else:
return '**************'
except ObjectDoesNotExist:
return ''
def create(self, validated_data):
validated_data['user'] = self.context['request'].user
validated_data['token'] = generate_token()
validated_data['expires'] = now() + timedelta(
seconds=oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS
)
obj = super(OAuth2TokenSerializer, self).create(validated_data)
if obj.application and obj.application.user:
obj.user = obj.application.user
obj.save()
if obj.application is not None:
RefreshToken.objects.create(
user=obj.application.user if obj.application.user else None,
token=generate_token(),
application=obj.application,
access_token=obj
)
return obj
class OAuth2TokenDetailSerializer(OAuth2TokenSerializer):
class Meta:
read_only_fields = ('*', 'user', 'application')
class OAuth2AuthorizedTokenSerializer(BaseSerializer):
refresh_token = serializers.SerializerMethodField()
token = serializers.SerializerMethodField()
class Meta:
model = OAuth2AccessToken
fields = (
'*', '-name', 'description', 'user', 'token', 'refresh_token',
'expires', 'scope', 'application',
)
read_only_fields = ('user', 'token', 'expires')
def get_token(self, obj):
request = self.context.get('request', None)
try:
if request.method == 'POST':
return obj.token
else:
return '*************'
except ObjectDoesNotExist:
return ''
def get_refresh_token(self, obj):
request = self.context.get('request', None)
try:
if request.method == 'POST':
return getattr(obj.refresh_token, 'token', '')
else:
return '**************'
except ObjectDoesNotExist:
return ''
def create(self, validated_data):
validated_data['user'] = self.context['request'].user
validated_data['token'] = generate_token()
validated_data['expires'] = now() + timedelta(
seconds=oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS
)
obj = super(OAuth2AuthorizedTokenSerializer, self).create(validated_data)
if obj.application and obj.application.user:
obj.user = obj.application.user
obj.save()
if obj.application is not None:
RefreshToken.objects.create(
user=obj.application.user if obj.application.user else None,
token=generate_token(),
application=obj.application,
access_token=obj
)
return obj
class OAuth2PersonalTokenSerializer(BaseSerializer):
refresh_token = serializers.SerializerMethodField()
token = serializers.SerializerMethodField()
class Meta:
model = OAuth2AccessToken
fields = (
'*', '-name', 'description', 'user', 'token', 'refresh_token',
'application', 'expires', 'scope',
)
read_only_fields = ('user', 'token', 'expires', 'application')
def get_modified(self, obj):
if obj is None:
return None
return obj.updated
def get_related(self, obj):
ret = super(OAuth2PersonalTokenSerializer, self).get_related(obj)
if obj.user:
ret['user'] = self.reverse('api:user_detail', kwargs={'pk': obj.user.pk})
if obj.application:
ret['application'] = self.reverse(
'api:o_auth2_application_detail', kwargs={'pk': obj.application.pk}
)
ret['activity_stream'] = self.reverse(
'api:o_auth2_token_activity_stream_list', kwargs={'pk': obj.pk}
)
return ret
def get_token(self, obj):
request = self.context.get('request', None)
try:
if request.method == 'POST':
return obj.token
else:
return '*************'
except ObjectDoesNotExist:
return ''
def get_refresh_token(self, obj):
return None
def create(self, validated_data):
validated_data['user'] = self.context['request'].user
validated_data['token'] = generate_token()
validated_data['expires'] = now() + timedelta(
seconds=oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS
)
obj = super(OAuth2PersonalTokenSerializer, self).create(validated_data)
obj.save()
return obj
class OrganizationSerializer(BaseSerializer):
show_capabilities = ['edit', 'delete']
class Meta:
model = Organization
fields = ('*', 'custom_virtualenv',)
def get_related(self, obj):
res = super(OrganizationSerializer, self).get_related(obj)
res.update(dict(
projects = self.reverse('api:organization_projects_list', kwargs={'pk': obj.pk}),
inventories = self.reverse('api:organization_inventories_list', kwargs={'pk': obj.pk}),
workflow_job_templates = self.reverse('api:organization_workflow_job_templates_list', kwargs={'pk': obj.pk}),
users = self.reverse('api:organization_users_list', kwargs={'pk': obj.pk}),
admins = self.reverse('api:organization_admins_list', kwargs={'pk': obj.pk}),
teams = self.reverse('api:organization_teams_list', kwargs={'pk': obj.pk}),
credentials = self.reverse('api:organization_credential_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:organization_activity_stream_list', kwargs={'pk': obj.pk}),
notification_templates = self.reverse('api:organization_notification_templates_list', kwargs={'pk': obj.pk}),
notification_templates_any = self.reverse('api:organization_notification_templates_any_list', kwargs={'pk': obj.pk}),
notification_templates_success = self.reverse('api:organization_notification_templates_success_list', kwargs={'pk': obj.pk}),
notification_templates_error = self.reverse('api:organization_notification_templates_error_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:organization_object_roles_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:organization_access_list', kwargs={'pk': obj.pk}),
instance_groups = self.reverse('api:organization_instance_groups_list', kwargs={'pk': obj.pk}),
))
return res
def get_summary_fields(self, obj):
summary_dict = super(OrganizationSerializer, self).get_summary_fields(obj)
counts_dict = self.context.get('related_field_counts', None)
if counts_dict is not None and summary_dict is not None:
if obj.id not in counts_dict:
summary_dict['related_field_counts'] = {
'inventories': 0, 'teams': 0, 'users': 0,
'job_templates': 0, 'admins': 0, 'projects': 0}
else:
summary_dict['related_field_counts'] = counts_dict[obj.id]
return summary_dict
class ProjectOptionsSerializer(BaseSerializer):
class Meta:
fields = ('*', 'local_path', 'scm_type', 'scm_url', 'scm_branch',
'scm_clean', 'scm_delete_on_update', 'credential', 'timeout',)
def get_related(self, obj):
res = super(ProjectOptionsSerializer, self).get_related(obj)
if obj.credential:
res['credential'] = self.reverse('api:credential_detail',
kwargs={'pk': obj.credential.pk})
return res
def validate(self, attrs):
errors = {}
# Don't allow assigning a local_path used by another project.
# Don't allow assigning a local_path when scm_type is set.
valid_local_paths = Project.get_local_path_choices()
if self.instance:
scm_type = attrs.get('scm_type', self.instance.scm_type) or u''
else:
scm_type = attrs.get('scm_type', u'') or u''
if self.instance and not scm_type:
valid_local_paths.append(self.instance.local_path)
if scm_type:
attrs.pop('local_path', None)
if 'local_path' in attrs and attrs['local_path'] not in valid_local_paths:
errors['local_path'] = 'Invalid path choice.'
if errors:
raise serializers.ValidationError(errors)
return super(ProjectOptionsSerializer, self).validate(attrs)
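    # Illustrative sketch of the validation rules above (values hypothetical):
    #   {'scm_type': 'git', 'local_path': 'checkout_dir'}  -> local_path is dropped, since
    #       SCM projects manage their own checkout directory
    #   {'scm_type': '', 'local_path': 'unused_dir'}       -> accepted only if the path is in
    #       Project.get_local_path_choices() (or is this instance's current path)
    #   {'scm_type': '', 'local_path': 'taken_by_other'}   -> {'local_path': 'Invalid path choice.'}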
def to_representation(self, obj):
ret = super(ProjectOptionsSerializer, self).to_representation(obj)
if obj is not None and 'credential' in ret and not obj.credential:
ret['credential'] = None
return ret
class ProjectSerializer(UnifiedJobTemplateSerializer, ProjectOptionsSerializer):
status = serializers.ChoiceField(choices=Project.PROJECT_STATUS_CHOICES, read_only=True)
last_update_failed = serializers.BooleanField(read_only=True)
last_updated = serializers.DateTimeField(read_only=True)
show_capabilities = ['start', 'schedule', 'edit', 'delete']
class Meta:
model = Project
fields = ('*', 'organization', 'scm_delete_on_next_update', 'scm_update_on_launch',
'scm_update_cache_timeout', 'scm_revision', 'custom_virtualenv',) + \
('last_update_failed', 'last_updated') # Backwards compatibility
read_only_fields = ('scm_delete_on_next_update',)
def get_related(self, obj):
res = super(ProjectSerializer, self).get_related(obj)
res.update(dict(
teams = self.reverse('api:project_teams_list', kwargs={'pk': obj.pk}),
playbooks = self.reverse('api:project_playbooks', kwargs={'pk': obj.pk}),
inventory_files = self.reverse('api:project_inventories', kwargs={'pk': obj.pk}),
update = self.reverse('api:project_update_view', kwargs={'pk': obj.pk}),
project_updates = self.reverse('api:project_updates_list', kwargs={'pk': obj.pk}),
scm_inventory_sources = self.reverse('api:project_scm_inventory_sources', kwargs={'pk': obj.pk}),
schedules = self.reverse('api:project_schedules_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:project_activity_stream_list', kwargs={'pk': obj.pk}),
notification_templates_any = self.reverse('api:project_notification_templates_any_list', kwargs={'pk': obj.pk}),
notification_templates_success = self.reverse('api:project_notification_templates_success_list', kwargs={'pk': obj.pk}),
notification_templates_error = self.reverse('api:project_notification_templates_error_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:project_access_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:project_object_roles_list', kwargs={'pk': obj.pk}),
copy = self.reverse('api:project_copy', kwargs={'pk': obj.pk}),
))
if obj.organization:
res['organization'] = self.reverse('api:organization_detail',
kwargs={'pk': obj.organization.pk})
# Backwards compatibility.
if obj.current_update:
res['current_update'] = self.reverse('api:project_update_detail',
kwargs={'pk': obj.current_update.pk})
if obj.last_update:
res['last_update'] = self.reverse('api:project_update_detail',
kwargs={'pk': obj.last_update.pk})
return res
def to_representation(self, obj):
ret = super(ProjectSerializer, self).to_representation(obj)
if 'scm_revision' in ret and obj.scm_type == '':
ret['scm_revision'] = ''
return ret
def validate(self, attrs):
def get_field_from_model_or_attrs(fd):
return attrs.get(fd, self.instance and getattr(self.instance, fd) or None)
organization = None
if 'organization' in attrs:
organization = attrs['organization']
elif self.instance:
organization = self.instance.organization
view = self.context.get('view', None)
if not organization and not view.request.user.is_superuser:
# Only allow super users to create orgless projects
raise serializers.ValidationError(_('Organization is missing'))
elif get_field_from_model_or_attrs('scm_type') == '':
for fd in ('scm_update_on_launch', 'scm_delete_on_update', 'scm_clean'):
if get_field_from_model_or_attrs(fd):
raise serializers.ValidationError({fd: _('Update options must be set to false for manual projects.')})
return super(ProjectSerializer, self).validate(attrs)
class ProjectPlaybooksSerializer(ProjectSerializer):
playbooks = serializers.SerializerMethodField(help_text=_('Array of playbooks available within this project.'))
class Meta:
model = Project
fields = ('playbooks',)
def get_playbooks(self, obj):
return obj.playbook_files if obj.scm_type else obj.playbooks
@property
def data(self):
ret = super(ProjectPlaybooksSerializer, self).data
ret = ret.get('playbooks', [])
return ReturnList(ret, serializer=self)
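    # Illustrative sketch: because data() unwraps the 'playbooks' key, the rendered
    # response is a flat list such as (names hypothetical):
    #   ["site.yml", "deploy/app.yml"]
    # rather than a dict containing a 'playbooks' key.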
class ProjectInventoriesSerializer(ProjectSerializer):
inventory_files = serializers.ReadOnlyField(help_text=_(
'Array of inventory files and directories available within this project, '
'not comprehensive.'))
class Meta:
model = Project
fields = ('inventory_files',)
@property
def data(self):
ret = super(ProjectInventoriesSerializer, self).data
ret = ret.get('inventory_files', [])
return ReturnList(ret, serializer=self)
class ProjectUpdateViewSerializer(ProjectSerializer):
can_update = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_update',)
class ProjectUpdateSerializer(UnifiedJobSerializer, ProjectOptionsSerializer):
class Meta:
model = ProjectUpdate
fields = ('*', 'project', 'job_type')
def get_related(self, obj):
res = super(ProjectUpdateSerializer, self).get_related(obj)
try:
res.update(dict(
project = self.reverse('api:project_detail', kwargs={'pk': obj.project.pk}),
))
except ObjectDoesNotExist:
pass
res.update(dict(
cancel = self.reverse('api:project_update_cancel', kwargs={'pk': obj.pk}),
scm_inventory_updates = self.reverse('api:project_update_scm_inventory_updates', kwargs={'pk': obj.pk}),
notifications = self.reverse('api:project_update_notifications_list', kwargs={'pk': obj.pk}),
events = self.reverse('api:project_update_events_list', kwargs={'pk': obj.pk}),
))
return res
class ProjectUpdateListSerializer(ProjectUpdateSerializer, UnifiedJobListSerializer):
pass
class ProjectUpdateCancelSerializer(ProjectUpdateSerializer):
can_cancel = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_cancel',)
class BaseSerializerWithVariables(BaseSerializer):
def validate_variables(self, value):
return vars_validate_or_raise(value)
class InventorySerializer(BaseSerializerWithVariables):
show_capabilities = ['edit', 'delete', 'adhoc']
class Meta:
model = Inventory
fields = ('*', 'organization', 'kind', 'host_filter', 'variables', 'has_active_failures',
'total_hosts', 'hosts_with_active_failures', 'total_groups',
'groups_with_active_failures', 'has_inventory_sources',
'total_inventory_sources', 'inventory_sources_with_failures',
'insights_credential', 'pending_deletion',)
def get_related(self, obj):
res = super(InventorySerializer, self).get_related(obj)
res.update(dict(
hosts = self.reverse('api:inventory_hosts_list', kwargs={'pk': obj.pk}),
groups = self.reverse('api:inventory_groups_list', kwargs={'pk': obj.pk}),
root_groups = self.reverse('api:inventory_root_groups_list', kwargs={'pk': obj.pk}),
variable_data = self.reverse('api:inventory_variable_data', kwargs={'pk': obj.pk}),
script = self.reverse('api:inventory_script_view', kwargs={'pk': obj.pk}),
tree = self.reverse('api:inventory_tree_view', kwargs={'pk': obj.pk}),
inventory_sources = self.reverse('api:inventory_inventory_sources_list', kwargs={'pk': obj.pk}),
update_inventory_sources = self.reverse('api:inventory_inventory_sources_update', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:inventory_activity_stream_list', kwargs={'pk': obj.pk}),
job_templates = self.reverse('api:inventory_job_template_list', kwargs={'pk': obj.pk}),
ad_hoc_commands = self.reverse('api:inventory_ad_hoc_commands_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:inventory_access_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:inventory_object_roles_list', kwargs={'pk': obj.pk}),
instance_groups = self.reverse('api:inventory_instance_groups_list', kwargs={'pk': obj.pk}),
copy = self.reverse('api:inventory_copy', kwargs={'pk': obj.pk}),
))
if obj.insights_credential:
res['insights_credential'] = self.reverse('api:credential_detail', kwargs={'pk': obj.insights_credential.pk})
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
return res
def to_representation(self, obj):
ret = super(InventorySerializer, self).to_representation(obj)
if obj is not None and 'organization' in ret and not obj.organization:
ret['organization'] = None
return ret
def validate_host_filter(self, host_filter):
if host_filter:
try:
SmartFilter().query_from_string(host_filter)
except RuntimeError as e:
raise models.base.ValidationError(e)
return host_filter
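    # Illustrative sketch, assuming a filter expression accepted by SmartFilter
    # (the exact grammar is defined by SmartFilter, not here):
    #   validate_host_filter('name__icontains=web')  -> returns the string unchanged
    #   validate_host_filter('not a valid filter((') -> ValidationError wrapping the
    #       RuntimeError raised by SmartFilter().query_from_string()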
def validate(self, attrs):
kind = None
if 'kind' in attrs:
kind = attrs['kind']
elif self.instance:
kind = self.instance.kind
host_filter = None
if 'host_filter' in attrs:
host_filter = attrs['host_filter']
elif self.instance:
host_filter = self.instance.host_filter
if kind == 'smart' and not host_filter:
raise serializers.ValidationError({'host_filter': _(
'Smart inventories must specify host_filter')})
return super(InventorySerializer, self).validate(attrs)
# TODO: Remove entire serializer in 3.3, replace with normal serializer
class InventoryDetailSerializer(InventorySerializer):
def get_fields(self):
fields = super(InventoryDetailSerializer, self).get_fields()
if self.version == 1:
fields['can_run_ad_hoc_commands'] = serializers.SerializerMethodField()
return fields
def get_can_run_ad_hoc_commands(self, obj):
view = self.context.get('view', None)
return bool(obj and view and view.request and view.request.user and view.request.user.can_access(Inventory, 'run_ad_hoc_commands', obj))
class InventoryScriptSerializer(InventorySerializer):
class Meta:
fields = ()
class HostSerializer(BaseSerializerWithVariables):
show_capabilities = ['edit', 'delete']
class Meta:
model = Host
fields = ('*', 'inventory', 'enabled', 'instance_id', 'variables',
'has_active_failures', 'has_inventory_sources', 'last_job',
'last_job_host_summary', 'insights_system_id', 'ansible_facts_modified',)
read_only_fields = ('last_job', 'last_job_host_summary', 'insights_system_id',
'ansible_facts_modified',)
def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(HostSerializer, self).build_relational_field(field_name, relation_info)
# Inventory is read-only unless creating a new host.
if self.instance and field_name == 'inventory':
field_kwargs['read_only'] = True
field_kwargs.pop('queryset', None)
return field_class, field_kwargs
def get_related(self, obj):
res = super(HostSerializer, self).get_related(obj)
res.update(dict(
variable_data = self.reverse('api:host_variable_data', kwargs={'pk': obj.pk}),
groups = self.reverse('api:host_groups_list', kwargs={'pk': obj.pk}),
all_groups = self.reverse('api:host_all_groups_list', kwargs={'pk': obj.pk}),
job_events = self.reverse('api:host_job_events_list', kwargs={'pk': obj.pk}),
job_host_summaries = self.reverse('api:host_job_host_summaries_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:host_activity_stream_list', kwargs={'pk': obj.pk}),
inventory_sources = self.reverse('api:host_inventory_sources_list', kwargs={'pk': obj.pk}),
smart_inventories = self.reverse('api:host_smart_inventories_list', kwargs={'pk': obj.pk}),
ad_hoc_commands = self.reverse('api:host_ad_hoc_commands_list', kwargs={'pk': obj.pk}),
ad_hoc_command_events = self.reverse('api:host_ad_hoc_command_events_list', kwargs={'pk': obj.pk}),
fact_versions = self.reverse('api:host_fact_versions_list', kwargs={'pk': obj.pk}),
))
if self.version > 1:
res['insights'] = self.reverse('api:host_insights', kwargs={'pk': obj.pk})
if obj.inventory:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})
if obj.last_job:
res['last_job'] = self.reverse('api:job_detail', kwargs={'pk': obj.last_job.pk})
if obj.last_job_host_summary:
res['last_job_host_summary'] = self.reverse('api:job_host_summary_detail', kwargs={'pk': obj.last_job_host_summary.pk})
if self.version > 1:
res.update(dict(
ansible_facts = self.reverse('api:host_ansible_facts_detail', kwargs={'pk': obj.pk}),
))
return res
def get_summary_fields(self, obj):
d = super(HostSerializer, self).get_summary_fields(obj)
try:
d['last_job']['job_template_id'] = obj.last_job.job_template.id
d['last_job']['job_template_name'] = obj.last_job.job_template.name
except (KeyError, AttributeError):
pass
if has_model_field_prefetched(obj, 'groups'):
group_list = sorted([{'id': g.id, 'name': g.name} for g in obj.groups.all()], key=lambda x: x['id'])[:5]
else:
group_list = [{'id': g.id, 'name': g.name} for g in obj.groups.all().order_by('id')[:5]]
group_cnt = obj.groups.count()
d.setdefault('groups', {'count': group_cnt, 'results': group_list})
d.setdefault('recent_jobs', [{
'id': j.job.id,
'name': j.job.job_template.name if j.job.job_template is not None else "",
'status': j.job.status,
'finished': j.job.finished,
} for j in obj.job_host_summaries.select_related('job__job_template').order_by('-created')[:5]])
return d
def _get_host_port_from_name(self, name):
        # Allow hostname (except IPv6 for now) to specify the port inline.
port = None
if name.count(':') == 1:
name, port = name.split(':')
try:
port = int(port)
if port < 1 or port > 65535:
raise ValueError
except ValueError:
raise serializers.ValidationError(_(u'Invalid port specification: %s') % force_text(port))
return name, port
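    # Illustrative sketch of the helper above (hostnames hypothetical):
    #   _get_host_port_from_name('db01.example.com')       -> ('db01.example.com', None)
    #   _get_host_port_from_name('db01.example.com:2222')  -> ('db01.example.com', 2222)
    #   _get_host_port_from_name('db01.example.com:99999') -> ValidationError (port must be 1-65535)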
def validate_name(self, value):
name = force_text(value or '')
# Validate here only, update in main validate method.
host, port = self._get_host_port_from_name(name)
return value
def validate_inventory(self, value):
if value.kind == 'smart':
raise serializers.ValidationError({"detail": _("Cannot create Host for Smart Inventory")})
return value
def validate_variables(self, value):
return vars_validate_or_raise(value)
def validate(self, attrs):
name = force_text(attrs.get('name', self.instance and self.instance.name or ''))
host, port = self._get_host_port_from_name(name)
if port:
attrs['name'] = host
variables = force_text(attrs.get('variables', self.instance and self.instance.variables or ''))
vars_dict = parse_yaml_or_json(variables)
vars_dict['ansible_ssh_port'] = port
attrs['variables'] = json.dumps(vars_dict)
return super(HostSerializer, self).validate(attrs)
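    # Illustrative sketch: a hypothetical payload {'name': 'db01:2222', 'variables': '{"a": 1}'}
    # is normalized by validate() above to name 'db01' with the port folded into the
    # variables, i.e. variables becomes '{"a": 1, "ansible_ssh_port": 2222}'
    # (key order may differ since json.dumps() serializes the merged dict).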
def to_representation(self, obj):
ret = super(HostSerializer, self).to_representation(obj)
if not obj:
return ret
if 'inventory' in ret and not obj.inventory:
ret['inventory'] = None
if 'last_job' in ret and not obj.last_job:
ret['last_job'] = None
if 'last_job_host_summary' in ret and not obj.last_job_host_summary:
ret['last_job_host_summary'] = None
return ret
class AnsibleFactsSerializer(BaseSerializer):
class Meta:
model = Host
def to_representation(self, obj):
return obj.ansible_facts
class GroupSerializer(BaseSerializerWithVariables):
class Meta:
model = Group
fields = ('*', 'inventory', 'variables', 'has_active_failures',
'total_hosts', 'hosts_with_active_failures', 'total_groups',
'groups_with_active_failures', 'has_inventory_sources')
@property
def show_capabilities(self): # TODO: consolidate in 3.3
if self.version == 1:
return ['copy', 'edit', 'start', 'schedule', 'delete']
else:
return ['copy', 'edit', 'delete']
def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(GroupSerializer, self).build_relational_field(field_name, relation_info)
# Inventory is read-only unless creating a new group.
if self.instance and field_name == 'inventory':
field_kwargs['read_only'] = True
field_kwargs.pop('queryset', None)
return field_class, field_kwargs
def get_summary_fields(self, obj): # TODO: remove in 3.3
summary_fields = super(GroupSerializer, self).get_summary_fields(obj)
if self.version == 1:
try:
inv_src = obj.deprecated_inventory_source
summary_fields['inventory_source'] = {}
for field in SUMMARIZABLE_FK_FIELDS['inventory_source']:
fval = getattr(inv_src, field, None)
if fval is not None:
summary_fields['inventory_source'][field] = fval
except Group.deprecated_inventory_source.RelatedObjectDoesNotExist:
pass
return summary_fields
def get_related(self, obj):
res = super(GroupSerializer, self).get_related(obj)
res.update(dict(
variable_data = self.reverse('api:group_variable_data', kwargs={'pk': obj.pk}),
hosts = self.reverse('api:group_hosts_list', kwargs={'pk': obj.pk}),
potential_children = self.reverse('api:group_potential_children_list', kwargs={'pk': obj.pk}),
children = self.reverse('api:group_children_list', kwargs={'pk': obj.pk}),
all_hosts = self.reverse('api:group_all_hosts_list', kwargs={'pk': obj.pk}),
job_events = self.reverse('api:group_job_events_list', kwargs={'pk': obj.pk}),
job_host_summaries = self.reverse('api:group_job_host_summaries_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:group_activity_stream_list', kwargs={'pk': obj.pk}),
inventory_sources = self.reverse('api:group_inventory_sources_list', kwargs={'pk': obj.pk}),
ad_hoc_commands = self.reverse('api:group_ad_hoc_commands_list', kwargs={'pk': obj.pk}),
))
if self.version == 1: # TODO: remove in 3.3
try:
res['inventory_source'] = self.reverse('api:inventory_source_detail',
kwargs={'pk': obj.deprecated_inventory_source.pk})
except Group.deprecated_inventory_source.RelatedObjectDoesNotExist:
pass
if obj.inventory:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})
return res
def create(self, validated_data): # TODO: remove in 3.3
instance = super(GroupSerializer, self).create(validated_data)
if self.version == 1: # TODO: remove in 3.3
manual_src = InventorySource(deprecated_group=instance, inventory=instance.inventory)
manual_src.v1_group_name = instance.name
manual_src.save()
return instance
def validate_name(self, value):
if value in ('all', '_meta'):
raise serializers.ValidationError(_('Invalid group name.'))
return value
def validate_inventory(self, value):
if value.kind == 'smart':
raise serializers.ValidationError({"detail": _("Cannot create Group for Smart Inventory")})
return value
def to_representation(self, obj):
ret = super(GroupSerializer, self).to_representation(obj)
if obj is not None and 'inventory' in ret and not obj.inventory:
ret['inventory'] = None
return ret
class GroupTreeSerializer(GroupSerializer):
children = serializers.SerializerMethodField()
class Meta:
model = Group
fields = ('*', 'children')
def get_children(self, obj):
if obj is None:
return {}
children_qs = obj.children
children_qs = children_qs.select_related('inventory')
children_qs = children_qs.prefetch_related('inventory_source')
return GroupTreeSerializer(children_qs, many=True).data
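    # Illustrative sketch: get_children() recurses through GroupTreeSerializer, so a
    # serialized group nests its descendants under repeated 'children' keys, e.g.
    # (ids/names hypothetical):
    #   {'id': 1, 'name': 'web', ..., 'children': [
    #       {'id': 2, 'name': 'web-east', ..., 'children': []}]}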
class BaseVariableDataSerializer(BaseSerializer):
class Meta:
fields = ('variables',)
def to_representation(self, obj):
if obj is None:
return {}
ret = super(BaseVariableDataSerializer, self).to_representation(obj)
return parse_yaml_or_json(ret.get('variables', '') or '{}')
def to_internal_value(self, data):
data = {'variables': json.dumps(data)}
return super(BaseVariableDataSerializer, self).to_internal_value(data)
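    # Illustrative sketch of the round trip (values hypothetical):
    #   to_representation(obj)      -> {'ansible_user': 'admin'}   (parsed YAML/JSON dict)
    #   to_internal_value({'x': 1}) -> handled as {'variables': '{"x": 1}'}, so the incoming
    #       dict is stored back on the model's 'variables' field as JSON text.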
class InventoryVariableDataSerializer(BaseVariableDataSerializer):
class Meta:
model = Inventory
class HostVariableDataSerializer(BaseVariableDataSerializer):
class Meta:
model = Host
class GroupVariableDataSerializer(BaseVariableDataSerializer):
class Meta:
model = Group
class CustomInventoryScriptSerializer(BaseSerializer):
script = serializers.CharField(trim_whitespace=False)
show_capabilities = ['edit', 'delete']
class Meta:
model = CustomInventoryScript
fields = ('*', "script", "organization")
def validate_script(self, value):
if not value.startswith("#!"):
            raise serializers.ValidationError(_('Script must begin with a hashbang sequence, e.g. #!/usr/bin/env python'))
return value
def to_representation(self, obj):
ret = super(CustomInventoryScriptSerializer, self).to_representation(obj)
if obj is None:
return ret
request = self.context.get('request', None)
if request.user not in obj.admin_role and \
not request.user.is_superuser and \
not request.user.is_system_auditor and \
not (obj.organization is not None and request.user in obj.organization.auditor_role):
ret['script'] = None
return ret
def get_related(self, obj):
res = super(CustomInventoryScriptSerializer, self).get_related(obj)
res.update(dict(
object_roles = self.reverse('api:inventory_script_object_roles_list', kwargs={'pk': obj.pk}),
copy = self.reverse('api:inventory_script_copy', kwargs={'pk': obj.pk}),
))
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
return res
class InventorySourceOptionsSerializer(BaseSerializer):
credential = models.PositiveIntegerField(
blank=True, null=True, default=None,
help_text='This resource has been deprecated and will be removed in a future release')
class Meta:
fields = ('*', 'source', 'source_path', 'source_script', 'source_vars', 'credential',
'source_regions', 'instance_filters', 'group_by', 'overwrite', 'overwrite_vars',
'timeout', 'verbosity')
def get_related(self, obj):
res = super(InventorySourceOptionsSerializer, self).get_related(obj)
if obj.credential: # TODO: remove when 'credential' field is removed
res['credential'] = self.reverse('api:credential_detail',
kwargs={'pk': obj.credential})
if obj.source_script:
res['source_script'] = self.reverse('api:inventory_script_detail', kwargs={'pk': obj.source_script.pk})
return res
def validate_source_vars(self, value):
ret = vars_validate_or_raise(value)
for env_k in parse_yaml_or_json(value):
if env_k in settings.INV_ENV_VARIABLE_BLACKLIST:
raise serializers.ValidationError(_("`{}` is a prohibited environment variable".format(env_k)))
return ret
def validate(self, attrs):
# TODO: Validate source, validate source_regions
errors = {}
source = attrs.get('source', self.instance and self.instance.source or '')
source_script = attrs.get('source_script', self.instance and self.instance.source_script or '')
if source == 'custom':
if source_script is None or source_script == '':
errors['source_script'] = _("If 'source' is 'custom', 'source_script' must be provided.")
else:
try:
if not self.instance:
dest_inventory = attrs.get('inventory', None)
if not dest_inventory:
errors['inventory'] = _("Must provide an inventory.")
else:
dest_inventory = self.instance.inventory
if dest_inventory and source_script.organization != dest_inventory.organization:
errors['source_script'] = _("The 'source_script' does not belong to the same organization as the inventory.")
except Exception:
errors['source_script'] = _("'source_script' doesn't exist.")
logger.exception('Problem processing source_script validation.')
if errors:
raise serializers.ValidationError(errors)
return super(InventorySourceOptionsSerializer, self).validate(attrs)
# TODO: remove when old 'credential' fields are removed
def get_summary_fields(self, obj):
summary_fields = super(InventorySourceOptionsSerializer, self).get_summary_fields(obj)
if 'credential' in summary_fields:
cred = obj.get_cloud_credential()
if cred:
summary_fields['credential'] = {
'id': cred.id, 'name': cred.name, 'description': cred.description,
'kind': cred.kind, 'cloud': True, 'credential_type_id': cred.credential_type_id
}
else:
summary_fields.pop('credential')
return summary_fields
class InventorySourceSerializer(UnifiedJobTemplateSerializer, InventorySourceOptionsSerializer):
status = serializers.ChoiceField(choices=InventorySource.INVENTORY_SOURCE_STATUS_CHOICES, read_only=True)
last_update_failed = serializers.BooleanField(read_only=True)
last_updated = serializers.DateTimeField(read_only=True)
show_capabilities = ['start', 'schedule', 'edit', 'delete']
group = serializers.SerializerMethodField(
help_text=_('Automatic group relationship, will be removed in 3.3'))
class Meta:
model = InventorySource
fields = ('*', 'name', 'inventory', 'update_on_launch', 'update_cache_timeout',
'source_project', 'update_on_project_update') + \
('last_update_failed', 'last_updated', 'group') # Backwards compatibility.
def get_related(self, obj):
res = super(InventorySourceSerializer, self).get_related(obj)
res.update(dict(
update = self.reverse('api:inventory_source_update_view', kwargs={'pk': obj.pk}),
inventory_updates = self.reverse('api:inventory_source_updates_list', kwargs={'pk': obj.pk}),
schedules = self.reverse('api:inventory_source_schedules_list', kwargs={'pk': obj.pk}),
credentials = self.reverse('api:inventory_source_credentials_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:inventory_source_activity_stream_list', kwargs={'pk': obj.pk}),
hosts = self.reverse('api:inventory_source_hosts_list', kwargs={'pk': obj.pk}),
groups = self.reverse('api:inventory_source_groups_list', kwargs={'pk': obj.pk}),
notification_templates_any = self.reverse('api:inventory_source_notification_templates_any_list', kwargs={'pk': obj.pk}),
notification_templates_success = self.reverse('api:inventory_source_notification_templates_success_list', kwargs={'pk': obj.pk}),
notification_templates_error = self.reverse('api:inventory_source_notification_templates_error_list', kwargs={'pk': obj.pk}),
))
if obj.inventory:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})
if obj.source_project_id is not None:
res['source_project'] = self.reverse('api:project_detail', kwargs={'pk': obj.source_project.pk})
# Backwards compatibility.
if obj.current_update:
res['current_update'] = self.reverse('api:inventory_update_detail',
kwargs={'pk': obj.current_update.pk})
if obj.last_update:
res['last_update'] = self.reverse('api:inventory_update_detail',
kwargs={'pk': obj.last_update.pk})
if self.version == 1: # TODO: remove in 3.3
if obj.deprecated_group:
res['group'] = self.reverse('api:group_detail', kwargs={'pk': obj.deprecated_group.pk})
return res
def get_fields(self): # TODO: remove in 3.3
fields = super(InventorySourceSerializer, self).get_fields()
if self.version > 1:
fields.pop('group', None)
return fields
def get_summary_fields(self, obj): # TODO: remove in 3.3
summary_fields = super(InventorySourceSerializer, self).get_summary_fields(obj)
if self.version == 1 and obj.deprecated_group_id:
g = obj.deprecated_group
summary_fields['group'] = {}
for field in SUMMARIZABLE_FK_FIELDS['group']:
fval = getattr(g, field, None)
if fval is not None:
summary_fields['group'][field] = fval
return summary_fields
def get_group(self, obj): # TODO: remove in 3.3
if obj.deprecated_group:
return obj.deprecated_group.id
return None
def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(InventorySourceSerializer, self).build_relational_field(field_name, relation_info)
        # Inventory is read-only unless creating a new inventory source.
if self.instance and field_name == 'inventory':
field_kwargs['read_only'] = True
field_kwargs.pop('queryset', None)
return field_class, field_kwargs
# TODO: remove when old 'credential' fields are removed
def build_field(self, field_name, info, model_class, nested_depth):
# have to special-case the field so that DRF will not automagically make it
# read-only because it's a property on the model.
if field_name == 'credential':
return self.build_standard_field(field_name, self.credential)
return super(InventorySourceOptionsSerializer, self).build_field(field_name, info, model_class, nested_depth)
def to_representation(self, obj):
ret = super(InventorySourceSerializer, self).to_representation(obj)
if obj is None:
return ret
if 'inventory' in ret and not obj.inventory:
ret['inventory'] = None
return ret
def validate_source_project(self, value):
if value and value.scm_type == '':
raise serializers.ValidationError(_("Cannot use manual project for SCM-based inventory."))
return value
def validate_source(self, value):
if value == '':
raise serializers.ValidationError(_(
"Manual inventory sources are created automatically when a group is created in the v1 API."))
return value
def validate_update_on_project_update(self, value):
if value and self.instance and self.instance.schedules.exists():
raise serializers.ValidationError(_("Setting not compatible with existing schedules."))
return value
def validate_inventory(self, value):
if value and value.kind == 'smart':
raise serializers.ValidationError({"detail": _("Cannot create Inventory Source for Smart Inventory")})
return value
# TODO: remove when old 'credential' fields are removed
def create(self, validated_data):
deprecated_fields = {}
if 'credential' in validated_data:
deprecated_fields['credential'] = validated_data.pop('credential')
obj = super(InventorySourceSerializer, self).create(validated_data)
if deprecated_fields:
self._update_deprecated_fields(deprecated_fields, obj)
return obj
# TODO: remove when old 'credential' fields are removed
def update(self, obj, validated_data):
deprecated_fields = {}
if 'credential' in validated_data:
deprecated_fields['credential'] = validated_data.pop('credential')
obj = super(InventorySourceSerializer, self).update(obj, validated_data)
if deprecated_fields:
self._update_deprecated_fields(deprecated_fields, obj)
return obj
# TODO: remove when old 'credential' fields are removed
def _update_deprecated_fields(self, fields, obj):
if 'credential' in fields:
new_cred = fields['credential']
existing_creds = obj.credentials.exclude(credential_type__kind='vault')
for cred in existing_creds:
# Remove all other cloud credentials
if cred != new_cred:
obj.credentials.remove(cred)
if new_cred:
# Add new credential
obj.credentials.add(new_cred)
def validate(self, attrs):
deprecated_fields = {}
if 'credential' in attrs: # TODO: remove when 'credential' field removed
deprecated_fields['credential'] = attrs.pop('credential')
def get_field_from_model_or_attrs(fd):
return attrs.get(fd, self.instance and getattr(self.instance, fd) or None)
if get_field_from_model_or_attrs('source') != 'scm':
redundant_scm_fields = filter(
lambda x: attrs.get(x, None),
['source_project', 'source_path', 'update_on_project_update']
)
if redundant_scm_fields:
raise serializers.ValidationError(
{"detail": _("Cannot set %s if not SCM type." % ' '.join(redundant_scm_fields))}
)
attrs = super(InventorySourceSerializer, self).validate(attrs)
# Check type consistency of source and cloud credential, if provided
if 'credential' in deprecated_fields: # TODO: remove when v2 API is deprecated
cred = deprecated_fields['credential']
attrs['credential'] = cred
if cred is not None:
cred = Credential.objects.get(pk=cred)
view = self.context.get('view', None)
if (not view) or (not view.request) or (view.request.user not in cred.use_role):
raise PermissionDenied()
cred_error = InventorySource.cloud_credential_validation(
get_field_from_model_or_attrs('source'),
cred
)
if cred_error:
raise serializers.ValidationError({"detail": cred_error})
return attrs
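    # Illustrative sketch of the SCM consistency check above (values hypothetical):
    #   {'source': 'ec2', 'source_project': 5}  -> rejected; project fields only apply
    #       when source == 'scm'
    #   {'source': 'scm', 'source_project': 5, 'source_path': 'inventories/prod'} -> allowed,
    #       subject to the deprecated cloud-credential checks that follow.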
class InventorySourceUpdateSerializer(InventorySourceSerializer):
can_update = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_update',)
class InventoryUpdateSerializer(UnifiedJobSerializer, InventorySourceOptionsSerializer):
class Meta:
model = InventoryUpdate
fields = ('*', 'inventory_source', 'license_error', 'source_project_update')
def get_related(self, obj):
res = super(InventoryUpdateSerializer, self).get_related(obj)
try:
res.update(dict(
inventory_source = self.reverse(
'api:inventory_source_detail', kwargs={'pk': obj.inventory_source.pk}
),
))
except ObjectDoesNotExist:
pass
res.update(dict(
cancel = self.reverse('api:inventory_update_cancel', kwargs={'pk': obj.pk}),
notifications = self.reverse('api:inventory_update_notifications_list', kwargs={'pk': obj.pk}),
credentials = self.reverse('api:inventory_update_credentials_list', kwargs={'pk': obj.pk}),
events = self.reverse('api:inventory_update_events_list', kwargs={'pk': obj.pk}),
))
if obj.source_project_update_id:
res['source_project_update'] = self.reverse('api:project_update_detail',
kwargs={'pk': obj.source_project_update.pk})
return res
class InventoryUpdateListSerializer(InventoryUpdateSerializer, UnifiedJobListSerializer):
pass
class InventoryUpdateCancelSerializer(InventoryUpdateSerializer):
can_cancel = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_cancel',)
class TeamSerializer(BaseSerializer):
show_capabilities = ['edit', 'delete']
class Meta:
model = Team
fields = ('*', 'organization')
def get_related(self, obj):
res = super(TeamSerializer, self).get_related(obj)
res.update(dict(
projects = self.reverse('api:team_projects_list', kwargs={'pk': obj.pk}),
users = self.reverse('api:team_users_list', kwargs={'pk': obj.pk}),
credentials = self.reverse('api:team_credentials_list', kwargs={'pk': obj.pk}),
roles = self.reverse('api:team_roles_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:team_object_roles_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:team_activity_stream_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:team_access_list', kwargs={'pk': obj.pk}),
))
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
return res
def to_representation(self, obj):
ret = super(TeamSerializer, self).to_representation(obj)
if obj is not None and 'organization' in ret and not obj.organization:
ret['organization'] = None
return ret
class RoleSerializer(BaseSerializer):
class Meta:
model = Role
read_only_fields = ('id', 'role_field', 'description', 'name')
def to_representation(self, obj):
ret = super(RoleSerializer, self).to_representation(obj)
if obj.object_id:
content_object = obj.content_object
if hasattr(content_object, 'username'):
ret['summary_fields']['resource_name'] = obj.content_object.username
if hasattr(content_object, 'name'):
ret['summary_fields']['resource_name'] = obj.content_object.name
content_model = obj.content_type.model_class()
ret['summary_fields']['resource_type'] = get_type_for_model(content_model)
ret['summary_fields']['resource_type_display_name'] = content_model._meta.verbose_name.title()
ret.pop('created')
ret.pop('modified')
return ret
def get_related(self, obj):
ret = super(RoleSerializer, self).get_related(obj)
ret['users'] = self.reverse('api:role_users_list', kwargs={'pk': obj.pk})
ret['teams'] = self.reverse('api:role_teams_list', kwargs={'pk': obj.pk})
try:
if obj.content_object:
ret.update(reverse_gfk(obj.content_object, self.context.get('request')))
except AttributeError:
            # AttributeErrors happen if our content_object is pointing at
            # a model that no longer exists. This is dirty data and ideally
            # doesn't exist, but in case it does, let's not puke.
pass
return ret
class RoleSerializerWithParentAccess(RoleSerializer):
show_capabilities = ['unattach']
class ResourceAccessListElementSerializer(UserSerializer):
show_capabilities = [] # Clear fields from UserSerializer parent class
def to_representation(self, user):
'''
With this method we derive "direct" and "indirect" access lists. Contained
in the direct access list are all the roles the user is a member of, and
all of the roles that are directly granted to any teams that the user is a
member of.
The indirect access list is a list of all of the roles that the user is
a member of that are ancestors of any roles that grant permissions to
the resource.
'''
ret = super(ResourceAccessListElementSerializer, self).to_representation(user)
obj = self.context['view'].get_parent_object()
if self.context['view'].request is not None:
requesting_user = self.context['view'].request.user
else:
requesting_user = None
if 'summary_fields' not in ret:
ret['summary_fields'] = {}
def format_role_perm(role):
role_dict = { 'id': role.id, 'name': role.name, 'description': role.description}
try:
role_dict['resource_name'] = role.content_object.name
role_dict['resource_type'] = get_type_for_model(role.content_type.model_class())
role_dict['related'] = reverse_gfk(role.content_object, self.context.get('request'))
except AttributeError:
pass
if role.content_type is not None:
role_dict['user_capabilities'] = {'unattach': requesting_user.can_access(
Role, 'unattach', role, user, 'members', data={}, skip_sub_obj_read_check=False)}
else:
# Singleton roles should not be managed from this view, as per copy/edit rework spec
role_dict['user_capabilities'] = {'unattach': False}
return { 'role': role_dict, 'descendant_roles': get_roles_on_resource(obj, role)}
def format_team_role_perm(naive_team_role, permissive_role_ids):
ret = []
team_role = naive_team_role
if naive_team_role.role_field == 'admin_role':
team_role = naive_team_role.content_object.member_role
for role in team_role.children.filter(id__in=permissive_role_ids).all():
role_dict = {
'id': role.id,
'name': role.name,
'description': role.description,
'team_id': team_role.object_id,
'team_name': team_role.content_object.name,
'team_organization_name': team_role.content_object.organization.name,
}
if role.content_type is not None:
role_dict['resource_name'] = role.content_object.name
role_dict['resource_type'] = get_type_for_model(role.content_type.model_class())
role_dict['related'] = reverse_gfk(role.content_object, self.context.get('request'))
role_dict['user_capabilities'] = {'unattach': requesting_user.can_access(
Role, 'unattach', role, team_role, 'parents', data={}, skip_sub_obj_read_check=False)}
else:
# Singleton roles should not be managed from this view, as per copy/edit rework spec
role_dict['user_capabilities'] = {'unattach': False}
ret.append({ 'role': role_dict, 'descendant_roles': get_roles_on_resource(obj, team_role)})
return ret
team_content_type = ContentType.objects.get_for_model(Team)
content_type = ContentType.objects.get_for_model(obj)
direct_permissive_role_ids = Role.objects.filter(content_type=content_type, object_id=obj.id).values_list('id', flat=True)
all_permissive_role_ids = Role.objects.filter(content_type=content_type, object_id=obj.id).values_list('ancestors__id', flat=True)
direct_access_roles = user.roles \
.filter(id__in=direct_permissive_role_ids).all()
direct_team_roles = Role.objects \
.filter(content_type=team_content_type,
members=user,
children__in=direct_permissive_role_ids)
if content_type == team_content_type:
# When looking at the access list for a team, exclude the entries
# for that team. This exists primarily so we don't list the read role
# as a direct role when a user is a member or admin of a team
direct_team_roles = direct_team_roles.exclude(
children__content_type=team_content_type,
children__object_id=obj.id
)
indirect_team_roles = Role.objects \
.filter(content_type=team_content_type,
members=user,
children__in=all_permissive_role_ids) \
.exclude(id__in=direct_team_roles)
indirect_access_roles = user.roles \
.filter(id__in=all_permissive_role_ids) \
.exclude(id__in=direct_permissive_role_ids) \
.exclude(id__in=direct_team_roles) \
.exclude(id__in=indirect_team_roles)
ret['summary_fields']['direct_access'] \
= [format_role_perm(r) for r in direct_access_roles.distinct()] \
+ [y for x in (format_team_role_perm(r, direct_permissive_role_ids) for r in direct_team_roles.distinct()) for y in x] \
+ [y for x in (format_team_role_perm(r, all_permissive_role_ids) for r in indirect_team_roles.distinct()) for y in x]
ret['summary_fields']['indirect_access'] \
= [format_role_perm(r) for r in indirect_access_roles.distinct()]
return ret
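    # Illustrative sketch of the structure built above (ids/names hypothetical):
    #   ret['summary_fields']['direct_access'] == [
    #       {'role': {'id': 7, 'name': 'Admin', 'description': '...',
    #                 'user_capabilities': {'unattach': True}, ...},
    #        'descendant_roles': [...]},   # as reported by get_roles_on_resource(obj, role)
    #       ...]
    #   ret['summary_fields']['indirect_access'] uses the same per-entry shape for
    #   ancestor roles that grant access without being directly attached.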
class CredentialTypeSerializer(BaseSerializer):
show_capabilities = ['edit', 'delete']
managed_by_tower = serializers.ReadOnlyField()
class Meta:
model = CredentialType
fields = ('*', 'kind', 'name', 'managed_by_tower', 'inputs',
'injectors')
def validate(self, attrs):
if self.instance and self.instance.managed_by_tower:
raise PermissionDenied(
detail=_("Modifications not allowed for managed credential types")
)
if self.instance and self.instance.credentials.exists():
if 'inputs' in attrs and attrs['inputs'] != self.instance.inputs:
raise PermissionDenied(
detail= _("Modifications to inputs are not allowed for credential types that are in use")
)
ret = super(CredentialTypeSerializer, self).validate(attrs)
if 'kind' in attrs and attrs['kind'] not in ('cloud', 'net'):
raise serializers.ValidationError({
"kind": _("Must be 'cloud' or 'net', not %s") % attrs['kind']
})
fields = attrs.get('inputs', {}).get('fields', [])
for field in fields:
if field.get('ask_at_runtime', False):
raise serializers.ValidationError({"inputs": _("'ask_at_runtime' is not supported for custom credentials.")})
return ret
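    # Illustrative sketch of a payload this validation would accept (the field schema
    # keys shown are hypothetical; validate() itself rejects edits to managed types,
    # changes to in-use inputs, kinds other than 'cloud'/'net', and 'ask_at_runtime'):
    #   {'name': 'My API Type', 'kind': 'cloud',
    #    'inputs': {'fields': [{'id': 'api_token', 'label': 'API Token', 'secret': True}]}}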
def get_related(self, obj):
res = super(CredentialTypeSerializer, self).get_related(obj)
res['credentials'] = self.reverse(
'api:credential_type_credential_list',
kwargs={'pk': obj.pk}
)
res['activity_stream'] = self.reverse(
'api:credential_type_activity_stream_list',
kwargs={'pk': obj.pk}
)
return res
def to_representation(self, data):
value = super(CredentialTypeSerializer, self).to_representation(data)
# translate labels and help_text for credential fields "managed by Tower"
if value.get('managed_by_tower'):
for field in value.get('inputs', {}).get('fields', []):
field['label'] = _(field['label'])
if 'help_text' in field:
field['help_text'] = _(field['help_text'])
return value
def filter_field_metadata(self, fields, method):
# API-created/modified CredentialType kinds are limited to
# `cloud` and `net`
if method in ('PUT', 'POST'):
fields['kind']['choices'] = filter(
lambda choice: choice[0] in ('cloud', 'net'),
fields['kind']['choices']
)
return fields
# TODO: remove when API v1 is removed
@six.add_metaclass(BaseSerializerMetaclass)
class V1CredentialFields(BaseSerializer):
class Meta:
model = Credential
fields = ('*', 'kind', 'cloud', 'host', 'username',
'password', 'security_token', 'project', 'domain',
'ssh_key_data', 'ssh_key_unlock', 'become_method',
'become_username', 'become_password', 'vault_password',
'subscription', 'tenant', 'secret', 'client', 'authorize',
'authorize_password')
def build_field(self, field_name, info, model_class, nested_depth):
if field_name in V1Credential.FIELDS:
return self.build_standard_field(field_name,
V1Credential.FIELDS[field_name])
return super(V1CredentialFields, self).build_field(field_name, info, model_class, nested_depth)
@six.add_metaclass(BaseSerializerMetaclass)
class V2CredentialFields(BaseSerializer):
class Meta:
model = Credential
fields = ('*', 'credential_type', 'inputs')
class CredentialSerializer(BaseSerializer):
show_capabilities = ['edit', 'delete']
class Meta:
model = Credential
fields = ('*', 'organization')
def get_fields(self):
fields = super(CredentialSerializer, self).get_fields()
# TODO: remove when API v1 is removed
if self.version == 1:
fields.update(V1CredentialFields().get_fields())
else:
fields.update(V2CredentialFields().get_fields())
return fields
def to_representation(self, data):
value = super(CredentialSerializer, self).to_representation(data)
# TODO: remove when API v1 is removed
if self.version == 1:
if value.get('kind') == 'vault':
value['kind'] = 'ssh'
for field in V1Credential.PASSWORD_FIELDS:
if field in value and force_text(value[field]).startswith('$encrypted$'):
value[field] = '$encrypted$'
if 'inputs' in value:
value['inputs'] = data.display_inputs()
return value
def get_related(self, obj):
res = super(CredentialSerializer, self).get_related(obj)
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
res.update(dict(
activity_stream = self.reverse('api:credential_activity_stream_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:credential_access_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:credential_object_roles_list', kwargs={'pk': obj.pk}),
owner_users = self.reverse('api:credential_owner_users_list', kwargs={'pk': obj.pk}),
owner_teams = self.reverse('api:credential_owner_teams_list', kwargs={'pk': obj.pk}),
copy = self.reverse('api:credential_copy', kwargs={'pk': obj.pk}),
))
# TODO: remove when API v1 is removed
if self.version > 1:
res.update(dict(
credential_type = self.reverse('api:credential_type_detail', kwargs={'pk': obj.credential_type.pk}),
))
parents = [role for role in obj.admin_role.parents.all() if role.object_id is not None]
if parents:
res.update({parents[0].content_type.name:parents[0].content_object.get_absolute_url(self.context.get('request'))})
elif len(obj.admin_role.members.all()) > 0:
user = obj.admin_role.members.all()[0]
res.update({'user': self.reverse('api:user_detail', kwargs={'pk': user.pk})})
return res
def get_summary_fields(self, obj):
summary_dict = super(CredentialSerializer, self).get_summary_fields(obj)
summary_dict['owners'] = []
for user in obj.admin_role.members.all():
summary_dict['owners'].append({
'id': user.pk,
'type': 'user',
'name': user.username,
'description': ' '.join([user.first_name, user.last_name]),
'url': self.reverse('api:user_detail', kwargs={'pk': user.pk}),
})
for parent in [role for role in obj.admin_role.parents.all() if role.object_id is not None]:
summary_dict['owners'].append({
'id': parent.content_object.pk,
'type': camelcase_to_underscore(parent.content_object.__class__.__name__),
'name': parent.content_object.name,
'description': parent.content_object.description,
'url': parent.content_object.get_absolute_url(self.context.get('request')),
})
return summary_dict
def get_validation_exclusions(self, obj=None):
# CredentialType is now part of validation; legacy v1 fields (e.g.,
# 'username', 'password') in JSON POST payloads use the
# CredentialType's inputs definition to determine their validity
ret = super(CredentialSerializer, self).get_validation_exclusions(obj)
for field in ('credential_type', 'inputs'):
if field in ret:
ret.remove(field)
return ret
def to_internal_value(self, data):
# TODO: remove when API v1 is removed
if 'credential_type' not in data and self.version == 1:
# If `credential_type` is not provided, assume the payload is a
# v1 credential payload that specifies a `kind` and a flat list
# of field values
#
# In this scenario, we should automatically detect the proper
# CredentialType based on the provided values
kind = data.get('kind', 'ssh')
credential_type = CredentialType.from_v1_kind(kind, data)
if credential_type is None:
raise serializers.ValidationError({"kind": _('"%s" is not a valid choice' % kind)})
data['credential_type'] = credential_type.pk
value = OrderedDict(
{'credential_type': credential_type}.items() +
super(CredentialSerializer, self).to_internal_value(data).items()
)
# Make a set of the keys in the POST/PUT payload
# - Subtract real fields (name, organization, inputs)
# - Subtract virtual v1 fields defined on the determined credential
# type (username, password, etc...)
# - Any leftovers are invalid for the determined credential type
valid_fields = set(super(CredentialSerializer, self).get_fields().keys())
valid_fields.update(V2CredentialFields().get_fields().keys())
valid_fields.update(['kind', 'cloud'])
for field in set(data.keys()) - valid_fields - set(credential_type.defined_fields):
if data.get(field):
raise serializers.ValidationError(
{"detail": _("'%s' is not a valid field for %s") % (field, credential_type.name)}
)
value.pop('kind', None)
return value
return super(CredentialSerializer, self).to_internal_value(data)
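    # Illustrative sketch of the v1 fallback above (values hypothetical): a POST to
    # /api/v1/credentials/ such as
    #   {'name': 'machine cred', 'kind': 'ssh', 'username': 'admin', 'password': 'secret'}
    # has its CredentialType looked up via CredentialType.from_v1_kind('ssh', data);
    # keys that are neither real serializer fields nor fields defined by that type
    # raise "'<field>' is not a valid field for <type name>".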
def validate_credential_type(self, credential_type):
if self.instance and credential_type.pk != self.instance.credential_type.pk:
for rel in (
'ad_hoc_commands',
'insights_inventories',
'unifiedjobs',
'unifiedjobtemplates',
'projects',
'projectupdates',
'workflowjobnodes'
):
if getattr(self.instance, rel).count() > 0:
raise ValidationError(
_('You cannot change the credential type of the credential, as it may break the functionality'
' of the resources using it.'),
)
return credential_type
class CredentialSerializerCreate(CredentialSerializer):
user = serializers.PrimaryKeyRelatedField(
queryset=User.objects.all(),
required=False, default=None, write_only=True, allow_null=True,
help_text=_('Write-only field used to add user to owner role. If provided, '
'do not give either team or organization. Only valid for creation.'))
team = serializers.PrimaryKeyRelatedField(
queryset=Team.objects.all(),
required=False, default=None, write_only=True, allow_null=True,
help_text=_('Write-only field used to add team to owner role. If provided, '
'do not give either user or organization. Only valid for creation.'))
organization = serializers.PrimaryKeyRelatedField(
queryset=Organization.objects.all(),
required=False, default=None, allow_null=True,
help_text=_('Inherit permissions from organization roles. If provided on creation, '
'do not give either user or team.'))
class Meta:
model = Credential
fields = ('*', 'user', 'team')
def validate(self, attrs):
owner_fields = set()
for field in ('user', 'team', 'organization'):
if field in attrs:
if attrs[field]:
owner_fields.add(field)
else:
attrs.pop(field)
if not owner_fields:
raise serializers.ValidationError({"detail": _("Missing 'user', 'team', or 'organization'.")})
if attrs.get('team'):
attrs['organization'] = attrs['team'].organization
try:
return super(CredentialSerializerCreate, self).validate(attrs)
except ValidationError as e:
# TODO: remove when API v1 is removed
# If we have an `inputs` error on `/api/v1/`:
# {'inputs': {'username': [...]}}
# ...instead, send back:
# {'username': [...]}
if self.version == 1 and isinstance(e.detail.get('inputs'), dict):
e.detail = e.detail['inputs']
raise e
else:
raise
def create(self, validated_data):
user = validated_data.pop('user', None)
team = validated_data.pop('team', None)
# If our payload contains v1 credential fields, translate to the new
# model
# TODO: remove when API v1 is removed
if self.version == 1:
for attr in (
set(V1Credential.FIELDS) & set(validated_data.keys()) # set intersection
):
validated_data.setdefault('inputs', {})
value = validated_data.pop(attr)
if value:
validated_data['inputs'][attr] = value
credential = super(CredentialSerializerCreate, self).create(validated_data)
if user:
credential.admin_role.members.add(user)
if team:
if not credential.organization or team.organization.id != credential.organization.id:
raise serializers.ValidationError({"detail": _("Credential organization must be set and match before assigning to a team")})
credential.admin_role.parents.add(team.admin_role)
credential.use_role.parents.add(team.member_role)
return credential
class UserCredentialSerializerCreate(CredentialSerializerCreate):
class Meta:
model = Credential
fields = ('*', '-team', '-organization')
class TeamCredentialSerializerCreate(CredentialSerializerCreate):
class Meta:
model = Credential
fields = ('*', '-user', '-organization')
class OrganizationCredentialSerializerCreate(CredentialSerializerCreate):
class Meta:
model = Credential
fields = ('*', '-user', '-team')
class LabelsListMixin(object):
def _summary_field_labels(self, obj):
label_list = [{'id': x.id, 'name': x.name} for x in obj.labels.all()[:10]]
if has_model_field_prefetched(obj, 'labels'):
label_ct = len(obj.labels.all())
else:
if len(label_list) < 10:
label_ct = len(label_list)
else:
label_ct = obj.labels.count()
return {'count': label_ct, 'results': label_list}
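    # Illustrative sketch of the summary produced above (labels hypothetical):
    #   {'count': 2, 'results': [{'id': 10, 'name': 'staging'}, {'id': 11, 'name': 'web'}]}
    # When labels were not prefetched and fewer than 10 come back, len() of the slice is
    # trusted so no extra COUNT query is issued.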
def get_summary_fields(self, obj):
res = super(LabelsListMixin, self).get_summary_fields(obj)
res['labels'] = self._summary_field_labels(obj)
return res
# TODO: remove when API v1 is removed
@six.add_metaclass(BaseSerializerMetaclass)
class V1JobOptionsSerializer(BaseSerializer):
class Meta:
model = Credential
fields = ('*', 'cloud_credential', 'network_credential')
V1_FIELDS = {
'cloud_credential': models.PositiveIntegerField(blank=True, null=True, default=None, help_text=DEPRECATED),
'network_credential': models.PositiveIntegerField(blank=True, null=True, default=None, help_text=DEPRECATED),
}
def build_field(self, field_name, info, model_class, nested_depth):
if field_name in self.V1_FIELDS:
return self.build_standard_field(field_name,
self.V1_FIELDS[field_name])
return super(V1JobOptionsSerializer, self).build_field(field_name, info, model_class, nested_depth)
@six.add_metaclass(BaseSerializerMetaclass)
class LegacyCredentialFields(BaseSerializer):
class Meta:
model = Credential
fields = ('*', 'credential', 'vault_credential')
LEGACY_FIELDS = {
'credential': models.PositiveIntegerField(blank=True, null=True, default=None, help_text=DEPRECATED),
'vault_credential': models.PositiveIntegerField(blank=True, null=True, default=None, help_text=DEPRECATED),
}
def build_field(self, field_name, info, model_class, nested_depth):
if field_name in self.LEGACY_FIELDS:
return self.build_standard_field(field_name,
self.LEGACY_FIELDS[field_name])
return super(LegacyCredentialFields, self).build_field(field_name, info, model_class, nested_depth)
class JobOptionsSerializer(LabelsListMixin, BaseSerializer):
class Meta:
fields = ('*', 'job_type', 'inventory', 'project', 'playbook',
'forks', 'limit', 'verbosity', 'extra_vars', 'job_tags',
'force_handlers', 'skip_tags', 'start_at_task', 'timeout',
'use_fact_cache',)
def get_fields(self):
fields = super(JobOptionsSerializer, self).get_fields()
# TODO: remove when API v1 is removed
if self.version == 1:
fields.update(V1JobOptionsSerializer().get_fields())
fields.update(LegacyCredentialFields().get_fields())
return fields
def get_related(self, obj):
res = super(JobOptionsSerializer, self).get_related(obj)
res['labels'] = self.reverse('api:job_template_label_list', kwargs={'pk': obj.pk})
try:
if obj.inventory:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})
except ObjectDoesNotExist:
setattr(obj, 'inventory', None)
try:
if obj.project:
res['project'] = self.reverse('api:project_detail', kwargs={'pk': obj.project.pk})
except ObjectDoesNotExist:
setattr(obj, 'project', None)
try:
if obj.credential:
res['credential'] = self.reverse(
'api:credential_detail', kwargs={'pk': obj.credential}
)
except ObjectDoesNotExist:
setattr(obj, 'credential', None)
try:
if obj.vault_credential:
res['vault_credential'] = self.reverse(
'api:credential_detail', kwargs={'pk': obj.vault_credential}
)
except ObjectDoesNotExist:
setattr(obj, 'vault_credential', None)
if self.version > 1:
if isinstance(obj, UnifiedJobTemplate):
res['extra_credentials'] = self.reverse(
'api:job_template_extra_credentials_list',
kwargs={'pk': obj.pk}
)
res['credentials'] = self.reverse(
'api:job_template_credentials_list',
kwargs={'pk': obj.pk}
)
elif isinstance(obj, UnifiedJob):
res['extra_credentials'] = self.reverse('api:job_extra_credentials_list', kwargs={'pk': obj.pk})
res['credentials'] = self.reverse('api:job_credentials_list', kwargs={'pk': obj.pk})
else:
cloud_cred = obj.cloud_credential
if cloud_cred:
res['cloud_credential'] = self.reverse('api:credential_detail', kwargs={'pk': cloud_cred})
net_cred = obj.network_credential
if net_cred:
res['network_credential'] = self.reverse('api:credential_detail', kwargs={'pk': net_cred})
return res
def to_representation(self, obj):
ret = super(JobOptionsSerializer, self).to_representation(obj)
if obj is None:
return ret
if 'inventory' in ret and not obj.inventory:
ret['inventory'] = None
if 'project' in ret and not obj.project:
ret['project'] = None
if 'playbook' in ret:
ret['playbook'] = ''
ret['credential'] = obj.credential
ret['vault_credential'] = obj.vault_credential
if self.version == 1:
ret['cloud_credential'] = obj.cloud_credential
ret['network_credential'] = obj.network_credential
return ret
def create(self, validated_data):
deprecated_fields = {}
for key in ('credential', 'vault_credential', 'cloud_credential', 'network_credential'):
if key in validated_data:
deprecated_fields[key] = validated_data.pop(key)
obj = super(JobOptionsSerializer, self).create(validated_data)
if deprecated_fields: # TODO: remove in 3.3
self._update_deprecated_fields(deprecated_fields, obj)
return obj
def update(self, obj, validated_data):
deprecated_fields = {}
for key in ('credential', 'vault_credential', 'cloud_credential', 'network_credential'):
if key in validated_data:
deprecated_fields[key] = validated_data.pop(key)
obj = super(JobOptionsSerializer, self).update(obj, validated_data)
if deprecated_fields: # TODO: remove in 3.3
self._update_deprecated_fields(deprecated_fields, obj)
return obj
def _update_deprecated_fields(self, fields, obj):
for key, existing in (
('credential', obj.credentials.filter(credential_type__kind='ssh')),
('vault_credential', obj.credentials.filter(credential_type__kind='vault')),
('cloud_credential', obj.cloud_credentials),
('network_credential', obj.network_credentials),
):
if key in fields:
for cred in existing:
obj.credentials.remove(cred)
if fields[key]:
obj.credentials.add(fields[key])
obj.save()
def validate(self, attrs):
v1_credentials = {}
view = self.context.get('view', None)
for attr, kind, error in (
('cloud_credential', 'cloud', _('You must provide a cloud credential.')),
('network_credential', 'net', _('You must provide a network credential.')),
('credential', 'ssh', _('You must provide an SSH credential.')),
('vault_credential', 'vault', _('You must provide a vault credential.')),
):
if kind in ('cloud', 'net') and self.version > 1:
continue # cloud and net deprecated creds are v1 only
if attr in attrs:
v1_credentials[attr] = None
pk = attrs.pop(attr)
if pk:
cred = v1_credentials[attr] = Credential.objects.get(pk=pk)
if cred.credential_type.kind != kind:
raise serializers.ValidationError({attr: error})
if ((not self.instance or cred.pk != getattr(self.instance, attr)) and
view and view.request and view.request.user not in cred.use_role):
raise PermissionDenied()
if 'project' in self.fields and 'playbook' in self.fields:
project = attrs.get('project', self.instance and self.instance.project or None)
playbook = attrs.get('playbook', self.instance and self.instance.playbook or '')
if not project:
raise serializers.ValidationError({'project': _('This field is required.')})
if project and project.scm_type and playbook and force_text(playbook) not in project.playbook_files:
raise serializers.ValidationError({'playbook': _('Playbook not found for project.')})
if project and not project.scm_type and playbook and force_text(playbook) not in project.playbooks:
raise serializers.ValidationError({'playbook': _('Playbook not found for project.')})
if project and not playbook:
raise serializers.ValidationError({'playbook': _('Must select playbook for project.')})
ret = super(JobOptionsSerializer, self).validate(attrs)
ret.update(v1_credentials)
return ret
class JobTemplateMixin(object):
'''
Provide recent jobs and survey details in summary_fields
'''
def _recent_jobs(self, obj):
if hasattr(obj, 'workflow_jobs'):
job_mgr = obj.workflow_jobs
else:
job_mgr = obj.jobs
return [{'id': x.id, 'status': x.status, 'finished': x.finished}
for x in job_mgr.all().order_by('-created')[:10]]
def get_summary_fields(self, obj):
d = super(JobTemplateMixin, self).get_summary_fields(obj)
if obj.survey_spec is not None and ('name' in obj.survey_spec and 'description' in obj.survey_spec):
d['survey'] = dict(title=obj.survey_spec['name'], description=obj.survey_spec['description'])
d['recent_jobs'] = self._recent_jobs(obj)
# TODO: remove in 3.3
if self.version == 1 and 'vault_credential' in d:
if d['vault_credential'].get('kind','') == 'vault':
d['vault_credential']['kind'] = 'ssh'
return d
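# Illustrative shape of what the mixin above contributes to summary_fields (the
# values are hypothetical, not from a real response): 'recent_jobs' is a list of
# at most ten entries like {'id': 123, 'status': 'successful', 'finished': ...},
# newest first, and 'survey' carries the attached survey spec's name/description.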
class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobOptionsSerializer):
show_capabilities = ['start', 'schedule', 'copy', 'edit', 'delete']
status = serializers.ChoiceField(choices=JobTemplate.JOB_TEMPLATE_STATUS_CHOICES, read_only=True, required=False)
class Meta:
model = JobTemplate
fields = ('*', 'host_config_key', 'ask_diff_mode_on_launch', 'ask_variables_on_launch', 'ask_limit_on_launch', 'ask_tags_on_launch',
'ask_skip_tags_on_launch', 'ask_job_type_on_launch', 'ask_verbosity_on_launch', 'ask_inventory_on_launch',
'ask_credential_on_launch', 'survey_enabled', 'become_enabled', 'diff_mode',
'allow_simultaneous', 'custom_virtualenv')
def get_related(self, obj):
res = super(JobTemplateSerializer, self).get_related(obj)
res.update(dict(
jobs = self.reverse('api:job_template_jobs_list', kwargs={'pk': obj.pk}),
schedules = self.reverse('api:job_template_schedules_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:job_template_activity_stream_list', kwargs={'pk': obj.pk}),
launch = self.reverse('api:job_template_launch', kwargs={'pk': obj.pk}),
notification_templates_any = self.reverse('api:job_template_notification_templates_any_list', kwargs={'pk': obj.pk}),
notification_templates_success = self.reverse('api:job_template_notification_templates_success_list', kwargs={'pk': obj.pk}),
notification_templates_error = self.reverse('api:job_template_notification_templates_error_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:job_template_access_list', kwargs={'pk': obj.pk}),
survey_spec = self.reverse('api:job_template_survey_spec', kwargs={'pk': obj.pk}),
labels = self.reverse('api:job_template_label_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:job_template_object_roles_list', kwargs={'pk': obj.pk}),
instance_groups = self.reverse('api:job_template_instance_groups_list', kwargs={'pk': obj.pk}),
copy = self.reverse('api:job_template_copy', kwargs={'pk': obj.pk}),
))
if obj.host_config_key:
res['callback'] = self.reverse('api:job_template_callback', kwargs={'pk': obj.pk})
return res
def validate(self, attrs):
def get_field_from_model_or_attrs(fd):
return attrs.get(fd, self.instance and getattr(self.instance, fd) or None)
inventory = get_field_from_model_or_attrs('inventory')
project = get_field_from_model_or_attrs('project')
prompting_error_message = _("Must either set a default value or ask to prompt on launch.")
if project is None:
raise serializers.ValidationError({'project': _("Job types 'run' and 'check' must have assigned a project.")})
elif inventory is None and not get_field_from_model_or_attrs('ask_inventory_on_launch'):
raise serializers.ValidationError({'inventory': prompting_error_message})
return super(JobTemplateSerializer, self).validate(attrs)
def validate_extra_vars(self, value):
return vars_validate_or_raise(value)
def get_summary_fields(self, obj):
summary_fields = super(JobTemplateSerializer, self).get_summary_fields(obj)
all_creds = []
extra_creds = []
if obj.pk:
for cred in obj.credentials.all():
summarized_cred = {
'id': cred.pk,
'name': cred.name,
'description': cred.description,
'kind': cred.kind,
'credential_type_id': cred.credential_type_id
}
all_creds.append(summarized_cred)
if self.is_detail_view:
for summarized_cred in all_creds:
if summarized_cred['kind'] in ('cloud', 'net'):
extra_creds.append(summarized_cred)
elif summarized_cred['kind'] == 'ssh':
summary_fields['credential'] = summarized_cred
elif summarized_cred['kind'] == 'vault':
summary_fields['vault_credential'] = summarized_cred
if self.version > 1:
if self.is_detail_view:
summary_fields['extra_credentials'] = extra_creds
else:
# Credential would be an empty dictionary in this case
summary_fields.pop('credential', None)
summary_fields['credentials'] = all_creds
return summary_fields
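# Sketch of the credential summaries built above (ids are hypothetical): on a v2
# detail view summary_fields gains 'credential' (ssh kind), 'vault_credential',
# 'extra_credentials' (cloud/net kinds) and 'credentials' (all of them); on a v2
# list view only 'credentials' is kept and the empty 'credential' entry is popped.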
class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer):
passwords_needed_to_start = serializers.ReadOnlyField()
ask_diff_mode_on_launch = serializers.ReadOnlyField()
ask_variables_on_launch = serializers.ReadOnlyField()
ask_limit_on_launch = serializers.ReadOnlyField()
ask_skip_tags_on_launch = serializers.ReadOnlyField()
ask_tags_on_launch = serializers.ReadOnlyField()
ask_job_type_on_launch = serializers.ReadOnlyField()
ask_verbosity_on_launch = serializers.ReadOnlyField()
ask_inventory_on_launch = serializers.ReadOnlyField()
ask_credential_on_launch = serializers.ReadOnlyField()
artifacts = serializers.SerializerMethodField()
class Meta:
model = Job
fields = ('*', 'job_template', 'passwords_needed_to_start', 'ask_diff_mode_on_launch',
'ask_variables_on_launch', 'ask_limit_on_launch', 'ask_tags_on_launch', 'ask_skip_tags_on_launch',
'ask_job_type_on_launch', 'ask_verbosity_on_launch', 'ask_inventory_on_launch',
'ask_credential_on_launch', 'allow_simultaneous', 'artifacts', 'scm_revision',
'instance_group', 'diff_mode')
def get_related(self, obj):
res = super(JobSerializer, self).get_related(obj)
res.update(dict(
job_events = self.reverse('api:job_job_events_list', kwargs={'pk': obj.pk}),
job_host_summaries = self.reverse('api:job_job_host_summaries_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:job_activity_stream_list', kwargs={'pk': obj.pk}),
notifications = self.reverse('api:job_notifications_list', kwargs={'pk': obj.pk}),
labels = self.reverse('api:job_label_list', kwargs={'pk': obj.pk}),
))
try:
if obj.job_template:
res['job_template'] = self.reverse('api:job_template_detail',
kwargs={'pk': obj.job_template.pk})
except ObjectDoesNotExist:
setattr(obj, 'job_template', None)
if (obj.can_start or True) and self.version == 1: # TODO: remove in 3.3
res['start'] = self.reverse('api:job_start', kwargs={'pk': obj.pk})
if obj.can_cancel or True:
res['cancel'] = self.reverse('api:job_cancel', kwargs={'pk': obj.pk})
try:
if obj.project_update:
res['project_update'] = self.reverse(
'api:project_update_detail', kwargs={'pk': obj.project_update.pk}
)
except ObjectDoesNotExist:
pass
res['create_schedule'] = self.reverse('api:job_create_schedule', kwargs={'pk': obj.pk})
res['relaunch'] = self.reverse('api:job_relaunch', kwargs={'pk': obj.pk})
return res
def get_artifacts(self, obj):
if obj:
return obj.display_artifacts()
return {}
def to_internal_value(self, data):
# When creating a new job and a job template is specified, populate any
# fields not provided in data from the job template.
if not self.instance and isinstance(data, dict) and data.get('job_template', False):
try:
job_template = JobTemplate.objects.get(pk=data['job_template'])
except JobTemplate.DoesNotExist:
raise serializers.ValidationError({'job_template': _('Invalid job template.')})
data.setdefault('name', job_template.name)
data.setdefault('description', job_template.description)
data.setdefault('job_type', job_template.job_type)
if job_template.inventory:
data.setdefault('inventory', job_template.inventory.pk)
if job_template.project:
data.setdefault('project', job_template.project.pk)
data.setdefault('playbook', job_template.playbook)
if job_template.credential:
data.setdefault('credential', job_template.credential.pk)
data.setdefault('forks', job_template.forks)
data.setdefault('limit', job_template.limit)
data.setdefault('verbosity', job_template.verbosity)
data.setdefault('extra_vars', job_template.extra_vars)
data.setdefault('job_tags', job_template.job_tags)
data.setdefault('force_handlers', job_template.force_handlers)
data.setdefault('skip_tags', job_template.skip_tags)
data.setdefault('start_at_task', job_template.start_at_task)
return super(JobSerializer, self).to_internal_value(data)
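# Hypothetical example of the defaulting above: POSTing {"job_template": 42} and
# nothing else fills name, description, job_type, inventory, project, playbook,
# credential, forks, limit, verbosity, extra_vars, job_tags, force_handlers,
# skip_tags and start_at_task from job template 42 before normal field validation.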
def to_representation(self, obj):
ret = super(JobSerializer, self).to_representation(obj)
if obj is None:
return ret
if 'job_template' in ret and not obj.job_template:
ret['job_template'] = None
if 'extra_vars' in ret:
ret['extra_vars'] = obj.display_extra_vars()
return ret
def get_summary_fields(self, obj):
summary_fields = super(JobSerializer, self).get_summary_fields(obj)
if self.is_detail_view: # TODO: remove version check in 3.3
all_creds = []
extra_creds = []
for cred in obj.credentials.all():
summarized_cred = {
'id': cred.pk,
'name': cred.name,
'description': cred.description,
'kind': cred.kind,
'credential_type_id': cred.credential_type_id
}
all_creds.append(summarized_cred)
if cred.credential_type.kind in ('cloud', 'net'):
extra_creds.append(summarized_cred)
elif cred.credential_type.kind == 'ssh':
summary_fields['credential'] = summarized_cred
elif cred.credential_type.kind == 'vault':
summary_fields['vault_credential'] = summarized_cred
if self.version > 1:
summary_fields['extra_credentials'] = extra_creds
summary_fields['credentials'] = all_creds
return summary_fields
class JobCancelSerializer(JobSerializer):
can_cancel = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_cancel',)
class JobRelaunchSerializer(BaseSerializer):
passwords_needed_to_start = serializers.SerializerMethodField()
retry_counts = serializers.SerializerMethodField()
hosts = serializers.ChoiceField(
required=False, allow_null=True, default='all',
choices=[
('all', _('No change to job limit')),
('failed', _('All failed and unreachable hosts'))
],
write_only=True
)
class Meta:
model = Job
fields = ('passwords_needed_to_start', 'retry_counts', 'hosts',)
def to_internal_value(self, data):
obj = self.context.get('obj')
all_data = self.to_representation(obj)
all_data.update(data)
ret = super(JobRelaunchSerializer, self).to_internal_value(all_data)
return ret
def to_representation(self, obj):
res = super(JobRelaunchSerializer, self).to_representation(obj)
view = self.context.get('view', None)
if hasattr(view, '_raw_data_form_marker'):
password_keys = dict([(p, u'') for p in self.get_passwords_needed_to_start(obj)])
res.update(password_keys)
return res
def get_passwords_needed_to_start(self, obj):
if obj:
return obj.passwords_needed_to_start

return []  # empty list (rather than '') keeps the type consistent with passwords_needed_to_start
def get_retry_counts(self, obj):
if obj.status in ACTIVE_STATES:
return _('Relaunch by host status not available until job finishes running.')
data = OrderedDict([])
for status in self.fields['hosts'].choices.keys():
data[status] = obj.retry_qs(status).count()
return data
def validate_passwords_needed_to_start(self, value):
obj = self.context.get('obj')
data = self.context.get('data')
# Check for passwords needed
needed = self.get_passwords_needed_to_start(obj)
provided = dict([(field, data.get(field, '')) for field in needed])
if not all(provided.values()):
raise serializers.ValidationError(needed)
return value
def validate(self, attrs):
obj = self.context.get('obj')
if obj.project is None:
raise serializers.ValidationError(dict(errors=[_("Job Template Project is missing or undefined.")]))
if obj.inventory is None or obj.inventory.pending_deletion:
raise serializers.ValidationError(dict(errors=[_("Job Template Inventory is missing or undefined.")]))
attrs.pop('hosts', None)
attrs = super(JobRelaunchSerializer, self).validate(attrs)
return attrs
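# Hypothetical relaunch request: {"hosts": "failed", "ssh_password": "..."}.
# validate_passwords_needed_to_start() checks any launch-time passwords against the
# job's credentials, and 'hosts' is popped above so it does not reach model
# validation; the retry-by-host-status behaviour itself is applied outside this
# serializer.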
class JobCreateScheduleSerializer(BaseSerializer):
can_schedule = serializers.SerializerMethodField()
prompts = serializers.SerializerMethodField()
class Meta:
model = Job
fields = ('can_schedule', 'prompts',)
def get_can_schedule(self, obj):
'''
Need both a job template and job prompts to schedule
'''
return obj.can_schedule
@staticmethod
def _summarize(res_name, obj):
summary = {}
for field in SUMMARIZABLE_FK_FIELDS[res_name]:
summary[field] = getattr(obj, field, None)
return summary
def get_prompts(self, obj):
try:
config = obj.launch_config
ret = config.prompts_dict(display=True)
if 'inventory' in ret:
ret['inventory'] = self._summarize('inventory', ret['inventory'])
if 'credentials' in ret:
all_creds = [self._summarize('credential', cred) for cred in ret['credentials']]
ret['credentials'] = all_creds
return ret
except JobLaunchConfig.DoesNotExist:
return {'all': _('Unknown, job may have been run before launch configurations were saved.')}
class AdHocCommandSerializer(UnifiedJobSerializer):
class Meta:
model = AdHocCommand
fields = ('*', 'job_type', 'inventory', 'limit', 'credential',
'module_name', 'module_args', 'forks', 'verbosity', 'extra_vars',
'become_enabled', 'diff_mode', '-unified_job_template', '-description')
extra_kwargs = {
'name': {
'read_only': True,
},
}
def get_field_names(self, declared_fields, info):
field_names = super(AdHocCommandSerializer, self).get_field_names(declared_fields, info)
# Meta multiple inheritance and -field_name options don't seem to be
# taking effect above, so remove the undesired fields here.
return tuple(x for x in field_names if x not in ('unified_job_template', 'description'))
def build_standard_field(self, field_name, model_field):
field_class, field_kwargs = super(AdHocCommandSerializer, self).build_standard_field(field_name, model_field)
# Load module name choices dynamically from DB settings.
if field_name == 'module_name':
field_class = serializers.ChoiceField
module_name_choices = [(x, x) for x in settings.AD_HOC_COMMANDS]
module_name_default = 'command' if 'command' in [x[0] for x in module_name_choices] else ''
field_kwargs['choices'] = module_name_choices
field_kwargs['required'] = bool(not module_name_default)
field_kwargs['default'] = module_name_default or serializers.empty
field_kwargs['allow_blank'] = bool(module_name_default)
field_kwargs.pop('max_length', None)
return field_class, field_kwargs
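# Illustrative settings assumption: with AD_HOC_COMMANDS = ['command', 'shell'],
# module_name becomes a ChoiceField over those names and 'command' is the default
# (so the field is optional and may be blank); if 'command' were absent from the
# list, the field would instead be required with no default.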
def get_related(self, obj):
res = super(AdHocCommandSerializer, self).get_related(obj)
if obj.inventory:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})
if obj.credential:
res['credential'] = self.reverse('api:credential_detail', kwargs={'pk': obj.credential.pk})
res.update(dict(
events = self.reverse('api:ad_hoc_command_ad_hoc_command_events_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:ad_hoc_command_activity_stream_list', kwargs={'pk': obj.pk}),
notifications = self.reverse('api:ad_hoc_command_notifications_list', kwargs={'pk': obj.pk}),
))
res['cancel'] = self.reverse('api:ad_hoc_command_cancel', kwargs={'pk': obj.pk})
res['relaunch'] = self.reverse('api:ad_hoc_command_relaunch', kwargs={'pk': obj.pk})
return res
def to_representation(self, obj):
ret = super(AdHocCommandSerializer, self).to_representation(obj)
if 'inventory' in ret and not obj.inventory:
ret['inventory'] = None
if 'credential' in ret and not obj.credential:
ret['credential'] = None
# For the UI, only module_name is returned for name, instead of the
# longer module name + module_args format.
if 'name' in ret:
ret['name'] = obj.module_name
return ret
def validate_extra_vars(self, value):
redacted_extra_vars, removed_vars = extract_ansible_vars(value)
if removed_vars:
raise serializers.ValidationError(_(
"{} are prohibited from use in ad hoc commands."
).format(", ".join(removed_vars)))
return vars_validate_or_raise(value)
class AdHocCommandCancelSerializer(AdHocCommandSerializer):
can_cancel = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_cancel',)
class AdHocCommandRelaunchSerializer(AdHocCommandSerializer):
class Meta:
fields = ()
def to_representation(self, obj):
if obj:
return dict([(p, u'') for p in obj.passwords_needed_to_start])
else:
return {}
class SystemJobTemplateSerializer(UnifiedJobTemplateSerializer):
class Meta:
model = SystemJobTemplate
fields = ('*', 'job_type',)
def get_related(self, obj):
res = super(SystemJobTemplateSerializer, self).get_related(obj)
res.update(dict(
jobs = self.reverse('api:system_job_template_jobs_list', kwargs={'pk': obj.pk}),
schedules = self.reverse('api:system_job_template_schedules_list', kwargs={'pk': obj.pk}),
launch = self.reverse('api:system_job_template_launch', kwargs={'pk': obj.pk}),
notification_templates_any = self.reverse('api:system_job_template_notification_templates_any_list', kwargs={'pk': obj.pk}),
notification_templates_success = self.reverse('api:system_job_template_notification_templates_success_list', kwargs={'pk': obj.pk}),
notification_templates_error = self.reverse('api:system_job_template_notification_templates_error_list', kwargs={'pk': obj.pk}),
))
return res
class SystemJobSerializer(UnifiedJobSerializer):
result_stdout = serializers.SerializerMethodField()
class Meta:
model = SystemJob
fields = ('*', 'system_job_template', 'job_type', 'extra_vars', 'result_stdout')
def get_related(self, obj):
res = super(SystemJobSerializer, self).get_related(obj)
if obj.system_job_template:
res['system_job_template'] = self.reverse('api:system_job_template_detail',
kwargs={'pk': obj.system_job_template.pk})
res['notifications'] = self.reverse('api:system_job_notifications_list', kwargs={'pk': obj.pk})
if obj.can_cancel or True:
res['cancel'] = self.reverse('api:system_job_cancel', kwargs={'pk': obj.pk})
res['events'] = self.reverse('api:system_job_events_list', kwargs={'pk': obj.pk})
return res
def get_result_stdout(self, obj):
try:
return obj.result_stdout
except StdoutMaxBytesExceeded as e:
return _(
"Standard Output too large to display ({text_size} bytes), "
"only download supported for sizes over {supported_size} bytes.").format(
text_size=e.total, supported_size=e.supported
)
class SystemJobCancelSerializer(SystemJobSerializer):
can_cancel = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_cancel',)
class WorkflowJobTemplateSerializer(JobTemplateMixin, LabelsListMixin, UnifiedJobTemplateSerializer):
show_capabilities = ['start', 'schedule', 'edit', 'copy', 'delete']
class Meta:
model = WorkflowJobTemplate
fields = ('*', 'extra_vars', 'organization', 'survey_enabled', 'allow_simultaneous',
'ask_variables_on_launch',)
def get_related(self, obj):
res = super(WorkflowJobTemplateSerializer, self).get_related(obj)
res.update(dict(
workflow_jobs = self.reverse('api:workflow_job_template_jobs_list', kwargs={'pk': obj.pk}),
schedules = self.reverse('api:workflow_job_template_schedules_list', kwargs={'pk': obj.pk}),
launch = self.reverse('api:workflow_job_template_launch', kwargs={'pk': obj.pk}),
copy = self.reverse('api:workflow_job_template_copy', kwargs={'pk': obj.pk}),
workflow_nodes = self.reverse('api:workflow_job_template_workflow_nodes_list', kwargs={'pk': obj.pk}),
labels = self.reverse('api:workflow_job_template_label_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:workflow_job_template_activity_stream_list', kwargs={'pk': obj.pk}),
notification_templates_any = self.reverse('api:workflow_job_template_notification_templates_any_list', kwargs={'pk': obj.pk}),
notification_templates_success = self.reverse('api:workflow_job_template_notification_templates_success_list', kwargs={'pk': obj.pk}),
notification_templates_error = self.reverse('api:workflow_job_template_notification_templates_error_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:workflow_job_template_access_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:workflow_job_template_object_roles_list', kwargs={'pk': obj.pk}),
survey_spec = self.reverse('api:workflow_job_template_survey_spec', kwargs={'pk': obj.pk}),
))
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
return res
def validate_extra_vars(self, value):
return vars_validate_or_raise(value)
# TODO:
class WorkflowJobTemplateListSerializer(WorkflowJobTemplateSerializer):
pass
# TODO:
class WorkflowJobSerializer(LabelsListMixin, UnifiedJobSerializer):
class Meta:
model = WorkflowJob
fields = ('*', 'workflow_job_template', 'extra_vars', 'allow_simultaneous', '-execution_node',)
def get_related(self, obj):
res = super(WorkflowJobSerializer, self).get_related(obj)
if obj.workflow_job_template:
res['workflow_job_template'] = self.reverse('api:workflow_job_template_detail',
kwargs={'pk': obj.workflow_job_template.pk})
res['notifications'] = self.reverse('api:workflow_job_notifications_list', kwargs={'pk': obj.pk})
res['workflow_nodes'] = self.reverse('api:workflow_job_workflow_nodes_list', kwargs={'pk': obj.pk})
res['labels'] = self.reverse('api:workflow_job_label_list', kwargs={'pk': obj.pk})
res['activity_stream'] = self.reverse('api:workflow_job_activity_stream_list', kwargs={'pk': obj.pk})
res['relaunch'] = self.reverse('api:workflow_job_relaunch', kwargs={'pk': obj.pk})
if obj.can_cancel or True:
res['cancel'] = self.reverse('api:workflow_job_cancel', kwargs={'pk': obj.pk})
return res
def to_representation(self, obj):
ret = super(WorkflowJobSerializer, self).to_representation(obj)
if obj is None:
return ret
if 'extra_vars' in ret:
ret['extra_vars'] = obj.display_extra_vars()
return ret
# TODO:
class WorkflowJobListSerializer(WorkflowJobSerializer, UnifiedJobListSerializer):
class Meta:
fields = ('*', '-execution_node',)
class WorkflowJobCancelSerializer(WorkflowJobSerializer):
can_cancel = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_cancel',)
class LaunchConfigurationBaseSerializer(BaseSerializer):
job_type = serializers.ChoiceField(allow_blank=True, allow_null=True, required=False, default=None,
choices=NEW_JOB_TYPE_CHOICES)
job_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
limit = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
skip_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
diff_mode = serializers.NullBooleanField(required=False, default=None)
verbosity = serializers.ChoiceField(allow_null=True, required=False, default=None,
choices=VERBOSITY_CHOICES)
exclude_errors = ()
class Meta:
fields = ('*', 'extra_data', 'inventory', # Saved launch-time config fields
'job_type', 'job_tags', 'skip_tags', 'limit', 'diff_mode', 'verbosity')
def get_related(self, obj):
res = super(LaunchConfigurationBaseSerializer, self).get_related(obj)
if obj.inventory_id:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory_id})
res['credentials'] = self.reverse(
'api:{}_credentials_list'.format(get_type_for_model(self.Meta.model)),
kwargs={'pk': obj.pk}
)
return res
def _build_mock_obj(self, attrs):
mock_obj = self.Meta.model()
if self.instance:
for field in self.instance._meta.fields:
setattr(mock_obj, field.name, getattr(self.instance, field.name))
field_names = set(field.name for field in self.Meta.model._meta.fields)
for field_name, value in list(attrs.items()):  # iterate over a copy so popping from attrs below is safe
setattr(mock_obj, field_name, value)
if field_name not in field_names:
attrs.pop(field_name)
return mock_obj
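# _build_mock_obj() assembles an unsaved model instance that merges the current
# instance's saved values with the incoming attrs, so validate() below can run the
# prompt checks (prompts_dict / _accept_or_ignore_job_kwargs) without touching the
# database; keys that are not real model fields are also dropped from attrs here.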
def to_representation(self, obj):
ret = super(LaunchConfigurationBaseSerializer, self).to_representation(obj)
if obj is None:
return ret
if 'extra_data' in ret and obj.survey_passwords:
ret['extra_data'] = obj.display_extra_data()
return ret
def get_summary_fields(self, obj):
summary_fields = super(LaunchConfigurationBaseSerializer, self).get_summary_fields(obj)
# Credential would be an empty dictionary in this case
summary_fields.pop('credential', None)
return summary_fields
def validate(self, attrs):
attrs = super(LaunchConfigurationBaseSerializer, self).validate(attrs)
ujt = None
if 'unified_job_template' in attrs:
ujt = attrs['unified_job_template']
elif self.instance:
ujt = self.instance.unified_job_template
# Replace $encrypted$ submissions with db value if exists
# build additional field survey_passwords to track redacted variables
if 'extra_data' in attrs:
extra_data = parse_yaml_or_json(attrs.get('extra_data', {}))
if hasattr(ujt, 'survey_password_variables'):
# Prepare additional field survey_passwords for save
password_dict = {}
for key in ujt.survey_password_variables():
if key in extra_data:
password_dict[key] = REPLACE_STR
if not self.instance or password_dict != self.instance.survey_passwords:
attrs['survey_passwords'] = password_dict.copy()
# Force dict type (cannot preserve YAML formatting if passwords are involved)
if not isinstance(attrs['extra_data'], dict):
attrs['extra_data'] = parse_yaml_or_json(attrs['extra_data'])
# Encrypt the extra_data for save, only current password vars in JT survey
encrypt_dict(attrs['extra_data'], password_dict.keys())
# For any raw $encrypted$ string, either
# - replace with existing DB value
# - raise a validation error
# - remove key from extra_data if survey default is present
if self.instance:
db_extra_data = parse_yaml_or_json(self.instance.extra_data)
else:
db_extra_data = {}
for key in password_dict.keys():
if attrs['extra_data'].get(key, None) == REPLACE_STR:
if key not in db_extra_data:
element = ujt.pivot_spec(ujt.survey_spec)[key]
if 'default' in element and element['default']:
attrs['survey_passwords'].pop(key, None)
attrs['extra_data'].pop(key, None)
else:
raise serializers.ValidationError(
{"extra_data": _('Provided variable {} has no database value to replace with.').format(key)})
else:
attrs['extra_data'][key] = db_extra_data[key]
# Build unsaved version of this config, use it to detect prompts errors
mock_obj = self._build_mock_obj(attrs)
accepted, rejected, errors = ujt._accept_or_ignore_job_kwargs(
_exclude_errors=self.exclude_errors, **mock_obj.prompts_dict())
# Launch configs call extra_vars extra_data for historical reasons
if 'extra_vars' in errors:
errors['extra_data'] = errors.pop('extra_vars')
if errors:
raise serializers.ValidationError(errors)
# Model `.save` needs the container dict, not the pseudo fields
if mock_obj.char_prompts:
attrs['char_prompts'] = mock_obj.char_prompts
return attrs
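# Sketch of the survey-password round trip handled above (the variable name
# 'secret_var' is hypothetical): submitting extra_data {"secret_var": "$encrypted$"}
# re-uses the value already stored on the instance (or drops the key when the survey
# default covers it), while a new plaintext value is encrypted in place and recorded
# in survey_passwords as the REPLACE_STR sentinel.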
class WorkflowJobTemplateNodeSerializer(LaunchConfigurationBaseSerializer):
credential = models.PositiveIntegerField(
blank=True, null=True, default=None,
help_text='This resource has been deprecated and will be removed in a future release')
success_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
failure_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
always_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
exclude_errors = ('required',) # required variables may be provided by WFJT or on launch
class Meta:
model = WorkflowJobTemplateNode
fields = ('*', 'credential', 'workflow_job_template', '-name', '-description', 'id', 'url', 'related',
'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes',)
def get_related(self, obj):
res = super(WorkflowJobTemplateNodeSerializer, self).get_related(obj)
res['success_nodes'] = self.reverse('api:workflow_job_template_node_success_nodes_list', kwargs={'pk': obj.pk})
res['failure_nodes'] = self.reverse('api:workflow_job_template_node_failure_nodes_list', kwargs={'pk': obj.pk})
res['always_nodes'] = self.reverse('api:workflow_job_template_node_always_nodes_list', kwargs={'pk': obj.pk})
if obj.unified_job_template:
res['unified_job_template'] = obj.unified_job_template.get_absolute_url(self.context.get('request'))
try:
res['workflow_job_template'] = self.reverse('api:workflow_job_template_detail', kwargs={'pk': obj.workflow_job_template.pk})
except WorkflowJobTemplate.DoesNotExist:
pass
return res
def build_field(self, field_name, info, model_class, nested_depth):
# have to special-case the field so that DRF will not automagically make it
# read-only because it's a property on the model.
if field_name == 'credential':
return self.build_standard_field(field_name,
self.credential)
return super(WorkflowJobTemplateNodeSerializer, self).build_field(field_name, info, model_class, nested_depth)
def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(WorkflowJobTemplateNodeSerializer, self).build_relational_field(field_name, relation_info)
# workflow_job_template is read-only unless creating a new node.
if self.instance and field_name == 'workflow_job_template':
field_kwargs['read_only'] = True
field_kwargs.pop('queryset', None)
return field_class, field_kwargs
def validate(self, attrs):
deprecated_fields = {}
if 'credential' in attrs: # TODO: remove when v2 API is deprecated
deprecated_fields['credential'] = attrs.pop('credential')
view = self.context.get('view')
attrs = super(WorkflowJobTemplateNodeSerializer, self).validate(attrs)
ujt_obj = None
if 'unified_job_template' in attrs:
ujt_obj = attrs['unified_job_template']
elif self.instance:
ujt_obj = self.instance.unified_job_template
if isinstance(ujt_obj, (WorkflowJobTemplate)):
raise serializers.ValidationError({
"unified_job_template": _("Cannot nest a %s inside a WorkflowJobTemplate") % ujt_obj.__class__.__name__})
if 'credential' in deprecated_fields: # TODO: remove when v2 API is deprecated
cred = deprecated_fields['credential']
attrs['credential'] = cred
if cred is not None:
if not ujt_obj.ask_credential_on_launch:
raise serializers.ValidationError({"credential": _(
"Related template is not configured to accept credentials on launch.")})
cred = Credential.objects.get(pk=cred)
view = self.context.get('view', None)
if (not view) or (not view.request) or (view.request.user not in cred.use_role):
raise PermissionDenied()
return attrs
def create(self, validated_data): # TODO: remove when v2 API is deprecated
deprecated_fields = {}
if 'credential' in validated_data:
deprecated_fields['credential'] = validated_data.pop('credential')
obj = super(WorkflowJobTemplateNodeSerializer, self).create(validated_data)
if 'credential' in deprecated_fields:
if deprecated_fields['credential']:
obj.credentials.add(deprecated_fields['credential'])
return obj
def update(self, obj, validated_data): # TODO: remove when v2 API is deprecated
deprecated_fields = {}
if 'credential' in validated_data:
deprecated_fields['credential'] = validated_data.pop('credential')
obj = super(WorkflowJobTemplateNodeSerializer, self).update(obj, validated_data)
if 'credential' in deprecated_fields:
for cred in obj.credentials.filter(credential_type__kind='ssh'):
obj.credentials.remove(cred)
if deprecated_fields['credential']:
obj.credentials.add(deprecated_fields['credential'])
return obj
class WorkflowJobNodeSerializer(LaunchConfigurationBaseSerializer):
credential = models.PositiveIntegerField(
blank=True, null=True, default=None,
help_text='This resource has been deprecated and will be removed in a future release')
success_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
failure_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
always_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
class Meta:
model = WorkflowJobNode
fields = ('*', 'credential', 'job', 'workflow_job', '-name', '-description', 'id', 'url', 'related',
'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes',)
def get_related(self, obj):
res = super(WorkflowJobNodeSerializer, self).get_related(obj)
res['success_nodes'] = self.reverse('api:workflow_job_node_success_nodes_list', kwargs={'pk': obj.pk})
res['failure_nodes'] = self.reverse('api:workflow_job_node_failure_nodes_list', kwargs={'pk': obj.pk})
res['always_nodes'] = self.reverse('api:workflow_job_node_always_nodes_list', kwargs={'pk': obj.pk})
if obj.unified_job_template:
res['unified_job_template'] = obj.unified_job_template.get_absolute_url(self.context.get('request'))
if obj.job:
res['job'] = obj.job.get_absolute_url(self.context.get('request'))
if obj.workflow_job:
res['workflow_job'] = self.reverse('api:workflow_job_detail', kwargs={'pk': obj.workflow_job.pk})
return res
class WorkflowJobNodeListSerializer(WorkflowJobNodeSerializer):
pass
class WorkflowJobNodeDetailSerializer(WorkflowJobNodeSerializer):
pass
class WorkflowJobTemplateNodeDetailSerializer(WorkflowJobTemplateNodeSerializer):
'''
Influence the api browser sample data to not include workflow_job_template
when editing a WorkflowNode.
Note: I was not able to accomplish this through the use of extra_kwargs.
Maybe something to do with workflow_job_template being a relational field?
'''
def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(WorkflowJobTemplateNodeDetailSerializer, self).build_relational_field(field_name, relation_info)
if self.instance and field_name == 'workflow_job_template':
field_kwargs['read_only'] = True
field_kwargs.pop('queryset', None)
return field_class, field_kwargs
class JobListSerializer(JobSerializer, UnifiedJobListSerializer):
pass
class AdHocCommandListSerializer(AdHocCommandSerializer, UnifiedJobListSerializer):
pass
class SystemJobListSerializer(SystemJobSerializer, UnifiedJobListSerializer):
pass
class JobHostSummarySerializer(BaseSerializer):
class Meta:
model = JobHostSummary
fields = ('*', '-name', '-description', 'job', 'host', 'host_name', 'changed',
'dark', 'failures', 'ok', 'processed', 'skipped', 'failed')
def get_related(self, obj):
res = super(JobHostSummarySerializer, self).get_related(obj)
res.update(dict(
job=self.reverse('api:job_detail', kwargs={'pk': obj.job.pk})))
if obj.host is not None:
res.update(dict(
host=self.reverse('api:host_detail', kwargs={'pk': obj.host.pk})
))
return res
def get_summary_fields(self, obj):
d = super(JobHostSummarySerializer, self).get_summary_fields(obj)
try:
d['job']['job_template_id'] = obj.job.job_template.id
d['job']['job_template_name'] = obj.job.job_template.name
except (KeyError, AttributeError):
pass
return d
class JobEventSerializer(BaseSerializer):
event_display = serializers.CharField(source='get_event_display2', read_only=True)
event_level = serializers.IntegerField(read_only=True)
class Meta:
model = JobEvent
fields = ('*', '-name', '-description', 'job', 'event', 'counter',
'event_display', 'event_data', 'event_level', 'failed',
'changed', 'uuid', 'parent_uuid', 'host', 'host_name', 'parent',
'playbook', 'play', 'task', 'role', 'stdout', 'start_line', 'end_line',
'verbosity')
def get_related(self, obj):
res = super(JobEventSerializer, self).get_related(obj)
res.update(dict(
job = self.reverse('api:job_detail', kwargs={'pk': obj.job_id}),
))
if obj.parent_id:
res['parent'] = self.reverse('api:job_event_detail', kwargs={'pk': obj.parent_id})
if obj.children.exists():
res['children'] = self.reverse('api:job_event_children_list', kwargs={'pk': obj.pk})
if obj.host_id:
res['host'] = self.reverse('api:host_detail', kwargs={'pk': obj.host_id})
if obj.hosts.exists():
res['hosts'] = self.reverse('api:job_event_hosts_list', kwargs={'pk': obj.pk})
return res
def get_summary_fields(self, obj):
d = super(JobEventSerializer, self).get_summary_fields(obj)
try:
d['job']['job_template_id'] = obj.job.job_template.id
d['job']['job_template_name'] = obj.job.job_template.name
except (KeyError, AttributeError):
pass
return d
def to_representation(self, obj):
ret = super(JobEventSerializer, self).to_representation(obj)
# Show full stdout for event detail view, truncate only for list view.
if hasattr(self.context.get('view', None), 'retrieve'):
return ret
# Show full stdout for playbook_on_* events.
if obj and obj.event.startswith('playbook_on'):
return ret
max_bytes = settings.EVENT_STDOUT_MAX_BYTES_DISPLAY
if max_bytes > 0 and 'stdout' in ret and len(ret['stdout']) >= max_bytes:
ret['stdout'] = ret['stdout'][:(max_bytes - 1)] + u'\u2026'
set_count = 0
reset_count = 0
for m in ANSI_SGR_PATTERN.finditer(ret['stdout']):
if m.string[m.start():m.end()] == u'\u001b[0m':
reset_count += 1
else:
set_count += 1
ret['stdout'] += u'\u001b[0m' * (set_count - reset_count)
return ret
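# Truncation note for the list view above: stdout is cut to
# EVENT_STDOUT_MAX_BYTES_DISPLAY and suffixed with an ellipsis, then any ANSI SGR
# sequences left unbalanced by the cut are closed by appending one reset (ESC[0m)
# per unmatched opener, so partial colour codes do not bleed into later output.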
class JobEventWebSocketSerializer(JobEventSerializer):
created = serializers.SerializerMethodField()
modified = serializers.SerializerMethodField()
event_name = serializers.CharField(source='event')
group_name = serializers.SerializerMethodField()
class Meta:
model = JobEvent
fields = ('*', 'event_name', 'group_name',)
def get_created(self, obj):
return obj.created.isoformat()
def get_modified(self, obj):
return obj.modified.isoformat()
def get_group_name(self, obj):
return 'job_events'
class ProjectUpdateEventSerializer(JobEventSerializer):
class Meta:
model = ProjectUpdateEvent
fields = ('*', '-name', '-description', '-job', '-job_id',
'-parent_uuid', '-parent', '-host', 'project_update')
def get_related(self, obj):
res = super(JobEventSerializer, self).get_related(obj)
res['project_update'] = self.reverse(
'api:project_update_detail', kwargs={'pk': obj.project_update_id}
)
return res
class ProjectUpdateEventWebSocketSerializer(ProjectUpdateEventSerializer):
created = serializers.SerializerMethodField()
modified = serializers.SerializerMethodField()
event_name = serializers.CharField(source='event')
group_name = serializers.SerializerMethodField()
class Meta:
model = ProjectUpdateEvent
fields = ('*', 'event_name', 'group_name',)
def get_created(self, obj):
return obj.created.isoformat()
def get_modified(self, obj):
return obj.modified.isoformat()
def get_group_name(self, obj):
return 'project_update_events'
class AdHocCommandEventSerializer(BaseSerializer):
event_display = serializers.CharField(source='get_event_display', read_only=True)
class Meta:
model = AdHocCommandEvent
fields = ('*', '-name', '-description', 'ad_hoc_command', 'event',
'counter', 'event_display', 'event_data', 'failed',
'changed', 'uuid', 'host', 'host_name', 'stdout',
'start_line', 'end_line', 'verbosity')
def get_related(self, obj):
res = super(AdHocCommandEventSerializer, self).get_related(obj)
res.update(dict(
ad_hoc_command = self.reverse('api:ad_hoc_command_detail', kwargs={'pk': obj.ad_hoc_command_id}),
))
if obj.host:
res['host'] = self.reverse('api:host_detail', kwargs={'pk': obj.host.pk})
return res
def to_representation(self, obj):
ret = super(AdHocCommandEventSerializer, self).to_representation(obj)
# Show full stdout for event detail view, truncate only for list view.
if hasattr(self.context.get('view', None), 'retrieve'):
return ret
max_bytes = settings.EVENT_STDOUT_MAX_BYTES_DISPLAY
if max_bytes > 0 and 'stdout' in ret and len(ret['stdout']) >= max_bytes:
ret['stdout'] = ret['stdout'][:(max_bytes - 1)] + u'\u2026'
set_count = 0
reset_count = 0
for m in ANSI_SGR_PATTERN.finditer(ret['stdout']):
if m.string[m.start():m.end()] == u'\u001b[0m':
reset_count += 1
else:
set_count += 1
ret['stdout'] += u'\u001b[0m' * (set_count - reset_count)
return ret
class AdHocCommandEventWebSocketSerializer(AdHocCommandEventSerializer):
created = serializers.SerializerMethodField()
modified = serializers.SerializerMethodField()
event_name = serializers.CharField(source='event')
group_name = serializers.SerializerMethodField()
class Meta:
model = AdHocCommandEvent
fields = ('*', 'event_name', 'group_name',)
def get_created(self, obj):
return obj.created.isoformat()
def get_modified(self, obj):
return obj.modified.isoformat()
def get_group_name(self, obj):
return 'ad_hoc_command_events'
class InventoryUpdateEventSerializer(AdHocCommandEventSerializer):
class Meta:
model = InventoryUpdateEvent
fields = ('*', '-name', '-description', '-ad_hoc_command', '-host',
'-host_name', 'inventory_update')
def get_related(self, obj):
res = super(AdHocCommandEventSerializer, self).get_related(obj)
res['inventory_update'] = self.reverse(
'api:inventory_update_detail', kwargs={'pk': obj.inventory_update_id}
)
return res
class InventoryUpdateEventWebSocketSerializer(InventoryUpdateEventSerializer):
created = serializers.SerializerMethodField()
modified = serializers.SerializerMethodField()
event_name = serializers.CharField(source='event')
group_name = serializers.SerializerMethodField()
class Meta:
model = InventoryUpdateEvent
fields = ('*', 'event_name', 'group_name',)
def get_created(self, obj):
return obj.created.isoformat()
def get_modified(self, obj):
return obj.modified.isoformat()
def get_group_name(self, obj):
return 'inventory_update_events'
class SystemJobEventSerializer(AdHocCommandEventSerializer):
class Meta:
model = SystemJobEvent
fields = ('*', '-name', '-description', '-ad_hoc_command', '-host',
'-host_name', 'system_job')
def get_related(self, obj):
res = super(AdHocCommandEventSerializer, self).get_related(obj)
res['system_job'] = self.reverse(
'api:system_job_detail', kwargs={'pk': obj.system_job_id}
)
return res
class SystemJobEventWebSocketSerializer(SystemJobEventSerializer):
created = serializers.SerializerMethodField()
modified = serializers.SerializerMethodField()
event_name = serializers.CharField(source='event')
group_name = serializers.SerializerMethodField()
class Meta:
model = SystemJobEvent
fields = ('*', 'event_name', 'group_name',)
def get_created(self, obj):
return obj.created.isoformat()
def get_modified(self, obj):
return obj.modified.isoformat()
def get_group_name(self, obj):
return 'system_job_events'
class JobLaunchSerializer(BaseSerializer):
# Representational fields
passwords_needed_to_start = serializers.ReadOnlyField()
can_start_without_user_input = serializers.BooleanField(read_only=True)
variables_needed_to_start = serializers.ReadOnlyField()
credential_needed_to_start = serializers.SerializerMethodField()
inventory_needed_to_start = serializers.SerializerMethodField()
survey_enabled = serializers.SerializerMethodField()
job_template_data = serializers.SerializerMethodField()
defaults = serializers.SerializerMethodField()
# Accepted on launch fields
extra_vars = serializers.JSONField(required=False, write_only=True)
inventory = serializers.PrimaryKeyRelatedField(
queryset=Inventory.objects.all(),
required=False, write_only=True
)
credentials = serializers.PrimaryKeyRelatedField(
many=True, queryset=Credential.objects.all(),
required=False, write_only=True
)
credential_passwords = VerbatimField(required=False, write_only=True)
diff_mode = serializers.BooleanField(required=False, write_only=True)
job_tags = serializers.CharField(required=False, write_only=True, allow_blank=True)
job_type = serializers.ChoiceField(required=False, choices=NEW_JOB_TYPE_CHOICES, write_only=True)
skip_tags = serializers.CharField(required=False, write_only=True, allow_blank=True)
limit = serializers.CharField(required=False, write_only=True, allow_blank=True)
verbosity = serializers.ChoiceField(required=False, choices=VERBOSITY_CHOICES, write_only=True)
class Meta:
model = JobTemplate
fields = ('can_start_without_user_input', 'passwords_needed_to_start',
'extra_vars', 'inventory', 'limit', 'job_tags', 'skip_tags', 'job_type', 'verbosity', 'diff_mode',
'credentials', 'credential_passwords', 'ask_variables_on_launch', 'ask_tags_on_launch',
'ask_diff_mode_on_launch', 'ask_skip_tags_on_launch', 'ask_job_type_on_launch', 'ask_limit_on_launch',
'ask_verbosity_on_launch', 'ask_inventory_on_launch', 'ask_credential_on_launch',
'survey_enabled', 'variables_needed_to_start', 'credential_needed_to_start',
'inventory_needed_to_start', 'job_template_data', 'defaults')
read_only_fields = (
'ask_diff_mode_on_launch', 'ask_variables_on_launch', 'ask_limit_on_launch', 'ask_tags_on_launch',
'ask_skip_tags_on_launch', 'ask_job_type_on_launch', 'ask_verbosity_on_launch',
'ask_inventory_on_launch', 'ask_credential_on_launch',)
def get_credential_needed_to_start(self, obj):
return False
def get_inventory_needed_to_start(self, obj):
return not (obj and obj.inventory)
def get_survey_enabled(self, obj):
if obj:
return obj.survey_enabled and 'spec' in obj.survey_spec
return False
def get_defaults(self, obj):
defaults_dict = {}
for field_name in JobTemplate.get_ask_mapping().keys():
if field_name == 'inventory':
defaults_dict[field_name] = dict(
name=getattrd(obj, '%s.name' % field_name, None),
id=getattrd(obj, '%s.pk' % field_name, None))
elif field_name == 'credentials':
if self.version > 1:
for cred in obj.credentials.all():
cred_dict = dict(
id=cred.id,
name=cred.name,
credential_type=cred.credential_type.pk,
passwords_needed=cred.passwords_needed
)
if cred.credential_type.managed_by_tower and 'vault_id' in cred.credential_type.defined_fields:
cred_dict['vault_id'] = cred.inputs.get('vault_id') or None
defaults_dict.setdefault(field_name, []).append(cred_dict)
else:
defaults_dict[field_name] = getattr(obj, field_name)
return defaults_dict
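# Illustrative (hypothetical) shape of the defaults block built above on v2:
# {'inventory': {'name': 'Demo Inventory', 'id': 1},
#  'credentials': [{'id': 2, 'name': 'Demo Credential', 'credential_type': 1,
#                   'passwords_needed': []}],
#  'job_type': 'run', 'limit': '', 'verbosity': 0, ...}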
def get_job_template_data(self, obj):
return dict(name=obj.name, id=obj.id, description=obj.description)
def validate_extra_vars(self, value):
return vars_validate_or_raise(value)
def validate(self, attrs):
template = self.context.get('template')
accepted, rejected, errors = template._accept_or_ignore_job_kwargs(
_exclude_errors=['prompts'], # make several error types non-blocking
**attrs)
self._ignored_fields = rejected
if template.inventory and template.inventory.pending_deletion is True:
errors['inventory'] = _("The inventory associated with this Job Template is being deleted.")
elif 'inventory' in accepted and accepted['inventory'].pending_deletion:
errors['inventory'] = _("The provided inventory is being deleted.")
# Prohibit providing multiple credentials of the same CredentialType.kind
# or multiples of same vault id
distinct_cred_kinds = []
for cred in accepted.get('credentials', []):
if cred.unique_hash() in distinct_cred_kinds:
errors.setdefault('credentials', []).append(_(
'Cannot assign multiple {} credentials.'
).format(cred.unique_hash(display=True)))
distinct_cred_kinds.append(cred.unique_hash())
# Prohibit removing credentials from the JT list (unsupported for now)
template_credentials = template.credentials.all()
if 'credentials' in attrs:
removed_creds = set(template_credentials) - set(attrs['credentials'])
provided_mapping = Credential.unique_dict(attrs['credentials'])
for cred in removed_creds:
if cred.unique_hash() in provided_mapping.keys():
continue # User replaced credential with new of same type
errors.setdefault('credentials', []).append(_(
'Removing {} credential at launch time without replacement is not supported. '
'Provided list lacked credential(s): {}.'
).format(cred.unique_hash(display=True), ', '.join([str(c) for c in removed_creds])))
# verify that credentials (either provided or existing) don't
# require launch-time passwords that have not been provided
if 'credentials' in accepted:
launch_credentials = accepted['credentials']
else:
launch_credentials = template_credentials
passwords = attrs.get('credential_passwords', {}) # get from original attrs
passwords_lacking = []
for cred in launch_credentials:
for p in cred.passwords_needed:
if p not in passwords:
passwords_lacking.append(p)
else:
accepted.setdefault('credential_passwords', {})
accepted['credential_passwords'][p] = passwords[p]
if len(passwords_lacking):
errors['passwords_needed_to_start'] = passwords_lacking
if errors:
raise serializers.ValidationError(errors)
if 'extra_vars' in accepted:
extra_vars_save = accepted['extra_vars']
else:
extra_vars_save = None
# Validate job against JobTemplate clean_ methods
accepted = super(JobLaunchSerializer, self).validate(accepted)
# Preserve extra_vars as dictionary internally
if extra_vars_save:
accepted['extra_vars'] = extra_vars_save
return accepted
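# Hypothetical launch payload accepted by the validation above:
# {"extra_vars": {"version": "1.2.3"}, "credentials": [1, 2],
#  "credential_passwords": {"ssh_password": "..."}}
# Prompt mismatches are collected per field and raised together, and extra_vars is
# preserved as a dict rather than being re-serialized by the base validate().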
class WorkflowJobLaunchSerializer(BaseSerializer):
can_start_without_user_input = serializers.BooleanField(read_only=True)
variables_needed_to_start = serializers.ReadOnlyField()
survey_enabled = serializers.SerializerMethodField()
extra_vars = VerbatimField(required=False, write_only=True)
workflow_job_template_data = serializers.SerializerMethodField()
class Meta:
model = WorkflowJobTemplate
fields = ('can_start_without_user_input', 'extra_vars',
'survey_enabled', 'variables_needed_to_start',
'node_templates_missing', 'node_prompts_rejected',
'workflow_job_template_data')
def get_survey_enabled(self, obj):
if obj:
return obj.survey_enabled and 'spec' in obj.survey_spec
return False
def get_workflow_job_template_data(self, obj):
return dict(name=obj.name, id=obj.id, description=obj.description)
def validate(self, attrs):
obj = self.instance
accepted, rejected, errors = obj._accept_or_ignore_job_kwargs(
_exclude_errors=['required'],
**attrs)
WFJT_extra_vars = obj.extra_vars
attrs = super(WorkflowJobLaunchSerializer, self).validate(attrs)
obj.extra_vars = WFJT_extra_vars
return attrs
class NotificationTemplateSerializer(BaseSerializer):
show_capabilities = ['edit', 'delete']
class Meta:
model = NotificationTemplate
fields = ('*', 'organization', 'notification_type', 'notification_configuration')
type_map = {"string": (str, unicode),
"int": (int,),
"bool": (bool,),
"list": (list,),
"password": (str, unicode),
"object": (dict, OrderedDict)}
def to_representation(self, obj):
ret = super(NotificationTemplateSerializer, self).to_representation(obj)
if 'notification_configuration' in ret:
ret['notification_configuration'] = obj.display_notification_configuration()
return ret
def get_related(self, obj):
res = super(NotificationTemplateSerializer, self).get_related(obj)
res.update(dict(
test = self.reverse('api:notification_template_test', kwargs={'pk': obj.pk}),
notifications = self.reverse('api:notification_template_notification_list', kwargs={'pk': obj.pk}),
copy = self.reverse('api:notification_template_copy', kwargs={'pk': obj.pk}),
))
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
return res
def _recent_notifications(self, obj):
return [{'id': x.id, 'status': x.status, 'created': x.created} for x in obj.notifications.all().order_by('-created')[:5]]
def get_summary_fields(self, obj):
d = super(NotificationTemplateSerializer, self).get_summary_fields(obj)
d['recent_notifications'] = self._recent_notifications(obj)
return d
def validate(self, attrs):
from awx.api.views import NotificationTemplateDetail
notification_type = None
if 'notification_type' in attrs:
notification_type = attrs['notification_type']
elif self.instance:
notification_type = self.instance.notification_type
else:
notification_type = None
if not notification_type:
raise serializers.ValidationError(_('Missing required fields for Notification Configuration: notification_type'))
notification_class = NotificationTemplate.CLASS_FOR_NOTIFICATION_TYPE[notification_type]
missing_fields = []
incorrect_type_fields = []
error_list = []
if 'notification_configuration' not in attrs:
return attrs
if self.context['view'].kwargs and isinstance(self.context['view'], NotificationTemplateDetail):
object_actual = self.context['view'].get_object()
else:
object_actual = None
for field in notification_class.init_parameters:
if field not in attrs['notification_configuration']:
missing_fields.append(field)
continue
field_val = attrs['notification_configuration'][field]
field_type = notification_class.init_parameters[field]['type']
expected_types = self.type_map[field_type]
if type(field_val) not in expected_types:
incorrect_type_fields.append((field, field_type))
continue
if field_type == "list" and len(field_val) < 1:
error_list.append(_("No values specified for field '{}'").format(field))
continue
if field_type == "password" and field_val == "$encrypted$" and object_actual is not None:
attrs['notification_configuration'][field] = object_actual.notification_configuration[field]
if missing_fields:
error_list.append(_("Missing required fields for Notification Configuration: {}.").format(missing_fields))
if incorrect_type_fields:
for type_field_error in incorrect_type_fields:
error_list.append(_("Configuration field '{}' incorrect type, expected {}.").format(type_field_error[0],
type_field_error[1]))
if error_list:
raise serializers.ValidationError(error_list)
return super(NotificationTemplateSerializer, self).validate(attrs)
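# Illustrative configuration check (the notification type and its parameters are
# assumed for the example): for a type whose init_parameters declare
# {'url': {'type': 'string'}, 'headers': {'type': 'object'}}, a
# notification_configuration of {'url': 'https://example.com', 'headers': {}}
# passes, while a missing key or a list supplied for 'headers' is reported via the
# aggregated error_list above.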
class NotificationSerializer(BaseSerializer):
class Meta:
model = Notification
fields = ('*', '-name', '-description', 'notification_template', 'error', 'status', 'notifications_sent',
'notification_type', 'recipients', 'subject')
def get_related(self, obj):
res = super(NotificationSerializer, self).get_related(obj)
res.update(dict(
notification_template = self.reverse('api:notification_template_detail', kwargs={'pk': obj.notification_template.pk}),
))
return res
class LabelSerializer(BaseSerializer):
class Meta:
model = Label
fields = ('*', '-description', 'organization')
def get_related(self, obj):
res = super(LabelSerializer, self).get_related(obj)
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
return res
class SchedulePreviewSerializer(BaseSerializer):
class Meta:
model = Schedule
fields = ('rrule',)
# We reject rrules if:
# - DTSTART is not include
# - INTERVAL is not included
# - SECONDLY is used
# - TZID is used
# - BYDAY prefixed with a number (MO is good but not 20MO)
# - BYYEARDAY
# - BYWEEKNO
# - Multiple DTSTART or RRULE elements
# - Can't contain both COUNT and UNTIL
# - COUNT > 999
def validate_rrule(self, value):
rrule_value = value
multi_by_month_day = r".*?BYMONTHDAY[\:\=][0-9]+,-*[0-9]+"
multi_by_month = r".*?BYMONTH[\:\=][0-9]+,[0-9]+"
by_day_with_numeric_prefix = r".*?BYDAY[\:\=][0-9]+[a-zA-Z]{2}"
match_count = re.match(r".*?(COUNT\=[0-9]+)", rrule_value)
match_multiple_dtstart = re.findall(r".*?(DTSTART(;[^:]+)?\:[0-9]+T[0-9]+Z?)", rrule_value)
match_native_dtstart = re.findall(r".*?(DTSTART:[0-9]+T[0-9]+) ", rrule_value)
match_multiple_rrule = re.findall(r".*?(RRULE\:)", rrule_value)
if not len(match_multiple_dtstart):
raise serializers.ValidationError(_('Valid DTSTART required in rrule. Value should start with: DTSTART:YYYYMMDDTHHMMSSZ'))
if len(match_native_dtstart):
raise serializers.ValidationError(_('DTSTART cannot be a naive datetime. Specify ;TZINFO= or YYYYMMDDTHHMMSSZZ.'))
if len(match_multiple_dtstart) > 1:
raise serializers.ValidationError(_('Multiple DTSTART is not supported.'))
if not len(match_multiple_rrule):
raise serializers.ValidationError(_('RRULE required in rrule.'))
if len(match_multiple_rrule) > 1:
raise serializers.ValidationError(_('Multiple RRULE is not supported.'))
if 'interval' not in rrule_value.lower():
raise serializers.ValidationError(_('INTERVAL required in rrule.'))
if 'secondly' in rrule_value.lower():
raise serializers.ValidationError(_('SECONDLY is not supported.'))
if re.match(multi_by_month_day, rrule_value):
raise serializers.ValidationError(_('Multiple BYMONTHDAYs not supported.'))
if re.match(multi_by_month, rrule_value):
raise serializers.ValidationError(_('Multiple BYMONTHs not supported.'))
if re.match(by_day_with_numeric_prefix, rrule_value):
raise serializers.ValidationError(_("BYDAY with numeric prefix not supported."))
if 'byyearday' in rrule_value.lower():
raise serializers.ValidationError(_("BYYEARDAY not supported."))
if 'byweekno' in rrule_value.lower():
raise serializers.ValidationError(_("BYWEEKNO not supported."))
if 'COUNT' in rrule_value and 'UNTIL' in rrule_value:
raise serializers.ValidationError(_("RRULE may not contain both COUNT and UNTIL"))
if match_count:
count_val = match_count.groups()[0].strip().split("=")
if int(count_val[1]) > 999:
raise serializers.ValidationError(_("COUNT > 999 is unsupported."))
try:
Schedule.rrulestr(rrule_value)
except Exception as e:
raise serializers.ValidationError(_("rrule parsing failed validation: {}").format(e))
return value
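# Example of an rrule value that satisfies the checks above (dates/times arbitrary):
# "DTSTART:20380601T120000Z RRULE:FREQ=WEEKLY;INTERVAL=1;BYDAY=MO;COUNT=10"
# i.e. a zulu DTSTART, exactly one RRULE, an INTERVAL, no SECONDLY/BYYEARDAY/
# BYWEEKNO, an un-prefixed BYDAY, and COUNT used without UNTIL and kept <= 999.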
class ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSerializer):
show_capabilities = ['edit', 'delete']
class Meta:
model = Schedule
fields = ('*', 'unified_job_template', 'enabled', 'dtstart', 'dtend', 'rrule', 'next_run',)
def get_related(self, obj):
res = super(ScheduleSerializer, self).get_related(obj)
res.update(dict(
unified_jobs = self.reverse('api:schedule_unified_jobs_list', kwargs={'pk': obj.pk}),
))
if obj.unified_job_template:
res['unified_job_template'] = obj.unified_job_template.get_absolute_url(self.context.get('request'))
try:
if obj.unified_job_template.project:
res['project'] = obj.unified_job_template.project.get_absolute_url(self.context.get('request'))
except ObjectDoesNotExist:
pass
if obj.inventory:
res['inventory'] = obj.inventory.get_absolute_url(self.context.get('request'))
elif obj.unified_job_template and getattr(obj.unified_job_template, 'inventory', None):
res['inventory'] = obj.unified_job_template.inventory.get_absolute_url(self.context.get('request'))
return res
def validate_unified_job_template(self, value):
if type(value) == InventorySource and value.source not in SCHEDULEABLE_PROVIDERS:
raise serializers.ValidationError(_('Inventory Source must be a cloud resource.'))
elif type(value) == Project and value.scm_type == '':
raise serializers.ValidationError(_('Manual Project cannot have a schedule set.'))
elif type(value) == InventorySource and value.source == 'scm' and value.update_on_project_update:
raise serializers.ValidationError(_(
six.text_type('Inventory sources with `update_on_project_update` cannot be scheduled. '
'Schedule its source project `{}` instead.').format(value.source_project.name)))
return value
class InstanceSerializer(BaseSerializer):
consumed_capacity = serializers.SerializerMethodField()
percent_capacity_remaining = serializers.SerializerMethodField()
jobs_running = serializers.SerializerMethodField()
class Meta:
model = Instance
read_only_fields = ('uuid', 'hostname', 'version')
fields = ("id", "type", "url", "related", "uuid", "hostname", "created", "modified", 'capacity_adjustment',
"version", "capacity", "consumed_capacity", "percent_capacity_remaining", "jobs_running",
"cpu", "memory", "cpu_capacity", "mem_capacity", "enabled")
def get_related(self, obj):
res = super(InstanceSerializer, self).get_related(obj)
res['jobs'] = self.reverse('api:instance_unified_jobs_list', kwargs={'pk': obj.pk})
res['instance_groups'] = self.reverse('api:instance_instance_groups_list', kwargs={'pk': obj.pk})
return res
def get_consumed_capacity(self, obj):
return obj.consumed_capacity
def get_percent_capacity_remaining(self, obj):
if not obj.capacity or obj.consumed_capacity == obj.capacity:
return 0.0
else:
return float("{0:.2f}".format(((float(obj.capacity) - float(obj.consumed_capacity)) / (float(obj.capacity))) * 100))
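    # Worked example for the percentage above (illustrative numbers only):
    # capacity=200, consumed_capacity=50 -> ((200 - 50) / 200) * 100 = 75.0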
def get_jobs_running(self, obj):
return UnifiedJob.objects.filter(execution_node=obj.hostname, status__in=('running', 'waiting',)).count()
class InstanceGroupSerializer(BaseSerializer):
committed_capacity = serializers.SerializerMethodField()
consumed_capacity = serializers.SerializerMethodField()
percent_capacity_remaining = serializers.SerializerMethodField()
jobs_running = serializers.SerializerMethodField()
instances = serializers.SerializerMethodField()
class Meta:
model = InstanceGroup
fields = ("id", "type", "url", "related", "name", "created", "modified",
"capacity", "committed_capacity", "consumed_capacity",
"percent_capacity_remaining", "jobs_running", "instances", "controller",
"policy_instance_percentage", "policy_instance_minimum", "policy_instance_list")
def get_related(self, obj):
res = super(InstanceGroupSerializer, self).get_related(obj)
res['jobs'] = self.reverse('api:instance_group_unified_jobs_list', kwargs={'pk': obj.pk})
res['instances'] = self.reverse('api:instance_group_instance_list', kwargs={'pk': obj.pk})
if obj.controller_id:
res['controller'] = self.reverse('api:instance_group_detail', kwargs={'pk': obj.controller_id})
return res
def get_jobs_qs(self):
# Store running jobs queryset in context, so it will be shared in ListView
if 'running_jobs' not in self.context:
self.context['running_jobs'] = UnifiedJob.objects.filter(
status__in=('running', 'waiting'))
return self.context['running_jobs']
def get_capacity_dict(self):
# Store capacity values (globally computed) in the context
if 'capacity_map' not in self.context:
ig_qs = None
if self.parent: # Is ListView:
ig_qs = self.parent.instance
self.context['capacity_map'] = InstanceGroup.objects.capacity_values(
qs=ig_qs, tasks=self.get_jobs_qs(), breakdown=True)
return self.context['capacity_map']
def get_consumed_capacity(self, obj):
return self.get_capacity_dict()[obj.name]['running_capacity']
def get_committed_capacity(self, obj):
return self.get_capacity_dict()[obj.name]['committed_capacity']
def get_percent_capacity_remaining(self, obj):
if not obj.capacity:
return 0.0
else:
return float("{0:.2f}".format(
((float(obj.capacity) - float(self.get_consumed_capacity(obj))) / (float(obj.capacity))) * 100)
)
def get_jobs_running(self, obj):
jobs_qs = self.get_jobs_qs()
return sum(1 for job in jobs_qs if job.instance_group_id == obj.id)
def get_instances(self, obj):
return obj.instances.count()
class ActivityStreamSerializer(BaseSerializer):
changes = serializers.SerializerMethodField()
object_association = serializers.SerializerMethodField()
@cached_property
def _local_summarizable_fk_fields(self):
summary_dict = copy.copy(SUMMARIZABLE_FK_FIELDS)
# Special requests
summary_dict['group'] = summary_dict['group'] + ('inventory_id',)
for key in summary_dict.keys():
if 'id' not in summary_dict[key]:
summary_dict[key] = summary_dict[key] + ('id',)
        field_list = list(summary_dict.items())
        # Needed related fields that are not in the default summary fields
        field_list += [
('workflow_job_template_node', ('id', 'unified_job_template_id')),
('label', ('id', 'name', 'organization_id')),
('notification', ('id', 'status', 'notification_type', 'notification_template_id')),
('access_token', ('id', 'token'))
]
return field_list
class Meta:
model = ActivityStream
fields = ('*', '-name', '-description', '-created', '-modified',
'timestamp', 'operation', 'changes', 'object1', 'object2', 'object_association')
def get_fields(self):
ret = super(ActivityStreamSerializer, self).get_fields()
for key, field in ret.items():
if key == 'changes':
field.help_text = _('A summary of the new and changed values when an object is created, updated, or deleted')
if key == 'object1':
field.help_text = _('For create, update, and delete events this is the object type that was affected. '
'For associate and disassociate events this is the object type associated or disassociated with object2.')
if key == 'object2':
field.help_text = _('Unpopulated for create, update, and delete events. For associate and disassociate '
'events this is the object type that object1 is being associated with.')
if key == 'operation':
field.help_text = _('The action taken with respect to the given object(s).')
return ret
def get_changes(self, obj):
if obj is None:
return {}
try:
return json.loads(obj.changes)
except Exception:
            logger.warning("Error deserializing activity stream json changes")
return {}
def get_object_association(self, obj):
try:
return obj.object_relationship_type.split(".")[-1].split("_")[1]
except Exception:
pass
return ""
def get_related(self, obj):
rel = {}
if obj.actor is not None:
rel['actor'] = self.reverse('api:user_detail', kwargs={'pk': obj.actor.pk})
for fk, __ in self._local_summarizable_fk_fields:
if not hasattr(obj, fk):
continue
m2m_list = self._get_rel(obj, fk)
if m2m_list:
rel[fk] = []
id_list = []
for thisItem in m2m_list:
if getattr(thisItem, 'id', None) in id_list:
continue
id_list.append(getattr(thisItem, 'id', None))
if fk == 'custom_inventory_script':
rel[fk].append(self.reverse('api:inventory_script_detail', kwargs={'pk': thisItem.id}))
elif fk == 'application':
rel[fk].append(self.reverse(
'api:o_auth2_application_detail', kwargs={'pk': thisItem.pk}
))
elif fk == 'access_token':
rel[fk].append(self.reverse(
'api:o_auth2_token_detail', kwargs={'pk': thisItem.pk}
))
else:
rel[fk].append(self.reverse('api:' + fk + '_detail', kwargs={'pk': thisItem.id}))
if fk == 'schedule':
rel['unified_job_template'] = thisItem.unified_job_template.get_absolute_url(self.context.get('request'))
if obj.setting and obj.setting.get('category', None):
rel['setting'] = self.reverse(
'api:setting_singleton_detail',
kwargs={'category_slug': obj.setting['category']}
)
rel['access_token'] = '*************'
return rel
def _get_rel(self, obj, fk):
related_model = ActivityStream._meta.get_field(fk).related_model
related_manager = getattr(obj, fk)
if issubclass(related_model, PolymorphicModel) and hasattr(obj, '_prefetched_objects_cache'):
# HACK: manually fill PolymorphicModel caches to prevent running query multiple times
# unnecessary if django-polymorphic issue #68 is solved
if related_manager.prefetch_cache_name not in obj._prefetched_objects_cache:
obj._prefetched_objects_cache[related_manager.prefetch_cache_name] = list(related_manager.all())
return related_manager.all()
def get_summary_fields(self, obj):
summary_fields = OrderedDict()
for fk, related_fields in self._local_summarizable_fk_fields:
try:
if not hasattr(obj, fk):
continue
m2m_list = self._get_rel(obj, fk)
if m2m_list:
summary_fields[fk] = []
for thisItem in m2m_list:
if fk == 'job':
summary_fields['job_template'] = []
job_template_item = {}
job_template_fields = SUMMARIZABLE_FK_FIELDS['job_template']
job_template = getattr(thisItem, 'job_template', None)
if job_template is not None:
for field in job_template_fields:
fval = getattr(job_template, field, None)
if fval is not None:
job_template_item[field] = fval
summary_fields['job_template'].append(job_template_item)
if fk == 'schedule':
unified_job_template = getattr(thisItem, 'unified_job_template', None)
if unified_job_template is not None:
summary_fields[get_type_for_model(unified_job_template)] = {'id': unified_job_template.id,
'name': unified_job_template.name}
thisItemDict = {}
for field in related_fields:
fval = getattr(thisItem, field, None)
if fval is not None:
thisItemDict[field] = fval
summary_fields[fk].append(thisItemDict)
except ObjectDoesNotExist:
pass
if obj.actor is not None:
summary_fields['actor'] = dict(id = obj.actor.id,
username = obj.actor.username,
first_name = obj.actor.first_name,
last_name = obj.actor.last_name)
if obj.setting:
summary_fields['setting'] = [obj.setting]
summary_fields['access_token'] = '*************'
return summary_fields
class FactVersionSerializer(BaseFactSerializer):
class Meta:
model = Fact
fields = ('related', 'module', 'timestamp')
read_only_fields = ('*',)
def get_related(self, obj):
res = super(FactVersionSerializer, self).get_related(obj)
params = {
'datetime': timestamp_apiformat(obj.timestamp),
'module': obj.module,
}
res['fact_view'] = '%s?%s' % (
reverse('api:host_fact_compare_view', kwargs={'pk': obj.host.pk}, request=self.context.get('request')),
urllib.urlencode(params)
)
return res
class FactSerializer(BaseFactSerializer):
class Meta:
model = Fact
# TODO: Consider adding in host to the fields list ?
fields = ('related', 'timestamp', 'module', 'facts', 'id', 'summary_fields', 'host')
read_only_fields = ('*',)
def get_related(self, obj):
res = super(FactSerializer, self).get_related(obj)
res['host'] = obj.host.get_absolute_url(self.context.get('request'))
return res
def to_representation(self, obj):
ret = super(FactSerializer, self).to_representation(obj)
if obj is None:
return ret
if 'facts' in ret and isinstance(ret['facts'], six.string_types):
ret['facts'] = json.loads(ret['facts'])
return ret
|
py | b40279a43db56cb04a29099b18641c1f77c9d667 | # coding: utf-8
"""
Pure Storage FlashBlade REST 1.9 Python SDK
Pure Storage FlashBlade REST 1.9 Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.9
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class FileSystemSnapshot(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
#BEGIN_CUSTOM
# IR-51527: Prevent Pytest from attempting to collect this class based on name.
__test__ = False
#END_CUSTOM
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'id': 'str',
'destroyed': 'bool',
'owner': 'Reference',
'owner_destroyed': 'bool',
'policy': 'LocationReference',
'source': 'str',
'source_destroyed': 'bool',
'source_id': 'str',
'source_is_local': 'bool',
'source_location': 'Reference',
'source_display_name': 'str',
'suffix': 'str',
'time_remaining': 'int'
}
attribute_map = {
'name': 'name',
'id': 'id',
'destroyed': 'destroyed',
'owner': 'owner',
'owner_destroyed': 'owner_destroyed',
'policy': 'policy',
'source': 'source',
'source_destroyed': 'source_destroyed',
'source_id': 'source_id',
'source_is_local': 'source_is_local',
'source_location': 'source_location',
'source_display_name': 'source_display_name',
'suffix': 'suffix',
'time_remaining': 'time_remaining'
}
def __init__(self, name=None, id=None, destroyed=None, owner=None, owner_destroyed=None, policy=None, source=None, source_destroyed=None, source_id=None, source_is_local=None, source_location=None, source_display_name=None, suffix=None, time_remaining=None): # noqa: E501
"""FileSystemSnapshot - a model defined in Swagger""" # noqa: E501
self._name = None
self._id = None
self._destroyed = None
self._owner = None
self._owner_destroyed = None
self._policy = None
self._source = None
self._source_destroyed = None
self._source_id = None
self._source_is_local = None
self._source_location = None
self._source_display_name = None
self._suffix = None
self._time_remaining = None
self.discriminator = None
if name is not None:
self.name = name
if id is not None:
self.id = id
if destroyed is not None:
self.destroyed = destroyed
if owner is not None:
self.owner = owner
if owner_destroyed is not None:
self.owner_destroyed = owner_destroyed
if policy is not None:
self.policy = policy
if source is not None:
self.source = source
if source_destroyed is not None:
self.source_destroyed = source_destroyed
if source_id is not None:
self.source_id = source_id
if source_is_local is not None:
self.source_is_local = source_is_local
if source_location is not None:
self.source_location = source_location
if source_display_name is not None:
self.source_display_name = source_display_name
if suffix is not None:
self.suffix = suffix
if time_remaining is not None:
self.time_remaining = time_remaining
@property
def name(self):
"""Gets the name of this FileSystemSnapshot. # noqa: E501
The name of the object # noqa: E501
:return: The name of this FileSystemSnapshot. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this FileSystemSnapshot.
The name of the object # noqa: E501
:param name: The name of this FileSystemSnapshot. # noqa: E501
:type: str
"""
self._name = name
@property
def id(self):
"""Gets the id of this FileSystemSnapshot. # noqa: E501
A unique ID chosen by the system. Cannot change. # noqa: E501
:return: The id of this FileSystemSnapshot. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this FileSystemSnapshot.
A unique ID chosen by the system. Cannot change. # noqa: E501
:param id: The id of this FileSystemSnapshot. # noqa: E501
:type: str
"""
self._id = id
@property
def destroyed(self):
"""Gets the destroyed of this FileSystemSnapshot. # noqa: E501
Is the file system snapshot destroyed? False by default. # noqa: E501
:return: The destroyed of this FileSystemSnapshot. # noqa: E501
:rtype: bool
"""
return self._destroyed
@destroyed.setter
def destroyed(self, destroyed):
"""Sets the destroyed of this FileSystemSnapshot.
Is the file system snapshot destroyed? False by default. # noqa: E501
:param destroyed: The destroyed of this FileSystemSnapshot. # noqa: E501
:type: bool
"""
self._destroyed = destroyed
@property
def owner(self):
"""Gets the owner of this FileSystemSnapshot. # noqa: E501
A reference to the file system that owns this snapshot. If the owner is destroyed, this will be destroyed. # noqa: E501
:return: The owner of this FileSystemSnapshot. # noqa: E501
:rtype: Reference
"""
return self._owner
@owner.setter
def owner(self, owner):
"""Sets the owner of this FileSystemSnapshot.
A reference to the file system that owns this snapshot. If the owner is destroyed, this will be destroyed. # noqa: E501
:param owner: The owner of this FileSystemSnapshot. # noqa: E501
:type: Reference
"""
self._owner = owner
@property
def owner_destroyed(self):
"""Gets the owner_destroyed of this FileSystemSnapshot. # noqa: E501
Is the owning file system destroyed? # noqa: E501
:return: The owner_destroyed of this FileSystemSnapshot. # noqa: E501
:rtype: bool
"""
return self._owner_destroyed
@owner_destroyed.setter
def owner_destroyed(self, owner_destroyed):
"""Sets the owner_destroyed of this FileSystemSnapshot.
Is the owning file system destroyed? # noqa: E501
:param owner_destroyed: The owner_destroyed of this FileSystemSnapshot. # noqa: E501
:type: bool
"""
self._owner_destroyed = owner_destroyed
@property
def policy(self):
"""Gets the policy of this FileSystemSnapshot. # noqa: E501
A reference to the associated policy. # noqa: E501
:return: The policy of this FileSystemSnapshot. # noqa: E501
:rtype: LocationReference
"""
return self._policy
@policy.setter
def policy(self, policy):
"""Sets the policy of this FileSystemSnapshot.
A reference to the associated policy. # noqa: E501
:param policy: The policy of this FileSystemSnapshot. # noqa: E501
:type: LocationReference
"""
self._policy = policy
@property
def source(self):
"""Gets the source of this FileSystemSnapshot. # noqa: E501
The name of the file system that was the source of the data in this snapshot. Normally this is the same as the owner, but if the snapshot is replicated, the source is the original file system. # noqa: E501
:return: The source of this FileSystemSnapshot. # noqa: E501
:rtype: str
"""
return self._source
@source.setter
def source(self, source):
"""Sets the source of this FileSystemSnapshot.
The name of the file system that was the source of the data in this snapshot. Normally this is the same as the owner, but if the snapshot is replicated, the source is the original file system. # noqa: E501
:param source: The source of this FileSystemSnapshot. # noqa: E501
:type: str
"""
self._source = source
@property
def source_destroyed(self):
"""Gets the source_destroyed of this FileSystemSnapshot. # noqa: E501
Deprecated. Use `owner_destroyed`. Is the owning file system destroyed? # noqa: E501
:return: The source_destroyed of this FileSystemSnapshot. # noqa: E501
:rtype: bool
"""
return self._source_destroyed
@source_destroyed.setter
def source_destroyed(self, source_destroyed):
"""Sets the source_destroyed of this FileSystemSnapshot.
Deprecated. Use `owner_destroyed`. Is the owning file system destroyed? # noqa: E501
:param source_destroyed: The source_destroyed of this FileSystemSnapshot. # noqa: E501
:type: bool
"""
self._source_destroyed = source_destroyed
@property
def source_id(self):
"""Gets the source_id of this FileSystemSnapshot. # noqa: E501
The unique global ID of the source file system. # noqa: E501
:return: The source_id of this FileSystemSnapshot. # noqa: E501
:rtype: str
"""
return self._source_id
@source_id.setter
def source_id(self, source_id):
"""Sets the source_id of this FileSystemSnapshot.
The unique global ID of the source file system. # noqa: E501
:param source_id: The source_id of this FileSystemSnapshot. # noqa: E501
:type: str
"""
self._source_id = source_id
@property
def source_is_local(self):
"""Gets the source_is_local of this FileSystemSnapshot. # noqa: E501
Is the source of this file system snapshot on the local array? # noqa: E501
:return: The source_is_local of this FileSystemSnapshot. # noqa: E501
:rtype: bool
"""
return self._source_is_local
@source_is_local.setter
def source_is_local(self, source_is_local):
"""Sets the source_is_local of this FileSystemSnapshot.
Is the source of this file system snapshot on the local array? # noqa: E501
:param source_is_local: The source_is_local of this FileSystemSnapshot. # noqa: E501
:type: bool
"""
self._source_is_local = source_is_local
@property
def source_location(self):
"""Gets the source_location of this FileSystemSnapshot. # noqa: E501
A reference to the source array. # noqa: E501
:return: The source_location of this FileSystemSnapshot. # noqa: E501
:rtype: Reference
"""
return self._source_location
@source_location.setter
def source_location(self, source_location):
"""Sets the source_location of this FileSystemSnapshot.
A reference to the source array. # noqa: E501
:param source_location: The source_location of this FileSystemSnapshot. # noqa: E501
:type: Reference
"""
self._source_location = source_location
@property
def source_display_name(self):
"""Gets the source_display_name of this FileSystemSnapshot. # noqa: E501
Full name of the source with remote array information. Response will be same as source for local file system snapshots. # noqa: E501
:return: The source_display_name of this FileSystemSnapshot. # noqa: E501
:rtype: str
"""
return self._source_display_name
@source_display_name.setter
def source_display_name(self, source_display_name):
"""Sets the source_display_name of this FileSystemSnapshot.
Full name of the source with remote array information. Response will be same as source for local file system snapshots. # noqa: E501
:param source_display_name: The source_display_name of this FileSystemSnapshot. # noqa: E501
:type: str
"""
self._source_display_name = source_display_name
@property
def suffix(self):
"""Gets the suffix of this FileSystemSnapshot. # noqa: E501
The suffix of the snapshot, e.g., snap1. # noqa: E501
:return: The suffix of this FileSystemSnapshot. # noqa: E501
:rtype: str
"""
return self._suffix
@suffix.setter
def suffix(self, suffix):
"""Sets the suffix of this FileSystemSnapshot.
The suffix of the snapshot, e.g., snap1. # noqa: E501
:param suffix: The suffix of this FileSystemSnapshot. # noqa: E501
:type: str
"""
self._suffix = suffix
@property
def time_remaining(self):
"""Gets the time_remaining of this FileSystemSnapshot. # noqa: E501
Time in milliseconds before the file system snapshot is eradicated. Null if not destroyed. # noqa: E501
:return: The time_remaining of this FileSystemSnapshot. # noqa: E501
:rtype: int
"""
return self._time_remaining
@time_remaining.setter
def time_remaining(self, time_remaining):
"""Sets the time_remaining of this FileSystemSnapshot.
Time in milliseconds before the file system snapshot is eradicated. Null if not destroyed. # noqa: E501
:param time_remaining: The time_remaining of this FileSystemSnapshot. # noqa: E501
:type: int
"""
self._time_remaining = time_remaining
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(FileSystemSnapshot, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, FileSystemSnapshot):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
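# Illustrative usage sketch, not part of the generated model (attribute values are
# made up for demonstration):
#   snap = FileSystemSnapshot(name='fs1.snap1', suffix='snap1', destroyed=False)
#   print(snap.to_dict())
#   print(snap)  # __repr__ falls back to the pretty-printed dict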
|
py | b40279e93ca7db12be4dba0bbb8bf6ef96a2e13a | import praw
import string
from datetime import datetime
import time
import random
import credentials
import replies as r
import pekofy as peko # because it sounds better
bot_name = credentials.bot_name
author = credentials.author
reddit = praw.Reddit(client_id=credentials.client_id,
client_secret=credentials.client_secret,
username=credentials.bot_name,
password=credentials.bot_pass,
user_agent=credentials.user_agent)
subreddit_list = ['u_' + bot_name, 'u_' + author, 'hololive', 'VirtualYoutubers', 'Hololewd', 'okbuddyhololive',
'goodanimemes', 'VtuberV8', 'Priconne', 'AmeliaWatson', 'GawrGura']
subreddit = reddit.subreddit('+'.join(subreddit_list))
replies = r.replies
def reply_f(reply, comment_obj, pekofy_msg=None):
"""
reply to a comment
:param reply: the reply type to send
:type reply: string
:param comment_obj: the comment to reply
:type comment_obj: praw.models.Comment
:param pekofy_msg: pekofied message that should only be passed when the reply variable is 'pekofy' to
pass the pekofied reply to the replies, defaults to None
:type pekofy_msg: string
"""
replies["pekofy"]["message"] = pekofy_msg
if pekofy_msg and is_triggering(pekofy_msg, "nothing changed"):
reply = "nothing changed"
reply_content = replies[reply]
    chance = reply_content.get('chance', 100)
    if random.randint(0, 100) > chance or already_replied_to(comment_obj, reply):
        return
message = random.choice(reply_content["messages"]) if "messages" in replies[reply] else reply_content["message"]
try:
comment_obj.reply(message)
global comments_replied
comments_replied += 1
except Exception as e:
print(f"Couldn't reply: {e}")
notify_author(e, comment_obj, message)
print(f"{reply}: https://www.reddit.com{comment_obj.permalink}")
print(f"Reply: {message}")
print("------------------------")
def already_replied_to(comment, reply):
""" returns if already replied the same type of comment or not """
second_refresh = False
for i in range(2):
try:
comment.refresh()
break
except praw.exceptions.ClientException: # work around as stated in the praw issue 838
if second_refresh:
return False
time.sleep(10)
second_refresh = True
comment.replies.replace_more()
child_comments = comment.replies.list()
for top_comment in child_comments:
if top_comment.parent().id != comment.id:
break
if top_comment.author == bot_name:
if top_comment.body in replies[reply]["messages" if "messages" in replies[reply] else "message"]:
print(f"Already {reply}'d: {top_comment.body} \ncontinuing...")
print("------------------------")
return True
return False
def notify_author(exception, comment=None, tried_reply=None):
""" Notifies to the author, don't forget to whitelist the bot if your PM's are closed """
title = datetime.now().strftime("%Y.%m.%d - %H:%M:%S")
if comment and tried_reply:
body = f"{bot_name} has run into an error: {exception}\n" \
f"Here\'s the [link](https://www.reddit.com{comment.permalink}) to the comment.\n" \
f"Tried to reply this: {tried_reply}"
else:
body = f'{bot_name} has run into an error: {exception}\n'
try:
reddit.redditor(author).message(title, body)
except Exception:
print("Couldn't notify the author")
def is_triggering(text, reply):
""" whether the text triggers the given reply type or not """
    include_bot = replies[reply].get('include_bot', False)
    if replies[reply].get("exact", False):
        condition = [trigger == text for trigger in replies[reply]["triggers"]]
    else:
        condition = [trigger in text for trigger in replies[reply]["triggers"]]
if replies[reply]["trigger_type"] == "all":
return all(condition) and ("bot" in text if include_bot else True)
return any(condition) and ("bot" in text if include_bot else True)
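# Illustrative shape of a single entry in the replies dict consumed above. The real
# definitions live in replies.py (not shown here); the field names below are inferred
# from how they are read in reply_f/is_triggering, not confirmed against that file:
# replies["pain peko"] = {
#     "triggers": ["pain peko"],   # phrases looked for in the comment body
#     "trigger_type": "any",       # "any" -> any trigger matches, "all" -> all must match
#     "exact": False,              # exact equality vs. substring matching
#     "messages": ["pain peko"],   # or a single "message" string instead
#     "chance": 100,               # percent chance of actually replying
# }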
def passed_limit(comment, limit=2):
""" returns true if the same comment has been pekofied too much by
climbing up the comment tree until it reaches the limit """
current_usage = 0
for i in range(limit):
if comment.parent_id == comment.link_id:
break
if comment.parent().author == bot_name:
comment = comment.parent()
if comment.parent_id == comment.link_id:
break
if comment.parent().author and is_triggering(comment.parent().body, "pekofy"):
comment = comment.parent()
current_usage += 1
return current_usage == limit
def is_top_level(comment):
""" returns if the comment is top level (directly replied to the post) """
return comment.parent_id == comment.link_id
def is_anti(comment):
""" Checks if author of the comment is a possible anti/hater by
counting their overall comment score in the same comment tree """
score_sum = comment.score
temp_comment = comment
while True:
if is_top_level(temp_comment.parent()):
break
if temp_comment.parent().author:
temp_comment = temp_comment.parent()
if temp_comment.author == comment.author: # same user in the comment chain, add to sum
score_sum += temp_comment.score
else:
break
if score_sum < -1:
return True
return False
comments_replied, comments_scanned = 0, 0
# used for exponential back off in case reddit server is unable to respond
initial_wait_time = 10
current_wait_time = initial_wait_time
max_wait_time = 600
reset_limit = 50
while 1:
    try:  # exception handling at its finest (lol)
# scan each comment
for comment in subreddit.stream.comments():
comments_scanned += 1
# comment has been deleted or it's author is the bot itself
if not comment.author or comment.author == bot_name:
continue
# pain peko reply
reply_f("pain peko", comment) if is_triggering(comment.body.lower(), "pain peko") else None
# hey moona reply
            if len(comment.body) < 350:  # longer messages tend to be more serious, don't "hey moona"
reply_f("hey moona", comment) if is_triggering(comment.body.lower(), "hey moona") else None
# feedback gratitude
replied = False
if not is_top_level(comment):
if comment.parent().author:
if comment.parent().author.name == bot_name:
for feedback in ["love", "cute", "thank", "sorry", "insult"]:
if is_triggering(comment.body.lower(), feedback):
reply_f(feedback, comment)
replied = True
break
if replied:
continue
# both pekofy and unpekofy written
if is_triggering(comment.body, "confused"):
reply_f("confused", comment)
continue
# if keyword found, try to pekofy
if is_triggering(comment.body, "pekofy"):
# can't pekofy due to comment not having any parent
if not comment.parent().author:
continue
# parent is a post, pekofy accordingly
if is_top_level(comment):
reply_f("pekofy", comment, peko.pekofy(
comment.submission.title + '\n\n' + comment.submission.selftext if comment.submission.selftext else comment.submission.title))
continue
# someone tried to break it by recursive calling, kindly say no
if is_triggering(comment.parent().body, "pekofy"):
reply_f("no", comment)
continue
# someone tried to pekofy a good/bad bot reply, don't pekofy
if is_triggering(comment.parent().body.lower(), "bot score abuse"):
reply_f("bot score abuse", comment)
continue
# don't pekofy if limit already reached before.
if comment.parent().body == replies["limit reached"]["message"] and comment.parent().author == bot_name:
continue
# if the same sentence has been pekofied too much already, don't pekofy
if passed_limit(comment):
reply_f("limit reached", comment)
continue
# not pekofy if anti/hater
if is_anti(comment):
reply_f("no", comment)
continue
# try to reply to the comment
reply_f("pekofy", comment, peko.pekofy(comment.parent().body))
# delete keyphrase found
if is_triggering(comment.body, "unpekofy") and comment.parent().author == bot_name and comment.parent().body:
if comment.parent().score < -1:
print(f'Unpekofied: {comment.parent().body}')
comment.parent().delete()
print("------------------------")
# More than [reset_limit] comments has been scanned without an incident, reset wait time.
if comments_scanned % reset_limit == 0:
current_wait_time = initial_wait_time
except KeyboardInterrupt:
print("Keyboard Interrupt. Terminating...")
break
except praw.exceptions.RedditAPIException as e:
print(f"RedditAPIException: {e}")
notify_author(e)
except praw.exceptions.PRAWException as e:
print(f"PRAWException: {e}")
notify_author(e)
except Exception as e:
print(f"Unhandled exception: {e}")
notify_author(e)
finally:
print("------------------------")
print(f"Replied comments so far: {comments_replied}")
print(f"Scanned comments so far: {comments_scanned}")
comments_replied, comments_scanned = 0, 0
# not-so-exponential back off
time.sleep(current_wait_time)
if not current_wait_time > max_wait_time:
current_wait_time *= 2
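        # With the defaults above the consecutive waits are 10, 20, 40, ..., 640 seconds,
        # after which the wait time stops growing (640 > max_wait_time).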
|
py | b4027a4f70ec7fe980b0e98ef10ed15a698ff24d | from docutils import nodes
class color(nodes.General, nodes.TextElement):
pass
def visit_color_node_html(self, node):
self.body.append(
"""<span style="border: 1px solid #000; background-color: %s">
</span> """
% node.astext()
)
def depart_color_node_html(self, node):
pass
def visit_color_node_latex(self, node):
pass
def depart_color_node_latex(self, node):
pass
def color_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
node = color()
node += nodes.Text(text)
return [node], []
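# Illustrative wiring sketch, assuming this module is used from a Sphinx extension
# (add_node/add_role are standard Sphinx application APIs, but this setup function
# itself is not part of the file above):
#   def setup(app):
#       app.add_node(color,
#                    html=(visit_color_node_html, depart_color_node_html),
#                    latex=(visit_color_node_latex, depart_color_node_latex))
#       app.add_role('color', color_role)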
|
py | b4027a553f1efda77d40b7a6e55ebae5bb3acd65 | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 3
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_0
from isi_sdk_8_0.models.storagepool_settings import StoragepoolSettings # noqa: E501
from isi_sdk_8_0.rest import ApiException
class TestStoragepoolSettings(unittest.TestCase):
"""StoragepoolSettings unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testStoragepoolSettings(self):
"""Test StoragepoolSettings"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_0.models.storagepool_settings.StoragepoolSettings() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | b4027aa37b63f7693e3c2beaad5eddcc9360d585 | from .Sensor import Sensor
from .utils import LcApiException
from .utils import _isStringCompat
import json
import yaml
class _Replicant( object ):
def __init__( self, manager ):
self._manager = manager
class Responder( _Replicant ):
'''Responder service manager object.'''
def sweep( self, sid ):
'''Perform a sweep of a given host.
Args:
sid (str): sensor ID to sweep.
'''
if isinstance( sid, Sensor ):
sid = sid.sid
return self._manager.replicantRequest( 'responder', {
'action' : 'sweep',
'sid' : sid,
}, True )
class Yara( _Replicant ):
'''Yara service manager object.'''
def scan( self, sid, sources ):
'''Perform an ad-hoc scan of a sensor with Yara signatures.
Args:
sid (str): sensor ID to scan.
sources (list of str): list of source Yara signature names to use in the scan.
'''
if isinstance( sid, Sensor ):
sid = sid.sid
return self._manager.replicantRequest( 'yara', {
'action' : 'scan',
'sid' : sid,
'sources' : sources,
}, True )
def getRules( self ):
'''Get the constant Yara scanning rules in effect.
Returns:
Dict of rules.
'''
return self._manager.replicantRequest( 'yara', {
'action' : 'list_rules',
}, False )
def getSources( self ):
'''Get the Yara signature sources.
Returns:
Dict of sources.
'''
return self._manager.replicantRequest( 'yara', {
'action' : 'list_sources',
}, False )
def addRule( self, ruleName, sources = [], tags = [], platforms = [] ):
'''Add a constant Yara scanning rule.
Args:
ruleName (str): name of the rule to add.
sources (list of str): list of sources this rule should scan with.
            tags (list of str): list of tags sensors must possess for this rule to apply.
            platforms (list of str): list of platform names this rule applies to.
'''
return self._manager.replicantRequest( 'yara', {
'action' : 'add_rule',
'name' : ruleName,
'sources' : sources,
'tags' : tags,
'platforms' : platforms,
}, False )
def removeRule( self, ruleName ):
'''Remove a constant Yara scanning rule.
Args:
ruleName (str): name of the rule to remove.
'''
return self._manager.replicantRequest( 'yara', {
'action' : 'remove_rule',
'name' : ruleName,
}, False )
def addSource( self, sourceName, source ):
'''Add a Yara signature source.
Args:
sourceName (str): name of the source to add.
source (str): source URL for the Yara signature(s).
'''
return self._manager.replicantRequest( 'yara', {
'action' : 'add_source',
'name' : sourceName,
'source' : source,
}, False )
def removeSource( self, sourceName ):
'''Remove a Yara rule source.
Args:
sourceName (str): name of the source to remove.
'''
return self._manager.replicantRequest( 'yara', {
'action' : 'remove_source',
'name' : sourceName,
}, False )
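# Illustrative usage sketch for the Yara manager above. It assumes a limacharlie
# Manager-like object exposing replicantRequest(); the sensor ID, source name and
# URL below are made-up placeholders:
#   yara = Yara( manager )
#   yara.addSource( 'community', 'https://example.com/rules.yar' )
#   yara.addRule( 'community-scan', sources = [ 'community' ], platforms = [ 'windows' ] )
#   yara.scan( 'a-sensor-id', [ 'community' ] )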
class Integrity( _Replicant ):
'''File and Registry Integrity Monitoring (FIM) service manager object.'''
def getRules( self ):
'''Get FIM rules in effect.
Returns:
Dict of rules.
'''
return self._manager.replicantRequest( 'integrity', {
'action' : 'list_rules',
}, False )
def addRule( self, ruleName, patterns = [], tags = [], platforms = [] ):
'''Add an FIM rule.
Args:
ruleName (str): name of the rule to add.
patterns (list of str): list of file/registry patterns to monitor.
            tags (list of str): list of tags sensors must possess for this rule to apply.
platforms (list of str): list of platform names this rule applies to.
'''
return self._manager.replicantRequest( 'integrity', {
'action' : 'add_rule',
'name' : ruleName,
'patterns' : patterns,
'tags' : tags,
'platforms' : platforms,
}, False )
def removeRule( self, ruleName ):
'''Remove an FIM rule.
Args:
ruleName (str): name of the rule to remove.
'''
return self._manager.replicantRequest( 'integrity', {
'action' : 'remove_rule',
'name' : ruleName,
}, False )
class Logging( _Replicant ):
'''Logging service manager object.'''
def getRules( self ):
'''Get the Log collection rules in effect.
'''
return self._manager.replicantRequest( 'logging', {
'action' : 'list_rules',
}, False )
def addRule( self, ruleName, patterns = [], tags = [], platforms = [], isDeleteAfter = False, isIgnoreCert = False, daysRetention = 0 ):
'''Add a Log collection rule.
Args:
ruleName (str): name of the rule to add.
patterns (list of str): list of file patterns describing Logs to monitor and retrieve.
            tags (list of str): list of tags sensors must possess for this rule to apply.
platforms (list of str): list of platform names this rule applies to.
isDeleteAfter (bool): if True, delete the Log after retrieval.
isIgnoreCert (bool): if True, sensor ignores SSL cert errors during log upload.
'''
return self._manager.replicantRequest( 'logging', {
'action' : 'add_rule',
'name' : ruleName,
'patterns' : patterns,
'is_delete_after' : isDeleteAfter,
'is_ignore_cert' : isIgnoreCert,
'days_retention' : daysRetention,
'tags' : tags,
'platforms' : platforms,
}, False )
def removeRule( self, ruleName ):
'''Remove a Log collection rule.
Args:
ruleName (str): name of the rule to remove.
'''
return self._manager.replicantRequest( 'logging', {
'action' : 'remove_rule',
'name' : ruleName,
}, False )
class Replay( _Replicant ):
'''Replay service manager object.'''
def runJob( self, startTime, endTime, sid = None, ruleName = None, ruleContent = None ):
'''Run a Replay service job.
Args:
startTime (int): epoch start time to replay.
endTime (int): epoch end time to replay.
sid (str): sensor ID to replay the data from.
ruleName (str): optional name of an existing D&R rule to replay.
ruleContent (dict): optional content of a D&R rule to replay.
'''
if isinstance( sid, Sensor ):
sid = sid.sid
req = {
'action' : 'replay',
'start' : startTime,
'end' : endTime,
}
if sid is not None:
req[ 'sid' ] = sid
if ruleName is not None:
req[ 'rule_name' ] = ruleName
if ruleContent is not None:
if _isStringCompat( ruleContent ):
try:
                    ruleContent = yaml.safe_load( ruleContent )
except:
try:
ruleContent = json.loads( ruleContent )
except:
raise LcApiException( 'rule content not JSON and not YAML' )
req[ 'rule_content' ] = ruleContent
return self._manager.replicantRequest( 'replay', req, True )
class Exfil( _Replicant ):
'''Exfil control service manager object.'''
def getRules( self ):
'''Get the exfil rules in effect.
Returns:
Dict of rules.
'''
return self._manager.replicantRequest( 'exfil', {
'action' : 'list_rules',
}, False )
def addEventRule( self, ruleName, events = [], tags = [], platforms = [] ):
'''Add an event rule describing events sent to the cloud in real-time.
Args:
ruleName (str): name of the rule to add.
events (list of str): list of event names to send in real-time.
            tags (list of str): list of tags sensors must possess for this rule to apply.
platforms (list of str): list of platform names this applies to.
'''
return self._manager.replicantRequest( 'exfil', {
'action' : 'add_event_rule',
'name' : ruleName,
'events' : events,
'tags' : tags,
'platforms' : platforms,
}, False )
def removeEventRule( self, ruleName ):
'''Remove an event rule.
Args:
ruleName (str): name of the rule to remove.
'''
return self._manager.replicantRequest( 'exfil', {
'action' : 'remove_event_rule',
'name' : ruleName,
}, False )
def addWatchRule( self, ruleName, event, operator, value, path = [], tags = [], platforms = [] ):
'''Add a watch rule to send matching events to the cloud in real-time.
Args:
ruleName (str): name of the watch rule to add.
event (str): name of the event this rule applies to.
operator (str): comparison operator name to determine match.
value (str): value to compare to for matching.
path (list of str): path within the event to compare the value of, without a leading "event".
            tags (list of str): list of tags sensors must possess for this rule to apply.
platforms (list of str): list of platform names this applies to.
'''
return self._manager.replicantRequest( 'exfil', {
'action' : 'add_watch',
'name' : ruleName,
'operator' : operator,
'event' : event,
'value' : value,
'path' : path,
'tags' : tags,
'platforms' : platforms,
}, False )
def removeWatchRule( self, ruleName ):
'''Remove a watch rule.
Args:
ruleName (str): name of the rule to remove.
'''
return self._manager.replicantRequest( 'exfil', {
'action' : 'remove_watch',
'name' : ruleName,
}, False )
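# Illustrative watch rule for the Exfil manager above (event name, operator and
# values are made-up placeholders, not confirmed against the service):
#   exfil = Exfil( manager )
#   exfil.addWatchRule( 'watch-example', event = 'NEW_PROCESS', operator = 'ends with',
#                       value = 'example.exe', path = [ 'FILE_PATH' ],
#                       platforms = [ 'windows' ] )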
class Dumper( _Replicant ):
'''Memory dumper service object.'''
def dump( self, sid ):
'''Dump the full memory of a given host.
Args:
sid (str): sensor ID to sweep.
'''
if isinstance( sid, Sensor ):
sid = sid.sid
return self._manager.replicantRequest( 'dumper', {
'sid' : sid,
}, True )
class ReliableTasking( _Replicant ):
'''Reliable Tasking service object.'''
def task( self, task, sid = None, tag = None, ttl = None ):
'''Issue a task for a set of sensors even if offline.
Args:
task (str): actual task command line to send.
sid (str): optional sensor ID to task or '*' for all.
tag (str): optional tag to select sensors to send the task to.
ttl (int): optional number of seconds before unsent tasks expire, defaults to a week.
'''
req = {
'action' : 'task',
'task' : task,
}
if sid is not None:
if isinstance( sid, Sensor ):
sid = sid.sid
req[ 'sid' ] = sid
if tag is not None:
req[ 'tag' ] = tag
if ttl is not None:
req[ 'ttl' ] = ttl
return self._manager.replicantRequest( 'reliable-tasking', req, True )
def getTasks( self, sid = None, tag = None ):
'''Issue a task for a set of sensors even if offline.
Args:
sid (str): optional sensor ID to get the tasks for or '*' for all.
tag (str): optional tag to select sensors to get the tasks for.
'''
req = {
'action' : 'list',
}
if sid is not None:
if isinstance( sid, Sensor ):
sid = sid.sid
req[ 'sid' ] = sid
if tag is not None:
req[ 'tag' ] = tag
return self._manager.replicantRequest( 'reliable-tasking', req, False ) |
py | b4027b7629e7595e5b52f65b7322db2234eaca09 | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import BadRequest
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import ExchangeNotAvailable
class bitmex (Exchange):
def describe(self):
return self.deep_extend(super(bitmex, self).describe(), {
'id': 'bitmex',
'name': 'BitMEX',
'countries': ['SC'], # Seychelles
'version': 'v1',
'userAgent': None,
'rateLimit': 2000,
'has': {
'CORS': False,
'fetchOHLCV': True,
'withdraw': True,
'editOrder': True,
'fetchOrder': True,
'fetchOrders': True,
'fetchOpenOrders': True,
'fetchClosedOrders': True,
'fetchMyTrades': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'1h': '1h',
'1d': '1d',
},
'urls': {
'test': 'https://testnet.bitmex.com',
'logo': 'https://user-images.githubusercontent.com/1294454/27766319-f653c6e6-5ed4-11e7-933d-f0bc3699ae8f.jpg',
'api': 'https://www.bitmex.com',
'www': 'https://www.bitmex.com',
'doc': [
'https://www.bitmex.com/app/apiOverview',
'https://github.com/BitMEX/api-connectors/tree/master/official-http',
],
'fees': 'https://www.bitmex.com/app/fees',
'referral': 'https://www.bitmex.com/register/rm3C16',
},
'api': {
'public': {
'get': [
'announcement',
'announcement/urgent',
'funding',
'instrument',
'instrument/active',
'instrument/activeAndIndices',
'instrument/activeIntervals',
'instrument/compositeIndex',
'instrument/indices',
'insurance',
'leaderboard',
'liquidation',
'orderBook',
'orderBook/L2',
'quote',
'quote/bucketed',
'schema',
'schema/websocketHelp',
'settlement',
'stats',
'stats/history',
'trade',
'trade/bucketed',
],
},
'private': {
'get': [
'apiKey',
'chat',
'chat/channels',
'chat/connected',
'execution',
'execution/tradeHistory',
'notification',
'order',
'position',
'user',
'user/affiliateStatus',
'user/checkReferralCode',
'user/commission',
'user/depositAddress',
'user/margin',
'user/minWithdrawalFee',
'user/wallet',
'user/walletHistory',
'user/walletSummary',
],
'post': [
'apiKey',
'apiKey/disable',
'apiKey/enable',
'chat',
'order',
'order/bulk',
'order/cancelAllAfter',
'order/closePosition',
'position/isolate',
'position/leverage',
'position/riskLimit',
'position/transferMargin',
'user/cancelWithdrawal',
'user/confirmEmail',
'user/confirmEnableTFA',
'user/confirmWithdrawal',
'user/disableTFA',
'user/logout',
'user/logoutAll',
'user/preferences',
'user/requestEnableTFA',
'user/requestWithdrawal',
],
'put': [
'order',
'order/bulk',
'user',
],
'delete': [
'apiKey',
'order',
'order/all',
],
},
},
'exceptions': {
'exact': {
'Invalid API Key.': AuthenticationError,
'Access Denied': PermissionDenied,
'Duplicate clOrdID': InvalidOrder,
'Signature not valid': AuthenticationError,
},
'broad': {
'overloaded': ExchangeNotAvailable,
'Account has insufficient Available Balance': InsufficientFunds,
},
},
'options': {
# https://blog.bitmex.com/api_announcement/deprecation-of-api-nonce-header/
# https://github.com/ccxt/ccxt/issues/4789
'api-expires': 5, # in seconds
},
})
def fetch_markets(self, params={}):
response = self.publicGetInstrumentActiveAndIndices(params)
result = []
for i in range(0, len(response)):
market = response[i]
active = (market['state'] != 'Unlisted')
id = market['symbol']
baseId = market['underlying']
quoteId = market['quoteCurrency']
basequote = baseId + quoteId
base = self.common_currency_code(baseId)
quote = self.common_currency_code(quoteId)
swap = (id == basequote)
# 'positionCurrency' may be empty("", as Bitmex currently returns for ETHUSD)
# so let's take the quote currency first and then adjust if needed
positionId = self.safe_string_2(market, 'positionCurrency', 'quoteCurrency')
type = None
future = False
prediction = False
position = self.common_currency_code(positionId)
symbol = id
if swap:
type = 'swap'
symbol = base + '/' + quote
elif id.find('B_') >= 0:
prediction = True
type = 'prediction'
else:
future = True
type = 'future'
precision = {
'amount': None,
'price': None,
}
lotSize = self.safe_float(market, 'lotSize')
tickSize = self.safe_float(market, 'tickSize')
if lotSize is not None:
precision['amount'] = self.precision_from_string(self.truncate_to_string(lotSize, 16))
if tickSize is not None:
precision['price'] = self.precision_from_string(self.truncate_to_string(tickSize, 16))
limits = {
'amount': {
'min': None,
'max': None,
},
'price': {
'min': tickSize,
'max': self.safe_float(market, 'maxPrice'),
},
'cost': {
'min': None,
'max': None,
},
}
limitField = 'cost' if (position == quote) else 'amount'
limits[limitField] = {
'min': lotSize,
'max': self.safe_float(market, 'maxOrderQty'),
}
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': active,
'precision': precision,
'limits': limits,
'taker': market['takerFee'],
'maker': market['makerFee'],
'type': type,
'spot': False,
'swap': swap,
'future': future,
'prediction': prediction,
'info': market,
})
return result
def fetch_balance(self, params={}):
self.load_markets()
request = {'currency': 'all'}
response = self.privateGetUserMargin(self.extend(request, params))
result = {'info': response}
for b in range(0, len(response)):
balance = response[b]
currencyId = self.safe_string(balance, 'currency')
currencyId = currencyId.upper()
code = self.common_currency_code(currencyId)
account = {
'free': balance['availableMargin'],
'used': 0.0,
'total': balance['marginBalance'],
}
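            # XBt balances come back in satoshis; scale by 1e-8 to express them in whole BTC
            # before deriving the 'used' figure below.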
if code == 'BTC':
account['free'] = account['free'] * 0.00000001
account['total'] = account['total'] * 0.00000001
account['used'] = account['total'] - account['free']
result[code] = account
return self.parse_balance(result)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['depth'] = limit
orderbook = self.publicGetOrderBookL2(self.extend(request, params))
result = {
'bids': [],
'asks': [],
'timestamp': None,
'datetime': None,
'nonce': None,
}
for o in range(0, len(orderbook)):
order = orderbook[o]
side = 'asks' if (order['side'] == 'Sell') else 'bids'
amount = self.safe_float(order, 'size')
price = self.safe_float(order, 'price')
# https://github.com/ccxt/ccxt/issues/4926
# https://github.com/ccxt/ccxt/issues/4927
# the exchange sometimes returns null price in the orderbook
if price is not None:
result[side].append([price, amount])
result['bids'] = self.sort_by(result['bids'], 0, True)
result['asks'] = self.sort_by(result['asks'], 0)
return result
def fetch_order(self, id, symbol=None, params={}):
filter = {'filter': {'orderID': id}}
result = self.fetch_orders(symbol, None, None, self.deep_extend(filter, params))
numResults = len(result)
if numResults == 1:
return result[0]
raise OrderNotFound(self.id + ': The order ' + id + ' not found.')
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
request = {}
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if since is not None:
request['startTime'] = self.iso8601(since)
if limit is not None:
request['count'] = limit
request = self.deep_extend(request, params)
# why the hassle? urlencode in python is kinda broken for nested dicts.
# E.g. self.urlencode({"filter": {"open": True}}) will return "filter={'open':+True}"
# Bitmex doesn't like that. Hence resorting to self hack.
if 'filter' in request:
request['filter'] = self.json(request['filter'])
response = self.privateGetOrder(request)
return self.parse_orders(response, market, since, limit)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
filter_params = {'filter': {'open': True}}
return self.fetch_orders(symbol, since, limit, self.deep_extend(filter_params, params))
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
# Bitmex barfs if you set 'open': False in the filter...
orders = self.fetch_orders(symbol, since, limit, params)
return self.filter_by(orders, 'status', 'closed')
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
request = {}
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if since is not None:
request['startTime'] = self.iso8601(since)
if limit is not None:
request['count'] = limit
request = self.deep_extend(request, params)
# why the hassle? urlencode in python is kinda broken for nested dicts.
# E.g. self.urlencode({"filter": {"open": True}}) will return "filter={'open':+True}"
# Bitmex doesn't like that. Hence resorting to self hack.
if 'filter' in request:
request['filter'] = self.json(request['filter'])
response = self.privateGetExecutionTradeHistory(request)
#
# [
# {
# "execID": "string",
# "orderID": "string",
# "clOrdID": "string",
# "clOrdLinkID": "string",
# "account": 0,
# "symbol": "string",
# "side": "string",
# "lastQty": 0,
# "lastPx": 0,
# "underlyingLastPx": 0,
# "lastMkt": "string",
# "lastLiquidityInd": "string",
# "simpleOrderQty": 0,
# "orderQty": 0,
# "price": 0,
# "displayQty": 0,
# "stopPx": 0,
# "pegOffsetValue": 0,
# "pegPriceType": "string",
# "currency": "string",
# "settlCurrency": "string",
# "execType": "string",
# "ordType": "string",
# "timeInForce": "string",
# "execInst": "string",
# "contingencyType": "string",
# "exDestination": "string",
# "ordStatus": "string",
# "triggered": "string",
# "workingIndicator": True,
# "ordRejReason": "string",
# "simpleLeavesQty": 0,
# "leavesQty": 0,
# "simpleCumQty": 0,
# "cumQty": 0,
# "avgPx": 0,
# "commission": 0,
# "tradePublishIndicator": "string",
# "multiLegReportingType": "string",
# "text": "string",
# "trdMatchID": "string",
# "execCost": 0,
# "execComm": 0,
# "homeNotional": 0,
# "foreignNotional": 0,
# "transactTime": "2019-03-05T12:47:02.762Z",
# "timestamp": "2019-03-05T12:47:02.762Z"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
if not market['active']:
raise ExchangeError(self.id + ': symbol ' + symbol + ' is delisted')
tickers = self.fetch_tickers([symbol], params)
ticker = self.safe_value(tickers, symbol)
if ticker is None:
raise ExchangeError(self.id + ' ticker symbol ' + symbol + ' not found')
return ticker
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
response = self.publicGetInstrumentActiveAndIndices(params)
result = {}
for i in range(0, len(response)):
ticker = self.parse_ticker(response[i])
symbol = self.safe_string(ticker, 'symbol')
if symbol is not None:
result[symbol] = ticker
return result
def parse_ticker(self, ticker, market=None):
#
# { symbol: "ETHH19",
# rootSymbol: "ETH",
# state: "Open",
# typ: "FFCCSX",
# listing: "2018-12-17T04:00:00.000Z",
# front: "2019-02-22T12:00:00.000Z",
# expiry: "2019-03-29T12:00:00.000Z",
# settle: "2019-03-29T12:00:00.000Z",
# relistInterval: null,
# inverseLeg: "",
# sellLeg: "",
# buyLeg: "",
# optionStrikePcnt: null,
# optionStrikeRound: null,
# optionStrikePrice: null,
# optionMultiplier: null,
# positionCurrency: "ETH",
# underlying: "ETH",
# quoteCurrency: "XBT",
# underlyingSymbol: "ETHXBT=",
# reference: "BMEX",
# referenceSymbol: ".BETHXBT30M",
# calcInterval: null,
# publishInterval: null,
# publishTime: null,
# maxOrderQty: 100000000,
# maxPrice: 10,
# lotSize: 1,
# tickSize: 0.00001,
# multiplier: 100000000,
# settlCurrency: "XBt",
# underlyingToPositionMultiplier: 1,
# underlyingToSettleMultiplier: null,
# quoteToSettleMultiplier: 100000000,
# isQuanto: False,
# isInverse: False,
# initMargin: 0.02,
# maintMargin: 0.01,
# riskLimit: 5000000000,
# riskStep: 5000000000,
# limit: null,
# capped: False,
# taxed: True,
# deleverage: True,
# makerFee: -0.0005,
# takerFee: 0.0025,
# settlementFee: 0,
# insuranceFee: 0,
# fundingBaseSymbol: "",
# fundingQuoteSymbol: "",
# fundingPremiumSymbol: "",
# fundingTimestamp: null,
# fundingInterval: null,
# fundingRate: null,
# indicativeFundingRate: null,
# rebalanceTimestamp: null,
# rebalanceInterval: null,
# openingTimestamp: "2019-02-13T08:00:00.000Z",
# closingTimestamp: "2019-02-13T09:00:00.000Z",
# sessionInterval: "2000-01-01T01:00:00.000Z",
# prevClosePrice: 0.03347,
# limitDownPrice: null,
# limitUpPrice: null,
# bankruptLimitDownPrice: null,
# bankruptLimitUpPrice: null,
# prevTotalVolume: 1386531,
# totalVolume: 1387062,
# volume: 531,
# volume24h: 17118,
# prevTotalTurnover: 4741294246000,
# totalTurnover: 4743103466000,
# turnover: 1809220000,
# turnover24h: 57919845000,
# homeNotional24h: 17118,
# foreignNotional24h: 579.19845,
# prevPrice24h: 0.03349,
# vwap: 0.03383564,
# highPrice: 0.03458,
# lowPrice: 0.03329,
# lastPrice: 0.03406,
# lastPriceProtected: 0.03406,
# lastTickDirection: "ZeroMinusTick",
# lastChangePcnt: 0.017,
# bidPrice: 0.03406,
# midPrice: 0.034065,
# askPrice: 0.03407,
# impactBidPrice: 0.03406,
# impactMidPrice: 0.034065,
# impactAskPrice: 0.03407,
# hasLiquidity: True,
# openInterest: 83679,
# openValue: 285010674000,
# fairMethod: "ImpactMidPrice",
# fairBasisRate: 0,
# fairBasis: 0,
# fairPrice: 0.03406,
# markMethod: "FairPrice",
# markPrice: 0.03406,
# indicativeTaxRate: 0,
# indicativeSettlePrice: 0.03406,
# optionUnderlyingPrice: null,
# settledPrice: null,
# timestamp: "2019-02-13T08:40:30.000Z",
# }
#
symbol = None
marketId = self.safe_string(ticker, 'symbol')
market = self.safe_value(self.markets_by_id, marketId, market)
if market is not None:
symbol = market['symbol']
timestamp = self.parse8601(self.safe_string(ticker, 'timestamp'))
open = self.safe_float(ticker, 'prevPrice24h')
last = self.safe_float(ticker, 'lastPrice')
change = None
percentage = None
if last is not None and open is not None:
change = last - open
if open > 0:
percentage = change / open * 100
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'highPrice'),
'low': self.safe_float(ticker, 'lowPrice'),
'bid': self.safe_float(ticker, 'bidPrice'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'askPrice'),
'askVolume': None,
'vwap': self.safe_float(ticker, 'vwap'),
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': change,
'percentage': percentage,
'average': self.sum(open, last) / 2,
'baseVolume': self.safe_float(ticker, 'homeNotional24h'),
'quoteVolume': self.safe_float(ticker, 'foreignNotional24h'),
'info': ticker,
}
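    # Worked example using the sample ticker above (values taken from the response
    # shown): open = prevPrice24h = 0.03349 and last = lastPrice = 0.03406, so
    # change = 0.00057 and percentage = 0.00057 / 0.03349 * 100 ~= 1.70.
    # Note the exchange's own lastChangePcnt (0.017) is a fraction, while this
    # method reports a percentage.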
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
timestamp = self.parse8601(ohlcv['timestamp'])
return [
timestamp,
self.safe_float(ohlcv, 'open'),
self.safe_float(ohlcv, 'high'),
self.safe_float(ohlcv, 'low'),
self.safe_float(ohlcv, 'close'),
self.safe_float(ohlcv, 'volume'),
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
self.load_markets()
# send JSON key/value pairs, such as {"key": "value"}
# filter by individual fields and do advanced queries on timestamps
# filter = {'key': 'value'}
# send a bare series(e.g. XBU) to nearest expiring contract in that series
# you can also send a timeframe, e.g. XBU:monthly
# timeframes: daily, weekly, monthly, quarterly, and biquarterly
market = self.market(symbol)
request = {
'symbol': market['id'],
'binSize': self.timeframes[timeframe],
'partial': True, # True == include yet-incomplete current bins
# 'filter': filter, # filter by individual fields and do advanced queries
# 'columns': [], # will return all columns if omitted
            # 'start': 0, # starting point for results
# 'reverse': False, # True == newest first
# 'endTime': '', # ending date filter for results
}
if limit is not None:
request['count'] = limit # default 100, max 500
# if since is not set, they will return candles starting from 2017-01-01
if since is not None:
ymdhms = self.ymdhms(since)
request['startTime'] = ymdhms # starting date filter for results
response = self.publicGetTradeBucketed(self.extend(request, params))
return self.parse_ohlcvs(response, market, timeframe, since, limit)
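    # Illustrative call (hypothetical symbol/timeframe; the 'timeframes' map and the
    # 'XBTUSD' market id live in the exchange description and are assumptions here):
    #
    #   exchange.fetch_ohlcv('BTC/USD', '1h', since=1551787200000, limit=100)
    #
    # would build a request roughly like
    #   {'symbol': 'XBTUSD', 'binSize': '1h', 'partial': True,
    #    'count': 100, 'startTime': self.ymdhms(1551787200000)}
    # and pass it to publicGetTradeBucketed().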
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# timestamp: '2018-08-28T00:00:02.735Z',
# symbol: 'XBTUSD',
# side: 'Buy',
# size: 2000,
# price: 6906.5,
# tickDirection: 'PlusTick',
# trdMatchID: 'b9a42432-0a46-6a2f-5ecc-c32e9ca4baf8',
# grossValue: 28958000,
# homeNotional: 0.28958,
# foreignNotional: 2000
# }
#
# fetchMyTrades(private)
#
# {
# "execID": "string",
# "orderID": "string",
# "clOrdID": "string",
# "clOrdLinkID": "string",
# "account": 0,
# "symbol": "string",
# "side": "string",
# "lastQty": 0,
# "lastPx": 0,
# "underlyingLastPx": 0,
# "lastMkt": "string",
# "lastLiquidityInd": "string",
# "simpleOrderQty": 0,
# "orderQty": 0,
# "price": 0,
# "displayQty": 0,
# "stopPx": 0,
# "pegOffsetValue": 0,
# "pegPriceType": "string",
# "currency": "string",
# "settlCurrency": "string",
# "execType": "string",
# "ordType": "string",
# "timeInForce": "string",
# "execInst": "string",
# "contingencyType": "string",
# "exDestination": "string",
# "ordStatus": "string",
# "triggered": "string",
# "workingIndicator": True,
# "ordRejReason": "string",
# "simpleLeavesQty": 0,
# "leavesQty": 0,
# "simpleCumQty": 0,
# "cumQty": 0,
# "avgPx": 0,
# "commission": 0,
# "tradePublishIndicator": "string",
# "multiLegReportingType": "string",
# "text": "string",
# "trdMatchID": "string",
# "execCost": 0,
# "execComm": 0,
# "homeNotional": 0,
# "foreignNotional": 0,
# "transactTime": "2019-03-05T12:47:02.762Z",
# "timestamp": "2019-03-05T12:47:02.762Z"
# }
#
timestamp = self.parse8601(self.safe_string(trade, 'timestamp'))
price = self.safe_float(trade, 'price')
amount = self.safe_float_2(trade, 'size', 'lastQty')
id = self.safe_string(trade, 'trdMatchID')
order = self.safe_string(trade, 'orderID')
side = self.safe_string(trade, 'side').lower()
# price * amount doesn't work for all symbols(e.g. XBT, ETH)
cost = self.safe_float(trade, 'execCost')
if cost is not None:
cost = abs(cost) / 100000000
fee = None
if 'execComm' in trade:
feeCost = self.safe_float(trade, 'execComm')
feeCost = feeCost / 100000000
currencyId = self.safe_string(trade, 'currency')
currencyId = currencyId.upper()
feeCurrency = self.common_currency_code(currencyId)
feeRate = self.safe_float(trade, 'commission')
fee = {
'cost': feeCost,
'currency': feeCurrency,
'rate': feeRate,
}
takerOrMaker = None
if fee is not None:
            takerOrMaker = 'maker' if fee['cost'] < 0 else 'taker'
symbol = None
marketId = self.safe_string(trade, 'symbol')
if marketId is not None:
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
else:
symbol = marketId
return {
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': id,
'order': order,
'type': None,
'takerOrMaker': takerOrMaker,
'side': side,
'price': price,
'cost': cost,
'amount': amount,
'fee': fee,
}
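    # Scaling note, illustrated with the public trade sample above: BitMEX reports
    # values in satoshis (XBt), so grossValue 28958000 corresponds to 0.28958 XBT,
    # matching homeNotional. parse_trade() applies the same / 100000000 scaling to
    # execCost and execComm; a negative execComm is a maker rebate, which is why
    # takerOrMaker is 'maker' when the fee cost is negative.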
def parse_order_status(self, status):
statuses = {
'New': 'open',
'PartiallyFilled': 'open',
'Filled': 'closed',
'DoneForDay': 'open',
'Canceled': 'canceled',
'PendingCancel': 'open',
'PendingNew': 'open',
'Rejected': 'rejected',
'Expired': 'expired',
'Stopped': 'open',
'Untriggered': 'open',
'Triggered': 'open',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
status = self.parse_order_status(self.safe_string(order, 'ordStatus'))
symbol = None
if market is not None:
symbol = market['symbol']
else:
id = order['symbol']
if id in self.markets_by_id:
market = self.markets_by_id[id]
symbol = market['symbol']
timestamp = self.parse8601(self.safe_string(order, 'timestamp'))
lastTradeTimestamp = self.parse8601(self.safe_string(order, 'transactTime'))
price = self.safe_float(order, 'price')
amount = self.safe_float(order, 'orderQty')
filled = self.safe_float(order, 'cumQty', 0.0)
remaining = None
if amount is not None:
if filled is not None:
remaining = max(amount - filled, 0.0)
average = self.safe_float(order, 'avgPx')
cost = None
if filled is not None:
if average is not None:
cost = average * filled
elif price is not None:
cost = price * filled
result = {
'info': order,
'id': str(order['orderID']),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'symbol': symbol,
'type': order['ordType'].lower(),
'side': order['side'].lower(),
'price': price,
'amount': amount,
'cost': cost,
'average': average,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': None,
}
return result
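    # Worked example with hypothetical values: orderQty = 100, cumQty = 60 and
    # avgPx = 0.034 yield remaining = max(100 - 60, 0.0) = 40.0 and
    # cost = 0.034 * 60 = 2.04; if avgPx were missing, the limit price would be
    # used for the cost instead.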
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if since is not None:
request['startTime'] = self.iso8601(since)
if limit is not None:
request['count'] = limit
response = self.publicGetTrade(self.extend(request, params))
#
# [
# {
# timestamp: '2018-08-28T00:00:02.735Z',
# symbol: 'XBTUSD',
# side: 'Buy',
# size: 2000,
# price: 6906.5,
# tickDirection: 'PlusTick',
# trdMatchID: 'b9a42432-0a46-6a2f-5ecc-c32e9ca4baf8',
# grossValue: 28958000,
# homeNotional: 0.28958,
# foreignNotional: 2000
# },
# {
# timestamp: '2018-08-28T00:00:03.778Z',
# symbol: 'XBTUSD',
# side: 'Sell',
# size: 1000,
# price: 6906,
# tickDirection: 'MinusTick',
# trdMatchID: '0d4f1682-5270-a800-569b-4a0eb92db97c',
# grossValue: 14480000,
# homeNotional: 0.1448,
# foreignNotional: 1000
# },
# ]
#
return self.parse_trades(response, market, since, limit)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
request = {
'symbol': self.market_id(symbol),
'side': self.capitalize(side),
'orderQty': amount,
'ordType': self.capitalize(type),
}
if price is not None:
request['price'] = price
response = self.privatePostOrder(self.extend(request, params))
order = self.parse_order(response)
id = order['id']
self.orders[id] = order
return self.extend({'info': response}, order)
def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
self.load_markets()
request = {
'orderID': id,
}
if amount is not None:
request['orderQty'] = amount
if price is not None:
request['price'] = price
response = self.privatePutOrder(self.extend(request, params))
order = self.parse_order(response)
self.orders[order['id']] = order
return self.extend({'info': response}, order)
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
response = self.privateDeleteOrder(self.extend({'orderID': id}, params))
order = response[0]
error = self.safe_string(order, 'error')
if error is not None:
if error.find('Unable to cancel order due to existing state') >= 0:
raise OrderNotFound(self.id + ' cancelOrder() failed: ' + error)
order = self.parse_order(order)
self.orders[order['id']] = order
return self.extend({'info': response}, order)
def is_fiat(self, currency):
if currency == 'EUR':
return True
if currency == 'PLN':
return True
return False
def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
self.load_markets()
# currency = self.currency(code)
if code != 'BTC':
            raise ExchangeError(self.id + ' supports BTC withdrawals only, other currencies coming soon...')
request = {
'currency': 'XBt', # temporarily
'amount': amount,
'address': address,
# 'otpToken': '123456', # requires if two-factor auth(OTP) is enabled
# 'fee': 0.001, # bitcoin network fee
}
response = self.privatePostUserRequestWithdrawal(self.extend(request, params))
return {
'info': response,
'id': response['transactID'],
}
def handle_errors(self, code, reason, url, method, headers, body, response):
if code == 429:
raise DDoSProtection(self.id + ' ' + body)
if code >= 400:
if body:
if body[0] == '{':
error = self.safe_value(response, 'error', {})
message = self.safe_string(error, 'message')
feedback = self.id + ' ' + body
exact = self.exceptions['exact']
if message in exact:
raise exact[message](feedback)
broad = self.exceptions['broad']
broadKey = self.findBroadlyMatchedKey(broad, message)
if broadKey is not None:
raise broad[broadKey](feedback)
if code == 400:
raise BadRequest(feedback)
raise ExchangeError(feedback) # unknown message
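    # Flow sketch for a hypothetical 400 response body such as
    # '{"error": {"message": "some message", "name": "HTTPError"}}': the message is
    # looked up first in self.exceptions['exact'], then matched broadly via
    # findBroadlyMatchedKey(); if neither matches, BadRequest is raised for HTTP 400
    # and ExchangeError otherwise. The concrete mappings live in the exchange
    # description and are not shown here.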
def nonce(self):
return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = '/api/' + self.version + '/' + path
if method != 'PUT':
if params:
query += '?' + self.urlencode(params)
url = self.urls['api'] + query
if api == 'private':
self.check_required_credentials()
auth = method + query
expires = self.safe_integer(self.options, 'api-expires')
headers = {
'Content-Type': 'application/json',
'api-key': self.apiKey,
}
expires = self.sum(self.seconds(), expires)
expires = str(expires)
auth += expires
headers['api-expires'] = expires
if method == 'POST' or method == 'PUT':
if params:
body = self.json(params)
auth += body
headers['api-signature'] = self.hmac(self.encode(auth), self.encode(self.secret))
return {'url': url, 'method': method, 'body': body, 'headers': headers}
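# Illustrative standalone sketch (not part of ccxt): reproduces the signature built
# by sign() above, assuming ccxt's hmac() helper defaults to HMAC-SHA256 with a hex
# digest. The secret and expiry passed by a caller are hypothetical placeholders.
def _example_bitmex_signature(secret, method, path, expires, body=''):
    import hashlib
    import hmac
    # auth string mirrors sign(): verb + request path (including query) + expires + body
    auth = method + path + str(expires) + (body or '')
    return hmac.new(secret.encode(), auth.encode(), hashlib.sha256).hexdigest()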
|
py | b4027bd972524db718c7f2bfafa6963466549ad0 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/structure/general/shared_transport_debris_01.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result |
py | b4027d329ad4d2019c51f1d754d9538286e6c2ea | # -*- coding: utf-8 -*-
# nghttp2 - HTTP/2 C Library
# Copyright (c) 2012 Tatsuhiro Tsujikawa
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# nghttp2 documentation build configuration file, created by
# sphinx-quickstart on Sun Mar 11 22:57:49 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.append(os.path.abspath('../..//doc/_exts'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinxcontrib.rubydomain']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['../..//_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'nghttp2'
copyright = u'2012, 2015, 2016, Tatsuhiro Tsujikawa'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.39.1'
# The full version, including alpha/beta/rc tags.
release = '1.39.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['manual', 'README.rst', '*-header.rst', 'sources']
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = 'c:func'
primary_domain = 'c'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The default language to highlight source code in. The default is 'python'.
highlight_language = 'c'
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['../..//doc/_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = False
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': ['menu.html', 'localtoc.html', 'relations.html', 'sourcelink.html',
'searchbox.html']
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
html_copy_source = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'nghttp2doc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'nghttp2.tex', u'nghttp2 Documentation',
u'Tatsuhiro Tsujikawa', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('nghttp.1', 'nghttp', u'HTTP/2 client', [u'Tatsuhiro Tsujikawa'], 1),
('nghttpd.1', 'nghttpd', u'HTTP/2 server', [u'Tatsuhiro Tsujikawa'], 1),
('nghttpx.1', 'nghttpx', u'HTTP/2 proxy', [u'Tatsuhiro Tsujikawa'], 1),
('h2load.1', 'h2load', u'HTTP/2 benchmarking tool',
[u'Tatsuhiro Tsujikawa'], 1)
]
|
py | b4027d5bbd10b52d465eba549b314b9364b493a6 | from typing import Dict, List
import demistomock as demisto
from CommonServerPython import *
special = ['n', 't', '\\', '"', '\'', '7', 'r']
def check_if_found_incident(res: List):
if res and isinstance(res, list) and isinstance(res[0].get('Contents'), dict):
if 'data' not in res[0]['Contents']:
raise DemistoException(res[0].get('Contents'))
elif res[0]['Contents']['data'] is None:
return False
return True
else:
raise DemistoException(f'failed to get incidents from demisto.\nGot: {res}')
def is_valid_args(args: Dict):
array_args: List[str] = ['id', 'name', 'status', 'notstatus', 'reason', 'level', 'owner', 'type', 'query']
error_msg: List[str] = []
for _key, value in args.items():
if _key in array_args:
try:
if _key == 'id':
if not isinstance(value, (int, str, list)):
error_msg.append(
f'Error while parsing the incident id with the value: {value}. The given type: '
f'{type(value)} is not a valid type for an ID. The supported id types are: int, list and str')
elif isinstance(value, str):
_ = bytes(value, "utf-8").decode("unicode_escape")
else:
_ = bytes(value, "utf-8").decode("unicode_escape")
except UnicodeDecodeError as ex:
error_msg.append(f'Error while parsing the argument: "{_key}" '
f'\nError:\n- "{str(ex)}"')
if len(error_msg) != 0:
raise DemistoException('\n'.join(error_msg))
return True
def apply_filters(incidents: List, args: Dict):
names_to_filter = set(argToList(args.get('name')))
types_to_filter = set(argToList(args.get('type')))
filtered_incidents = []
for incident in incidents:
if names_to_filter and incident['name'] not in names_to_filter:
continue
if types_to_filter and incident['type'] not in types_to_filter:
continue
filtered_incidents.append(incident)
return filtered_incidents
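# A small illustration of apply_filters with hypothetical incidents:
#
#   incidents = [{'name': 'A', 'type': 'Phishing'}, {'name': 'B', 'type': 'Malware'}]
#   apply_filters(incidents, {'type': 'Phishing'})   # -> [{'name': 'A', 'type': 'Phishing'}]
#
# Omitting 'name'/'type' (or passing an empty value) disables that check, because
# argToList returns an empty list for a missing value and the resulting set is falsy.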
def add_incidents_link(data: List):
server_url = demisto.demistoUrls().get('server')
for incident in data:
incident_link = urljoin(server_url, f'#/Details/{incident.get("id")}')
incident['incidentLink'] = incident_link
return data
def search_incidents(args: Dict): # pragma: no cover
if not is_valid_args(args):
return
if fromdate := arg_to_datetime(args.get('fromdate')):
from_date = fromdate.isoformat()
args['fromdate'] = from_date
if todate := arg_to_datetime(args.get('todate')):
to_date = todate.isoformat()
args['todate'] = to_date
if args.get('trimevents') == '0':
args.pop('trimevents')
# handle list of ids
if args.get('id'):
args['id'] = ','.join(argToList(args.get('id')))
res: List = execute_command('getIncidents', args, extract_contents=False)
incident_found: bool = check_if_found_incident(res)
if incident_found is False:
return 'Incidents not found.', {}, {}
data = apply_filters(res[0]['Contents']['data'], args)
data = add_incidents_link(data)
headers: List[str] = ['id', 'name', 'severity', 'status', 'owner', 'created', 'closed', 'incidentLink']
md: str = tableToMarkdown(name="Incidents found", t=data, headers=headers)
return md, data, res
def main(): # pragma: no cover
args: Dict = demisto.args()
try:
readable_output, outputs, raw_response = search_incidents(args)
if search_results_label := args.get('searchresultslabel'):
for output in outputs:
output['searchResultsLabel'] = search_results_label
results = CommandResults(
outputs_prefix='foundIncidents',
outputs_key_field='id',
readable_output=readable_output,
outputs=outputs,
raw_response=raw_response
)
return_results(results)
except DemistoException as error:
return_error(str(error), error)
if __name__ in ('__main__', '__builtin__', 'builtins'): # pragma: no cover
main()
|
py | b40280b4eee78b706701206a8ddd12c2f0690232 | import pytest
def test_pass_to_show_in_report(rp_logger):
rp_logger.info("Just a passed test")
assert True is True
@pytest.mark.skip(reason='no way of currently testing this')
def test_the_unknown():
assert True is False
@pytest.mark.command_skip
def test_custom_mark_skip_command_line():
assert True is False
@pytest.mark.fixture_skip
def test_custom_mark_skip_fixture():
assert True is False
def test_inner_skip_test():
pytest.skip("Skip from test insides")
|
py | b40280fab535e60850aee0dd57edfd778466d311 | """Certbot client."""
# version number like 1.2.3a0, must have at least 2 parts, like 1.2
import sys
import warnings
__version__ = '1.24.0.dev0'
if sys.version_info[:2] == (3, 6):
warnings.warn(
"Python 3.6 support will be dropped in the next release of "
"certbot. Please upgrade your Python version.",
PendingDeprecationWarning,
) # pragma: no cover
|
py | b4028111d1c5aa18b9e75266591e4aaa783049cd | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-10 11:13
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('home', '0008_work_link'),
]
operations = [
migrations.AddField(
model_name='homepage',
name='linked_data',
field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, help_text='Linked Data in JSON', null=True),
),
]
|
py | b40281447097ff08c7ab984cbc09234e8ed27ee6 | import typing
import databases
import sqlalchemy
import typesystem
from sqlalchemy.ext.asyncio import create_async_engine
from orm.exceptions import MultipleMatches, NoMatch
from orm.fields import Date, DateTime, String, Text
FILTER_OPERATORS = {
"exact": "__eq__",
"iexact": "ilike",
"contains": "like",
"icontains": "ilike",
"in": "in_",
"gt": "__gt__",
"gte": "__ge__",
"lt": "__lt__",
"lte": "__le__",
}
def _update_auto_now_fields(values, fields):
for key, value in fields.items():
if isinstance(value, (DateTime, Date)) and value.auto_now:
values[key] = value.validator.get_default_value()
return values
class ModelRegistry:
def __init__(self, database: databases.Database) -> None:
self.database = database
self.models = {}
self._metadata = sqlalchemy.MetaData()
@property
def metadata(self):
for model_cls in self.models.values():
model_cls.build_table()
return self._metadata
async def create_all(self):
url = self._get_database_url()
engine = create_async_engine(url)
async with self.database:
async with engine.begin() as conn:
await conn.run_sync(self.metadata.create_all)
await engine.dispose()
async def drop_all(self):
url = self._get_database_url()
engine = create_async_engine(url)
async with self.database:
async with engine.begin() as conn:
await conn.run_sync(self.metadata.drop_all)
await engine.dispose()
def _get_database_url(self) -> str:
url = self.database.url
if not url.driver:
if url.dialect == "postgresql":
url = url.replace(driver="asyncpg")
elif url.dialect == "mysql":
url = url.replace(driver="aiomysql")
elif url.dialect == "sqlite":
url = url.replace(driver="aiosqlite")
return str(url)
class ModelMeta(type):
def __new__(cls, name, bases, attrs):
model_class = super().__new__(cls, name, bases, attrs)
if "registry" in attrs:
model_class.database = attrs["registry"].database
attrs["registry"].models[name] = model_class
if "tablename" not in attrs:
setattr(model_class, "tablename", name.lower())
for name, field in attrs.get("fields", {}).items():
setattr(field, "registry", attrs.get("registry"))
if field.primary_key:
model_class.pkname = name
return model_class
@property
def table(cls):
if not hasattr(cls, "_table"):
cls._table = cls.build_table()
return cls._table
@property
def columns(cls) -> sqlalchemy.sql.ColumnCollection:
return cls._table.columns
class QuerySet:
ESCAPE_CHARACTERS = ["%", "_"]
def __init__(
self,
model_cls=None,
filter_clauses=None,
select_related=None,
limit_count=None,
offset=None,
order_by=None,
):
self.model_cls = model_cls
self.filter_clauses = [] if filter_clauses is None else filter_clauses
self._select_related = [] if select_related is None else select_related
self.limit_count = limit_count
self.query_offset = offset
self._order_by = [] if order_by is None else order_by
def __get__(self, instance, owner):
return self.__class__(model_cls=owner)
@property
def database(self):
return self.model_cls.registry.database
@property
def table(self) -> sqlalchemy.Table:
return self.model_cls.table
@property
def schema(self):
fields = {key: field.validator for key, field in self.model_cls.fields.items()}
return typesystem.Schema(fields=fields)
@property
def pkname(self):
return self.model_cls.pkname
def _build_select_expression(self):
tables = [self.table]
select_from = self.table
for item in self._select_related:
model_cls = self.model_cls
select_from = self.table
for part in item.split("__"):
model_cls = model_cls.fields[part].target
table = model_cls.table
select_from = sqlalchemy.sql.join(select_from, table)
tables.append(table)
expr = sqlalchemy.sql.select(tables)
expr = expr.select_from(select_from)
if self.filter_clauses:
if len(self.filter_clauses) == 1:
clause = self.filter_clauses[0]
else:
clause = sqlalchemy.sql.and_(*self.filter_clauses)
expr = expr.where(clause)
if self._order_by:
order_by = list(map(self._prepare_order_by, self._order_by))
expr = expr.order_by(*order_by)
if self.limit_count:
expr = expr.limit(self.limit_count)
if self.query_offset:
expr = expr.offset(self.query_offset)
return expr
def filter(
self,
clause: typing.Optional[sqlalchemy.sql.expression.BinaryExpression] = None,
**kwargs: typing.Any,
):
if clause is not None:
self.filter_clauses.append(clause)
return self
else:
return self._filter_query(**kwargs)
def exclude(
self,
clause: typing.Optional[sqlalchemy.sql.expression.BinaryExpression] = None,
**kwargs: typing.Any,
):
if clause is not None:
self.filter_clauses.append(clause)
return self
else:
return self._filter_query(_exclude=True, **kwargs)
def _filter_query(self, _exclude: bool = False, **kwargs):
clauses = []
filter_clauses = self.filter_clauses
select_related = list(self._select_related)
if kwargs.get("pk"):
pk_name = self.model_cls.pkname
kwargs[pk_name] = kwargs.pop("pk")
for key, value in kwargs.items():
if "__" in key:
parts = key.split("__")
# Determine if we should treat the final part as a
# filter operator or as a related field.
if parts[-1] in FILTER_OPERATORS:
op = parts[-1]
field_name = parts[-2]
related_parts = parts[:-2]
else:
op = "exact"
field_name = parts[-1]
related_parts = parts[:-1]
model_cls = self.model_cls
if related_parts:
# Add any implied select_related
related_str = "__".join(related_parts)
if related_str not in select_related:
select_related.append(related_str)
# Walk the relationships to the actual model class
# against which the comparison is being made.
for part in related_parts:
model_cls = model_cls.fields[part].target
column = model_cls.table.columns[field_name]
else:
op = "exact"
column = self.table.columns[key]
# Map the operation code onto SQLAlchemy's ColumnElement
# https://docs.sqlalchemy.org/en/latest/core/sqlelement.html#sqlalchemy.sql.expression.ColumnElement
op_attr = FILTER_OPERATORS[op]
has_escaped_character = False
if op in ["contains", "icontains"]:
has_escaped_character = any(
c for c in self.ESCAPE_CHARACTERS if c in value
)
if has_escaped_character:
# enable escape modifier
for char in self.ESCAPE_CHARACTERS:
value = value.replace(char, f"\\{char}")
value = f"%{value}%"
if isinstance(value, Model):
value = value.pk
clause = getattr(column, op_attr)(value)
clause.modifiers["escape"] = "\\" if has_escaped_character else None
clauses.append(clause)
if _exclude:
filter_clauses.append(sqlalchemy.not_(sqlalchemy.sql.and_(*clauses)))
else:
filter_clauses += clauses
return self.__class__(
model_cls=self.model_cls,
filter_clauses=filter_clauses,
select_related=select_related,
limit_count=self.limit_count,
offset=self.query_offset,
order_by=self._order_by,
)
def search(self, term: typing.Any):
if not term:
return self
filter_clauses = list(self.filter_clauses)
value = f"%{term}%"
# has_escaped_character = any(c for c in self.ESCAPE_CHARACTERS if c in term)
# if has_escaped_character:
# # enable escape modifier
# for char in self.ESCAPE_CHARACTERS:
# term = term.replace(char, f'\\{char}')
# term = f"%{value}%"
#
# clause.modifiers['escape'] = '\\' if has_escaped_character else None
search_fields = [
name
for name, field in self.model_cls.fields.items()
if isinstance(field, (String, Text))
]
search_clauses = [
self.table.columns[name].ilike(value) for name in search_fields
]
if len(search_clauses) > 1:
filter_clauses.append(sqlalchemy.sql.or_(*search_clauses))
else:
filter_clauses.extend(search_clauses)
return self.__class__(
model_cls=self.model_cls,
filter_clauses=filter_clauses,
select_related=self._select_related,
limit_count=self.limit_count,
offset=self.query_offset,
order_by=self._order_by,
)
def order_by(self, *order_by):
return self.__class__(
model_cls=self.model_cls,
filter_clauses=self.filter_clauses,
select_related=self._select_related,
limit_count=self.limit_count,
offset=self.query_offset,
order_by=order_by,
)
def select_related(self, related):
if not isinstance(related, (list, tuple)):
related = [related]
related = list(self._select_related) + related
return self.__class__(
model_cls=self.model_cls,
filter_clauses=self.filter_clauses,
select_related=related,
limit_count=self.limit_count,
offset=self.query_offset,
order_by=self._order_by,
)
async def exists(self) -> bool:
expr = self._build_select_expression()
expr = sqlalchemy.exists(expr).select()
return await self.database.fetch_val(expr)
def limit(self, limit_count: int):
return self.__class__(
model_cls=self.model_cls,
filter_clauses=self.filter_clauses,
select_related=self._select_related,
limit_count=limit_count,
offset=self.query_offset,
order_by=self._order_by,
)
def offset(self, offset: int):
return self.__class__(
model_cls=self.model_cls,
filter_clauses=self.filter_clauses,
select_related=self._select_related,
limit_count=self.limit_count,
offset=offset,
order_by=self._order_by,
)
async def count(self) -> int:
expr = self._build_select_expression().alias("subquery_for_count")
expr = sqlalchemy.func.count().select().select_from(expr)
return await self.database.fetch_val(expr)
async def all(self, **kwargs):
if kwargs:
return await self.filter(**kwargs).all()
expr = self._build_select_expression()
rows = await self.database.fetch_all(expr)
return [
self.model_cls._from_row(row, select_related=self._select_related)
for row in rows
]
async def get(self, **kwargs):
if kwargs:
return await self.filter(**kwargs).get()
expr = self._build_select_expression().limit(2)
rows = await self.database.fetch_all(expr)
if not rows:
raise NoMatch()
if len(rows) > 1:
raise MultipleMatches()
return self.model_cls._from_row(rows[0], select_related=self._select_related)
async def first(self, **kwargs):
if kwargs:
return await self.filter(**kwargs).first()
rows = await self.limit(1).all()
if rows:
return rows[0]
def _validate_kwargs(self, **kwargs):
fields = self.model_cls.fields
validator = typesystem.Schema(
fields={key: value.validator for key, value in fields.items()}
)
kwargs = validator.validate(kwargs)
for key, value in fields.items():
if value.validator.read_only and value.validator.has_default():
kwargs[key] = value.validator.get_default_value()
return kwargs
async def create(self, **kwargs):
kwargs = self._validate_kwargs(**kwargs)
instance = self.model_cls(**kwargs)
expr = self.table.insert().values(**kwargs)
if self.pkname not in kwargs:
instance.pk = await self.database.execute(expr)
else:
await self.database.execute(expr)
return instance
async def bulk_create(self, objs: typing.List[typing.Dict]) -> None:
new_objs = [self._validate_kwargs(**obj) for obj in objs]
expr = self.table.insert().values(new_objs)
await self.database.execute(expr)
async def delete(self) -> None:
expr = self.table.delete()
for filter_clause in self.filter_clauses:
expr = expr.where(filter_clause)
await self.database.execute(expr)
async def update(self, **kwargs) -> None:
fields = {
key: field.validator
for key, field in self.model_cls.fields.items()
if key in kwargs
}
validator = typesystem.Schema(fields=fields)
kwargs = _update_auto_now_fields(
validator.validate(kwargs), self.model_cls.fields
)
expr = self.table.update().values(**kwargs)
for filter_clause in self.filter_clauses:
expr = expr.where(filter_clause)
await self.database.execute(expr)
async def get_or_create(
self, defaults: typing.Dict[str, typing.Any], **kwargs
) -> typing.Tuple[typing.Any, bool]:
try:
instance = await self.get(**kwargs)
return instance, False
except NoMatch:
kwargs.update(defaults)
instance = await self.create(**kwargs)
return instance, True
async def update_or_create(
self, defaults: typing.Dict[str, typing.Any], **kwargs
) -> typing.Tuple[typing.Any, bool]:
try:
instance = await self.get(**kwargs)
await instance.update(**defaults)
return instance, False
except NoMatch:
kwargs.update(defaults)
instance = await self.create(**kwargs)
return instance, True
def _prepare_order_by(self, order_by: str):
reverse = order_by.startswith("-")
order_by = order_by.lstrip("-")
order_col = self.table.columns[order_by]
return order_col.desc() if reverse else order_col
class Model(metaclass=ModelMeta):
objects = QuerySet()
def __init__(self, **kwargs):
if "pk" in kwargs:
kwargs[self.pkname] = kwargs.pop("pk")
for key, value in kwargs.items():
if key not in self.fields:
raise ValueError(
f"Invalid keyword {key} for class {self.__class__.__name__}"
)
setattr(self, key, value)
@property
def pk(self):
return getattr(self, self.pkname)
@pk.setter
def pk(self, value):
setattr(self, self.pkname, value)
def __repr__(self):
return f"<{self.__class__.__name__}: {self}>"
def __str__(self):
return f"{self.__class__.__name__}({self.pkname}={self.pk})"
@classmethod
def build_table(cls):
tablename = cls.tablename
metadata = cls.registry._metadata
columns = []
for name, field in cls.fields.items():
columns.append(field.get_column(name))
return sqlalchemy.Table(tablename, metadata, *columns, extend_existing=True)
@property
def table(self) -> sqlalchemy.Table:
return self.__class__.table
async def update(self, **kwargs):
fields = {
key: field.validator for key, field in self.fields.items() if key in kwargs
}
validator = typesystem.Schema(fields=fields)
kwargs = _update_auto_now_fields(validator.validate(kwargs), self.fields)
pk_column = getattr(self.table.c, self.pkname)
expr = self.table.update().values(**kwargs).where(pk_column == self.pk)
await self.database.execute(expr)
# Update the model instance.
for key, value in kwargs.items():
setattr(self, key, value)
async def delete(self) -> None:
pk_column = getattr(self.table.c, self.pkname)
expr = self.table.delete().where(pk_column == self.pk)
await self.database.execute(expr)
async def load(self):
# Build the select expression.
pk_column = getattr(self.table.c, self.pkname)
expr = self.table.select().where(pk_column == self.pk)
# Perform the fetch.
row = await self.database.fetch_one(expr)
# Update the instance.
for key, value in dict(row._mapping).items():
setattr(self, key, value)
@classmethod
def _from_row(cls, row, select_related=[]):
"""
Instantiate a model instance, given a database row.
"""
item = {}
# Instantiate any child instances first.
for related in select_related:
if "__" in related:
first_part, remainder = related.split("__", 1)
model_cls = cls.fields[first_part].target
item[first_part] = model_cls._from_row(row, select_related=[remainder])
else:
model_cls = cls.fields[related].target
item[related] = model_cls._from_row(row)
# Pull out the regular column values.
for column in cls.table.columns:
if column.name not in item:
item[column.name] = row[column]
return cls(**item)
def __setattr__(self, key, value):
if key in self.fields:
# Setting a relationship to a raw pk value should set a
# fully-fledged relationship instance, with just the pk loaded.
value = self.fields[key].expand_relationship(value)
super().__setattr__(key, value)
def __eq__(self, other):
if self.__class__ != other.__class__:
return False
for key in self.fields.keys():
if getattr(self, key, None) != getattr(other, key, None):
return False
return True
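# Illustrative sketch (not part of the library): how QuerySet._filter_query() above
# interprets a Django-style lookup key. The trailing "__<op>" segment is honoured
# only when it names an entry in FILTER_OPERATORS; anything before it is treated as
# related-field traversal, and a bare key defaults to the "exact" operator.
def _example_split_lookup(key):
    parts = key.split("__")
    if len(parts) > 1 and parts[-1] in FILTER_OPERATORS:
        # e.g. "author__name__icontains" -> (["author"], "name", "icontains")
        return parts[:-2], parts[-2], parts[-1]
    # e.g. "name" -> ([], "name", "exact"), "author__name" -> (["author"], "name", "exact")
    return parts[:-1], parts[-1], "exact"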
|
py | b4028181274f654a1138f68066d482cbfaf4141e | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# sys.path.insert(0, os.path.abspath(".."))
# sys.path.insert(0, os.path.abspath("../.."))
# sys.path.insert(0, os.path.abspath("../../.."))
sys.path.insert(0, os.path.abspath("../../OpenSMOG"))
autodoc_mock_imports = ["simtk","numpy","lxml","OpenSMOG_Reporter"]
# -- Project information -----------------------------------------------------
project = 'OpenSMOG'
copyright = '2020-2021 The Center for Theoretical Biological Physics (CTBP) - Rice University & Northeastern University'
author = 'Antonio B. Oliveira Jr., Vinícius G. Contessoto & Paul Whitford'
# The full version, including alpha/beta/rc tags
version = '1.0.4'
release = '1.0.4'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.napoleon",
"sphinx.ext.autodoc",
"sphinx.ext.mathjax",
"sphinx.ext.viewcode",
"sphinx.ext.todo",
"sphinx.ext.intersphinx",
"sphinx.ext.autosummary",
"sphinx.ext.autosectionlabel",
"nbsphinx",
"jupyter_sphinx",
"sphinxcontrib.bibtex",
]
bibtex_bibfiles = ["Reference/OpenSMOG.bib"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
source_suffix = ".rst"
master_doc = "index"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [
"_build",
"_templates",
]
show_authors = True
pygments_style = "sphinx"
todo_include_todos = False
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
#html_logo = "images/SBM_logo.png"
#html_favicon = "images/SBM_icon.png"
html_static_path = []
html_show_sourcelink = True
nbsphinx_execute = 'never' |
py | b402827c4307ce6ad09ed63cfe942ae08d797b40 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from common import git_repository
from common import http_client_appengine
from common import dependency
from common import deps_parser
_CHROMIUM_ROOT_DIR = 'src/'
_CHROMIUM_REPO_MASTER = 'https://chromium.googlesource.com/chromium/src.git'
class DEPSDownloader(deps_parser.DEPSLoader):
"""Downloads DEPS from remote Git repo."""
def __init__(self, check_deps_git_first=False):
"""
Args:
check_deps_git_first (bool): If True, use .DEPS.git instead of DEPS.
"""
self.check_deps_git_first = check_deps_git_first
def Load(self, repo_url, revision, deps_file):
http_client = http_client_appengine.HttpClientAppengine()
repo = git_repository.GitRepository(repo_url, http_client)
content = None
if self.check_deps_git_first and deps_file == 'DEPS':
# When the given deps_file is "DEPS" and .DEPS.git should be checked
# first, it's because before migration from SVN to Git, .DEPS.git contains
# dependencies hosted in Git while DEPS contains those in SVN.
# If .DEPS.git is not found, fallback to the given deps_file. Assume it is
# a commit after migration from SVN to Git.
content = repo.GetSource('.DEPS.git', revision)
if content is None:
content = repo.GetSource(deps_file, revision)
if content is None:
raise Exception(
'Failed to pull %s file from %s, at revision %s.' % (
deps_file, repo_url, revision))
return content
def GetChromeDependency(revision, os_platform, check_deps_git_first=False):
"""Returns all dependencies of Chrome as a dict for the given revision and OS.
Args:
revision (str): The revision of a Chrome build.
os_platform (str): The target platform of the Chrome build, should be one of
'win', 'ios', 'mac', 'unix', 'android', or 'all'.
check_deps_git_first (bool): If True, use .DEPS.git instead of DEPS.
Returns:
A map from dependency path to the dependency info.
"""
root_dep = dependency.Dependency(
_CHROMIUM_ROOT_DIR, _CHROMIUM_REPO_MASTER, revision, 'DEPS')
deps_parser.UpdateDependencyTree(
root_dep, [os_platform], DEPSDownloader(check_deps_git_first))
dependencies = {}
# Flatten the dependency tree into a one-level dict.
def FlattenDepTree(dep):
dependencies[dep.path] = dep
for child in dep.children.values():
FlattenDepTree(child)
FlattenDepTree(root_dep)
return dependencies
def GetChromiumDEPSRolls(old_cr_revision, new_cr_revision, os_platform,
check_deps_git_first=False):
"""Returns a list of dependency rolls between the given Chromium revisions.
Args:
old_cr_revision (str): The Git commit hash for the old Chromium revision.
new_cr_revision (str): The Git commit hash for the new Chromium revision.
os_platform (str): The target OS platform of the Chrome or test binary.
check_deps_git_first (bool): If True, use .DEPS.git instead of DEPS.
"""
old_deps = GetChromeDependency(
old_cr_revision, os_platform, check_deps_git_first)
new_deps = GetChromeDependency(
new_cr_revision, os_platform, check_deps_git_first)
rolls = []
for path, new_dep in new_deps.iteritems():
if path == _CHROMIUM_ROOT_DIR: # Skip the root dependency -- chromium.
continue
old_revision = None
if path in old_deps:
old_revision = old_deps[path].revision
if old_revision != new_dep.revision:
rolls.append(
dependency.DependencyRoll(
path, new_dep.repo_url, old_revision, new_dep.revision))
for path, old_dep in old_deps.iteritems():
if path not in new_deps:
rolls.append(
dependency.DependencyRoll(
path, old_dep.repo_url, old_dep.revision, None))
return rolls
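# Example usage (hypothetical revision hashes):
#
#   rolls = GetChromiumDEPSRolls('<old git hash>', '<new git hash>', 'android')
#
# Each entry is a dependency.DependencyRoll(path, repo_url, old_revision,
# new_revision); a None old revision means the dependency was added between the
# two Chromium revisions, and a None new revision means it was removed.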
|
py | b40282853f4426a69554c074742a5cdaf440856d | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 4.0.2
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info < (2, 7, 0):
raise RuntimeError("Python 2.7 or later required")
# Import the low-level C/C++ module
if __package__ or "." in __name__:
from . import _socketstream
else:
import _socketstream
try:
import builtins as __builtin__
except ImportError:
import __builtin__
_swig_new_instance_method = _socketstream.SWIG_PyInstanceMethod_New
_swig_new_static_method = _socketstream.SWIG_PyStaticMethod_New
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
def _swig_setattr_nondynamic_instance_variable(set):
def set_instance_attr(self, name, value):
if name == "thisown":
self.this.own(value)
elif name == "this":
set(self, name, value)
elif hasattr(self, name) and isinstance(getattr(type(self), name), property):
set(self, name, value)
else:
raise AttributeError("You cannot add instance attributes to %s" % self)
return set_instance_attr
def _swig_setattr_nondynamic_class_variable(set):
def set_class_attr(cls, name, value):
if hasattr(cls, name) and not isinstance(getattr(cls, name), property):
set(cls, name, value)
else:
raise AttributeError("You cannot add class attributes to %s" % cls)
return set_class_attr
def _swig_add_metaclass(metaclass):
"""Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass"""
def wrapper(cls):
return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
return wrapper
class _SwigNonDynamicMeta(type):
"""Meta class to enforce nondynamic attributes (no new attributes) for a class"""
__setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__)
import weakref
import mfem._par.mesh
import mfem._par.matrix
import mfem._par.vector
import mfem._par.array
import mfem._par.mem_manager
import mfem._par.operators
import mfem._par.sort_pairs
import mfem._par.ncmesh
import mfem._par.vtk
import mfem._par.element
import mfem._par.globals
import mfem._par.densemat
import mfem._par.geom
import mfem._par.intrules
import mfem._par.table
import mfem._par.hash
import mfem._par.vertex
import mfem._par.gridfunc
import mfem._par.coefficient
import mfem._par.sparsemat
import mfem._par.eltrans
import mfem._par.fe
import mfem._par.fespace
import mfem._par.fe_coll
import mfem._par.lininteg
import mfem._par.handle
import mfem._par.hypre
import mfem._par.restriction
import mfem._par.bilininteg
import mfem._par.linearform
import mfem._par.nonlininteg
class socketbuf(object):
r"""Proxy of C++ mfem::socketbuf class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
r"""
__init__(socketbuf self) -> socketbuf
__init__(socketbuf self, int sd) -> socketbuf
__init__(socketbuf self, char const [] hostname, int port) -> socketbuf
"""
_socketstream.socketbuf_swiginit(self, _socketstream.new_socketbuf(*args))
def attach(self, sd):
r"""attach(socketbuf self, int sd) -> int"""
return _socketstream.socketbuf_attach(self, sd)
attach = _swig_new_instance_method(_socketstream.socketbuf_attach)
def detach(self):
r"""detach(socketbuf self) -> int"""
return _socketstream.socketbuf_detach(self)
detach = _swig_new_instance_method(_socketstream.socketbuf_detach)
def open(self, hostname, port):
r"""open(socketbuf self, char const [] hostname, int port) -> int"""
return _socketstream.socketbuf_open(self, hostname, port)
open = _swig_new_instance_method(_socketstream.socketbuf_open)
def close(self):
r"""close(socketbuf self) -> int"""
return _socketstream.socketbuf_close(self)
close = _swig_new_instance_method(_socketstream.socketbuf_close)
def getsocketdescriptor(self):
r"""getsocketdescriptor(socketbuf self) -> int"""
return _socketstream.socketbuf_getsocketdescriptor(self)
getsocketdescriptor = _swig_new_instance_method(_socketstream.socketbuf_getsocketdescriptor)
def is_open(self):
r"""is_open(socketbuf self) -> bool"""
return _socketstream.socketbuf_is_open(self)
is_open = _swig_new_instance_method(_socketstream.socketbuf_is_open)
__swig_destroy__ = _socketstream.delete_socketbuf
# Register socketbuf in _socketstream:
_socketstream.socketbuf_swigregister(socketbuf)
class socketstream(object):
r"""Proxy of C++ mfem::socketstream class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
secure_default = _socketstream.socketstream_secure_default
def __init__(self, *args):
r"""
__init__(socketstream self, bool secure=secure_default) -> socketstream
__init__(socketstream self, socketbuf buf) -> socketstream
__init__(socketstream self, int s, bool secure=secure_default) -> socketstream
__init__(socketstream self, char const [] hostname, int port, bool secure=secure_default) -> socketstream
"""
_socketstream.socketstream_swiginit(self, _socketstream.new_socketstream(*args))
def rdbuf(self):
r"""rdbuf(socketstream self) -> socketbuf"""
return _socketstream.socketstream_rdbuf(self)
rdbuf = _swig_new_instance_method(_socketstream.socketstream_rdbuf)
def open(self, hostname, port):
r"""open(socketstream self, char const [] hostname, int port) -> int"""
return _socketstream.socketstream_open(self, hostname, port)
open = _swig_new_instance_method(_socketstream.socketstream_open)
def close(self):
r"""close(socketstream self) -> int"""
return _socketstream.socketstream_close(self)
close = _swig_new_instance_method(_socketstream.socketstream_close)
def is_open(self):
r"""is_open(socketstream self) -> bool"""
return _socketstream.socketstream_is_open(self)
is_open = _swig_new_instance_method(_socketstream.socketstream_is_open)
__swig_destroy__ = _socketstream.delete_socketstream
def precision(self, *args):
r"""
precision(socketstream self, int const p) -> int
precision(socketstream self) -> int
"""
return _socketstream.socketstream_precision(self, *args)
precision = _swig_new_instance_method(_socketstream.socketstream_precision)
def send_solution(self, mesh, gf):
r"""send_solution(socketstream self, Mesh mesh, GridFunction gf)"""
return _socketstream.socketstream_send_solution(self, mesh, gf)
send_solution = _swig_new_instance_method(_socketstream.socketstream_send_solution)
def send_text(self, ostr):
r"""send_text(socketstream self, char const [] ostr)"""
return _socketstream.socketstream_send_text(self, ostr)
send_text = _swig_new_instance_method(_socketstream.socketstream_send_text)
def flush(self):
r"""flush(socketstream self)"""
return _socketstream.socketstream_flush(self)
flush = _swig_new_instance_method(_socketstream.socketstream_flush)
def good(self):
r"""good(socketstream self) -> bool"""
return _socketstream.socketstream_good(self)
good = _swig_new_instance_method(_socketstream.socketstream_good)
def __lshift__(self, *args):
r"""
__lshift__(socketstream self, char const [] ostr) -> socketstream
__lshift__(socketstream self, int const x) -> socketstream
__lshift__(socketstream self, Mesh mesh) -> socketstream
__lshift__(socketstream self, GridFunction gf) -> socketstream
"""
return _socketstream.socketstream___lshift__(self, *args)
__lshift__ = _swig_new_instance_method(_socketstream.socketstream___lshift__)
def endline(self):
r"""endline(socketstream self) -> socketstream"""
return _socketstream.socketstream_endline(self)
endline = _swig_new_instance_method(_socketstream.socketstream_endline)
# Register socketstream in _socketstream:
_socketstream.socketstream_swigregister(socketstream)
class socketserver(object):
r"""Proxy of C++ mfem::socketserver class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, port, backlog=4):
r"""__init__(socketserver self, int port, int backlog=4) -> socketserver"""
_socketstream.socketserver_swiginit(self, _socketstream.new_socketserver(port, backlog))
def good(self):
r"""good(socketserver self) -> bool"""
return _socketstream.socketserver_good(self)
good = _swig_new_instance_method(_socketstream.socketserver_good)
def close(self):
r"""close(socketserver self) -> int"""
return _socketstream.socketserver_close(self)
close = _swig_new_instance_method(_socketstream.socketserver_close)
def accept(self, *args):
r"""
accept(socketserver self) -> int
accept(socketserver self, socketstream sockstr) -> int
"""
return _socketstream.socketserver_accept(self, *args)
accept = _swig_new_instance_method(_socketstream.socketserver_accept)
__swig_destroy__ = _socketstream.delete_socketserver
# Register socketserver in _socketstream:
_socketstream.socketserver_swigregister(socketserver)
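# Illustrative usage sketch (not part of the generated wrapper): streaming a mesh and
# grid function to a visualization server. The host and port are assumptions (19916 is
# the customary GLVis default), and mesh/gf stand for mfem Mesh/GridFunction objects.
#
#   sock = socketstream("localhost", 19916)
#   sock.precision(8)
#   sock.send_solution(mesh, gf)
#   sock.flush()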
|
py | b40283388a9e37d3c44cf1e5b0dfd9e7b3d46356 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import decimal
import random
import re
import time
from oslo.config import cfg
from nova.compute import power_state
from nova import exception as nova_exception
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.powervm import blockdev
from nova.virt.powervm import command
from nova.virt.powervm import common
from nova.virt.powervm import constants
from nova.virt.powervm import exception
from nova.virt.powervm import lpar as LPAR
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
def get_powervm_operator():
if CONF.powervm_mgr_type == 'ivm':
return IVMOperator(common.Connection(CONF.powervm_mgr,
CONF.powervm_mgr_user,
CONF.powervm_mgr_passwd))
def get_powervm_disk_adapter():
return blockdev.PowerVMLocalVolumeAdapter(
common.Connection(CONF.powervm_mgr,
CONF.powervm_mgr_user,
CONF.powervm_mgr_passwd))
class PowerVMOperator(object):
"""PowerVM main operator.
The PowerVMOperator is intended to wrap all operations
from the driver and handle either IVM or HMC managed systems.
"""
def __init__(self):
self._operator = get_powervm_operator()
self._disk_adapter = get_powervm_disk_adapter()
self._host_stats = {}
self._update_host_stats()
def get_info(self, instance_name):
"""Get the current status of an LPAR instance.
Returns a dict containing:
:state: the running state, one of the power_state codes
:max_mem: (int) the maximum memory in KBytes allowed
:mem: (int) the memory in KBytes used by the domain
:num_cpu: (int) the number of virtual CPUs for the domain
:cpu_time: (int) the CPU time used in nanoseconds
:raises: PowerVMLPARInstanceNotFound
"""
lpar_instance = self._get_instance(instance_name)
state = constants.POWERVM_POWER_STATE.get(
lpar_instance['state'], power_state.NOSTATE)
return {'state': state,
'max_mem': lpar_instance['max_mem'],
'mem': lpar_instance['desired_mem'],
'num_cpu': lpar_instance['max_procs'],
'cpu_time': lpar_instance['uptime']}
def instance_exists(self, instance_name):
lpar_instance = self._operator.get_lpar(instance_name)
return True if lpar_instance else False
def _get_instance(self, instance_name):
"""Check whether or not the LPAR instance exists and return it."""
lpar_instance = self._operator.get_lpar(instance_name)
if lpar_instance is None:
LOG.error(_("LPAR instance '%s' not found") % instance_name)
raise exception.PowerVMLPARInstanceNotFound(
instance_name=instance_name)
return lpar_instance
def list_instances(self):
"""
Return the names of all the instances known to the virtualization
layer, as a list.
"""
lpar_instances = self._operator.list_lpar_instances()
return lpar_instances
def get_available_resource(self):
"""Retrieve resource info.
:returns: dictionary containing resource info
"""
data = self.get_host_stats()
# Memory data is in MB already.
memory_mb_used = data['host_memory_total'] - data['host_memory_free']
# Convert to GB
local_gb = data['disk_total'] / 1024
local_gb_used = data['disk_used'] / 1024
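# Worked example (hypothetical numbers): disk_total=102400 MB and
# disk_used=51200 MB yield local_gb=100 and local_gb_used=50.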
dic = {'vcpus': data['vcpus'],
'memory_mb': data['host_memory_total'],
'local_gb': local_gb,
'vcpus_used': data['vcpus_used'],
'memory_mb_used': memory_mb_used,
'local_gb_used': local_gb_used,
'hypervisor_type': data['hypervisor_type'],
'hypervisor_version': data['hypervisor_version'],
'hypervisor_hostname': self._operator.get_hostname(),
'cpu_info': ','.join(data['cpu_info']),
'disk_available_least': data['disk_total']}
return dic
def get_host_stats(self, refresh=False):
"""Return currently known host stats."""
if refresh:
self._update_host_stats()
return self._host_stats
def _update_host_stats(self):
memory_info = self._operator.get_memory_info()
cpu_info = self._operator.get_cpu_info()
# Note: disk avail information is not accurate. The value
# is a sum of all Volume Groups and the result cannot
# represent the real possibility. Example: consider two
# VGs both 10G, the avail disk will be 20G however,
# a 15G image does not fit in any VG. This can be improved
# later on.
disk_info = self._operator.get_disk_info()
data = {}
data['vcpus'] = cpu_info['total_procs']
data['vcpus_used'] = cpu_info['total_procs'] - cpu_info['avail_procs']
data['cpu_info'] = constants.POWERVM_CPU_INFO
data['disk_total'] = disk_info['disk_total']
data['disk_used'] = disk_info['disk_used']
data['disk_available'] = disk_info['disk_avail']
data['host_memory_total'] = memory_info['total_mem']
data['host_memory_free'] = memory_info['avail_mem']
data['hypervisor_type'] = constants.POWERVM_HYPERVISOR_TYPE
data['hypervisor_version'] = constants.POWERVM_HYPERVISOR_VERSION
data['hypervisor_hostname'] = self._operator.get_hostname()
data['extres'] = ''
self._host_stats = data
def spawn(self, context, instance, image_id, network_info):
def _create_image(context, instance, image_id):
"""Fetch image from glance and copy it to the remote system."""
try:
root_volume = self._disk_adapter.create_volume_from_image(
context, instance, image_id)
self._disk_adapter.attach_volume_to_host(root_volume)
lpar_id = self._operator.get_lpar(instance['name'])['lpar_id']
vhost = self._operator.get_vhost_by_instance_id(lpar_id)
self._operator.attach_disk_to_vhost(
root_volume['device_name'], vhost)
except Exception as e:
LOG.exception(_("PowerVM image creation failed: %s") % str(e))
raise exception.PowerVMImageCreationFailed()
spawn_start = time.time()
try:
try:
host_stats = self.get_host_stats(refresh=True)
lpar_inst = self._create_lpar_instance(instance,
network_info, host_stats)
#TODO(mjfork) capture the error and handle the error when the
# MAC prefix already exists on the
# system (1 in 2^28)
self._operator.create_lpar(lpar_inst)
LOG.debug(_("Creating LPAR instance '%s'") % instance['name'])
except nova_exception.ProcessExecutionError:
LOG.exception(_("LPAR instance '%s' creation failed") %
instance['name'])
raise exception.PowerVMLPARCreationFailed()
_create_image(context, instance, image_id)
LOG.debug(_("Activating the LPAR instance '%s'")
% instance['name'])
self._operator.start_lpar(instance['name'])
# TODO(mrodden): probably do this a better way
# that actually relies on the time module
# and nonblocking threading
# Wait for boot
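# range(10) returns a list under Python 2; popping one entry per
# one-second iteration below gives the LPAR roughly ten seconds to
# reach the RUNNING state before the boot is considered failed.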
timeout_count = range(10)
while timeout_count:
state = self.get_info(instance['name'])['state']
if state == power_state.RUNNING:
LOG.info(_("Instance spawned successfully."),
instance=instance)
break
timeout_count.pop()
if len(timeout_count) == 0:
LOG.error(_("Instance '%s' failed to boot") %
instance['name'])
self._cleanup(instance['name'])
break
time.sleep(1)
except exception.PowerVMImageCreationFailed:
with excutils.save_and_reraise_exception():
# log errors in cleanup
try:
self._cleanup(instance['name'])
except Exception:
LOG.exception(_('Error while attempting to '
'clean up failed instance launch.'))
spawn_time = time.time() - spawn_start
LOG.info(_("Instance spawned in %s seconds") % spawn_time,
instance=instance)
def destroy(self, instance_name, destroy_disks=True):
"""Destroy (shutdown and delete) the specified instance.
:param instance_name: Instance name.
"""
try:
self._cleanup(instance_name, destroy_disks)
except exception.PowerVMLPARInstanceNotFound:
LOG.warn(_("During destroy, LPAR instance '%s' was not found on "
"PowerVM system.") % instance_name)
def capture_image(self, context, instance, image_id, image_meta):
"""Capture the root disk for a snapshot
:param context: nova context for this operation
:param instance: instance information to capture the image from
:param image_id: uuid of pre-created snapshot image
:param image_meta: metadata to upload with captured image
"""
lpar = self._operator.get_lpar(instance['name'])
previous_state = lpar['state']
# stop the instance if it is running
if previous_state == 'Running':
LOG.debug(_("Stopping instance %s for snapshot.") %
instance['name'])
# wait up to 2 minutes for shutdown
self.power_off(instance['name'], timeout=120)
# get disk_name
vhost = self._operator.get_vhost_by_instance_id(lpar['lpar_id'])
disk_name = self._operator.get_disk_name_by_vhost(vhost)
# do capture and upload
self._disk_adapter.create_image_from_volume(
disk_name, context, image_id, image_meta)
# restart instance if it was running before
if previous_state == 'Running':
self.power_on(instance['name'])
def _cleanup(self, instance_name, destroy_disks=True):
lpar_id = self._get_instance(instance_name)['lpar_id']
try:
vhost = self._operator.get_vhost_by_instance_id(lpar_id)
disk_name = self._operator.get_disk_name_by_vhost(vhost)
LOG.debug(_("Shutting down the instance '%s'") % instance_name)
self._operator.stop_lpar(instance_name)
#dperaza: LPAR should be deleted first so that vhost is
#cleanly removed and detached from disk device.
LOG.debug(_("Deleting the LPAR instance '%s'") % instance_name)
self._operator.remove_lpar(instance_name)
if disk_name and destroy_disks:
# TODO(mrodden): we should also detach from the instance
# before we start deleting things...
volume_info = {'device_name': disk_name}
#Volume info dictionary might need more info that is lost when
#volume is detached from host so that it can be deleted
self._disk_adapter.detach_volume_from_host(volume_info)
self._disk_adapter.delete_volume(volume_info)
except Exception:
LOG.exception(_("PowerVM instance cleanup failed"))
raise exception.PowerVMLPARInstanceCleanupFailed(
instance_name=instance_name)
def power_off(self, instance_name, timeout=30):
self._operator.stop_lpar(instance_name, timeout)
def power_on(self, instance_name):
self._operator.start_lpar(instance_name)
def macs_for_instance(self, instance):
return self._operator.macs_for_instance(instance)
def _create_lpar_instance(self, instance, network_info, host_stats=None):
inst_name = instance['name']
# CPU/Memory min and max can be configurable. Let's assume
# some default values for now.
# Memory
mem = instance['memory_mb']
if host_stats and mem > host_stats['host_memory_free']:
LOG.error(_('Not enough free memory in the host'))
raise exception.PowerVMInsufficientFreeMemory(
instance_name=instance['name'])
mem_min = min(mem, constants.POWERVM_MIN_MEM)
mem_max = mem + constants.POWERVM_MAX_MEM
# CPU
cpus = instance['vcpus']
if host_stats:
avail_cpus = host_stats['vcpus'] - host_stats['vcpus_used']
if cpus > avail_cpus:
LOG.error(_('Insufficient available CPU on PowerVM'))
raise exception.PowerVMInsufficientCPU(
instance_name=instance['name'])
cpus_min = min(cpus, constants.POWERVM_MIN_CPUS)
cpus_max = cpus + constants.POWERVM_MAX_CPUS
cpus_units_min = decimal.Decimal(cpus_min) / decimal.Decimal(10)
cpus_units = decimal.Decimal(cpus) / decimal.Decimal(10)
# Network
# To ensure the MAC address on the guest matches the
# generated value, pull the first 10 characters off the
# MAC address for the mac_base_value parameter and then
# get the integer value of the final 2 characters as the
# slot_id parameter
mac = network_info[0]['address']
mac_base_value = (mac[:-2]).replace(':', '')
eth_id = self._operator.get_virtual_eth_adapter_id()
slot_id = int(mac[-2:], 16)
virtual_eth_adapters = ('%(slot_id)s/0/%(eth_id)s//0/0' %
locals())
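# Worked example (hypothetical MAC): mac = 'fa:12:34:56:78:20' gives
# mac_base_value = 'fa12345678' and slot_id = int('20', 16) = 32, so
# virtual_eth_adapters becomes '32/0/<eth_id>//0/0'.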
# LPAR configuration data
# max_virtual_slots is hardcoded to 64 since we generate a MAC
# address that must be placed in slots 32 - 64
lpar_inst = LPAR.LPAR(
name=inst_name, lpar_env='aixlinux',
min_mem=mem_min, desired_mem=mem,
max_mem=mem_max, proc_mode='shared',
sharing_mode='uncap', min_procs=cpus_min,
desired_procs=cpus, max_procs=cpus_max,
min_proc_units=cpus_units_min,
desired_proc_units=cpus_units,
max_proc_units=cpus_max,
virtual_eth_mac_base_value=mac_base_value,
max_virtual_slots=64,
virtual_eth_adapters=virtual_eth_adapters)
return lpar_inst
def _check_host_resources(self, instance, vcpus, mem, host_stats):
"""Checks resources on host for resize, migrate, and spawn
:param instance: instance whose resources are being checked
:param vcpus: number of virtual CPUs to be used
:param mem: memory requested by the instance
:param host_stats: dictionary of host resource statistics
"""
if mem > host_stats['host_memory_free']:
LOG.exception(_('Not enough free memory in the host'))
raise exception.PowerVMInsufficientFreeMemory(
instance_name=instance['name'])
avail_cpus = host_stats['vcpus'] - host_stats['vcpus_used']
if vcpus > avail_cpus:
LOG.exception(_('Insufficient available CPU on PowerVM'))
raise exception.PowerVMInsufficientCPU(
instance_name=instance['name'])
def migrate_disk(self, device_name, src_host, dest, image_path,
instance_name=None):
"""Migrates SVC or Logical Volume based disks
:param device_name: disk device name in /dev/
:param src_host: source host/VIOS of the disk being migrated
:param dest: IP or DNS name of destination host/VIOS
:param image_path: path on source and destination to directory
for storing image files
:param instance_name: name of instance being migrated
:returns: disk_info dictionary object describing root volume
information used for locating/mounting the volume
"""
dest_file_path = self._disk_adapter.migrate_volume(
device_name, src_host, dest, image_path, instance_name)
disk_info = {}
disk_info['root_disk_file'] = dest_file_path
return disk_info
def deploy_from_migrated_file(self, lpar, file_path, size):
# decompress file
gzip_ending = '.gz'
if file_path.endswith(gzip_ending):
raw_file_path = file_path[:-len(gzip_ending)]
else:
raw_file_path = file_path
self._operator._decompress_image_file(file_path, raw_file_path)
try:
# deploy lpar from file
self._deploy_from_vios_file(lpar, raw_file_path, size)
finally:
# cleanup migrated file
self._operator._remove_file(raw_file_path)
def _deploy_from_vios_file(self, lpar, file_path, size):
self._operator.create_lpar(lpar)
lpar = self._operator.get_lpar(lpar['name'])
instance_id = lpar['lpar_id']
vhost = self._operator.get_vhost_by_instance_id(instance_id)
# Create logical volume on IVM
diskName = self._disk_adapter._create_logical_volume(size)
# Attach the disk to LPAR
self._operator.attach_disk_to_vhost(diskName, vhost)
# Copy file to device
self._disk_adapter._copy_file_to_device(file_path, diskName)
self._operator.start_lpar(lpar['name'])
class BaseOperator(object):
"""Base operator for IVM and HMC managed systems."""
def __init__(self, connection):
"""Constructor.
:param connection: common.Connection object with the
information to connect to the remote
ssh.
"""
self._connection = None
self.connection_data = connection
def _set_connection(self):
if self._connection is None:
self._connection = common.ssh_connect(self.connection_data)
def get_lpar(self, instance_name, resource_type='lpar'):
"""Return a LPAR object by its instance name.
:param instance_name: LPAR instance name
:param resource_type: the type of resources to list
:returns: LPAR object
"""
cmd = self.command.lssyscfg('-r %s --filter "lpar_names=%s"'
% (resource_type, instance_name))
output = self.run_vios_command(cmd)
if not output:
return None
lpar = LPAR.load_from_conf_data(output[0])
return lpar
def list_lpar_instances(self):
"""List all existent LPAR instances names.
:returns: list -- list with instances names.
"""
lpar_names = self.run_vios_command(self.command.lssyscfg(
'-r lpar -F name'))
if not lpar_names:
return []
return lpar_names
def create_lpar(self, lpar):
"""Receives a LPAR data object and creates a LPAR instance.
:param lpar: LPAR object
"""
conf_data = lpar.to_string()
self.run_vios_command(self.command.mksyscfg('-r lpar -i "%s"' %
conf_data))
def start_lpar(self, instance_name):
"""Start a LPAR instance.
:param instance_name: LPAR instance name
"""
self.run_vios_command(self.command.chsysstate('-r lpar -o on -n %s'
% instance_name))
def stop_lpar(self, instance_name, timeout=30):
"""Stop a running LPAR.
:param instance_name: LPAR instance name
:param timeout: value in seconds for specifying
how long to wait for the LPAR to stop
"""
cmd = self.command.chsysstate('-r lpar -o shutdown --immed -n %s' %
instance_name)
self.run_vios_command(cmd)
# poll instance until stopped or raise exception
lpar_obj = self.get_lpar(instance_name)
wait_inc = 1 # seconds to wait between status polling
start_time = time.time()
while lpar_obj['state'] != 'Not Activated':
curr_time = time.time()
# wait up to (timeout) seconds for shutdown
if (curr_time - start_time) > timeout:
raise exception.PowerVMLPAROperationTimeout(
operation='stop_lpar',
instance_name=instance_name)
time.sleep(wait_inc)
lpar_obj = self.get_lpar(instance_name)
def remove_lpar(self, instance_name):
"""Removes a LPAR.
:param instance_name: LPAR instance name
"""
self.run_vios_command(self.command.rmsyscfg('-r lpar -n %s'
% instance_name))
def get_vhost_by_instance_id(self, instance_id):
"""Return the vhost name by the instance id.
:param instance_id: LPAR instance id
:returns: string -- vhost name or None in case none is found
"""
instance_hex_id = '%#010x' % int(instance_id)
cmd = self.command.lsmap('-all -field clientid svsa -fmt :')
output = self.run_vios_command(cmd)
vhosts = dict(item.split(':') for item in list(output))
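# Hypothetical lsmap output item '0x00000002:vhost0' becomes the entry
# {'0x00000002': 'vhost0'}; an instance_id of 2 formats to '0x00000002'
# via '%#010x', so the lookup below would return 'vhost0'.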
if instance_hex_id in vhosts:
return vhosts[instance_hex_id]
return None
def get_virtual_eth_adapter_id(self):
"""Virtual ethernet adapter id.
Searches for the shared ethernet adapter and returns
its id.
:returns: id of the virtual ethernet adapter.
"""
cmd = self.command.lsmap('-all -net -field sea -fmt :')
output = self.run_vios_command(cmd)
sea = output[0]
cmd = self.command.lsdev('-dev %s -attr pvid' % sea)
output = self.run_vios_command(cmd)
# Returned output looks like this: ['value', '', '1']
if output:
return output[2]
return None
def get_hostname(self):
"""Returns the managed system hostname.
:returns: string -- hostname
"""
output = self.run_vios_command(self.command.hostname())
return output[0]
def get_disk_name_by_vhost(self, vhost):
"""Returns the disk name attached to a vhost.
:param vhost: a vhost name
:returns: string -- disk name
"""
cmd = self.command.lsmap('-vadapter %s -field backing -fmt :' % vhost)
output = self.run_vios_command(cmd)
if output:
return output[0]
return None
def attach_disk_to_vhost(self, disk, vhost):
"""Attach disk name to a specific vhost.
:param disk: the disk name
:param vhost: the vhost name
"""
cmd = self.command.mkvdev('-vdev %s -vadapter %s' % (disk, vhost))
self.run_vios_command(cmd)
def get_memory_info(self):
"""Get memory info.
:returns: dict - memory info with keys 'total_mem' and 'avail_mem' (in MB)
"""
cmd = self.command.lshwres(
'-r mem --level sys -F configurable_sys_mem,curr_avail_sys_mem')
output = self.run_vios_command(cmd)
total_mem, avail_mem = output[0].split(',')
return {'total_mem': int(total_mem),
'avail_mem': int(avail_mem)}
def get_cpu_info(self):
"""Get CPU info.
:returns: dict - cpu info with keys 'total_procs' and 'avail_procs'
"""
cmd = self.command.lshwres(
'-r proc --level sys -F '
'configurable_sys_proc_units,curr_avail_sys_proc_units')
output = self.run_vios_command(cmd)
total_procs, avail_procs = output[0].split(',')
return {'total_procs': float(total_procs),
'avail_procs': float(avail_procs)}
def get_disk_info(self):
"""Get the disk usage information.
:returns: dict - disk info with keys 'disk_total', 'disk_used' and 'disk_avail' (in MB)
"""
vgs = self.run_vios_command(self.command.lsvg())
(disk_total, disk_used, disk_avail) = [0, 0, 0]
for vg in vgs:
cmd = self.command.lsvg('%s -field totalpps usedpps freepps -fmt :'
% vg)
output = self.run_vios_command(cmd)
# Output example:
# 1271 (10168 megabytes):0 (0 megabytes):1271 (10168 megabytes)
(d_total, d_used, d_avail) = re.findall(r'(\d+) megabytes',
output[0])
disk_total += int(d_total)
disk_used += int(d_used)
disk_avail += int(d_avail)
return {'disk_total': disk_total,
'disk_used': disk_used,
'disk_avail': disk_avail}
def run_vios_command(self, cmd, check_exit_code=True):
"""Run a remote command using an active ssh connection.
:param cmd: string with the command to run.
"""
self._set_connection()
stdout, stderr = utils.ssh_execute(self._connection, cmd,
check_exit_code=check_exit_code)
return stdout.strip().splitlines()
def run_vios_command_as_root(self, command, check_exit_code=True):
"""Run a remote command as root using an active ssh connection.
:param command: string with the command to run as root.
"""
self._set_connection()
stdout, stderr = common.ssh_command_as_root(
self._connection, command, check_exit_code=check_exit_code)
return stdout.read().splitlines()
def macs_for_instance(self, instance):
pass
def update_lpar(self, lpar_info):
"""Resizing an LPAR
:param lpar_info: dictionary of LPAR information
"""
configuration_data = ('name=%s,min_mem=%s,desired_mem=%s,'
'max_mem=%s,min_procs=%s,desired_procs=%s,'
'max_procs=%s,min_proc_units=%s,'
'desired_proc_units=%s,max_proc_units=%s' %
(lpar_info['name'], lpar_info['min_mem'],
lpar_info['desired_mem'],
lpar_info['max_mem'],
lpar_info['min_procs'],
lpar_info['desired_procs'],
lpar_info['max_procs'],
lpar_info['min_proc_units'],
lpar_info['desired_proc_units'],
lpar_info['max_proc_units']))
self.run_vios_command(self.command.chsyscfg('-r prof -i "%s"' %
configuration_data))
def get_logical_vol_size(self, diskname):
"""Finds and calculates the logical volume size in GB
:param diskname: name of the logical volume
:returns: size of logical volume in GB
"""
configuration_data = ("ioscli lslv %s -fmt : -field pps ppsize" %
diskname)
output = self.run_vios_command(configuration_data)
pps, ppsize = output[0].split(':')
ppsize = re.findall(r'\d+', ppsize)
ppsize = int(ppsize[0])
pps = int(pps)
lv_size = ((pps * ppsize) / 1024)
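# Worked example, assuming a hypothetical lslv output of '2560:64 megabyte(s)':
# pps = 2560 and ppsize = 64, so lv_size = 2560 * 64 / 1024 = 160 GB
# (integer division under Python 2).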
return lv_size
def rename_lpar(self, instance_name, new_name):
"""Rename LPAR given by instance_name to new_name
Note: For IVM based deployments, the name is
limited to 31 characters and will be trimmed
to meet this requirement
:param instance_name: name of LPAR to be renamed
:param new_name: desired new name of LPAR
:returns: new name of renamed LPAR trimmed to 31 characters
if necessary
"""
# grab first 31 characters of new name
new_name_trimmed = new_name[:31]
cmd = ''.join(['chsyscfg -r lpar -i ',
'"',
'name=%s,' % instance_name,
'new_name=%s' % new_name_trimmed,
'"'])
self.run_vios_command(cmd)
return new_name_trimmed
def _decompress_image_file(self, file_path, outfile_path):
command = "/usr/bin/gunzip -c %s > %s" % (file_path, outfile_path)
output = self.run_vios_command_as_root(command)
# Remove compressed image file
command = "/usr/bin/rm %s" % file_path
output = self.run_vios_command_as_root(command)
return outfile_path
def _remove_file(self, file_path):
"""Removes a file on the VIOS partition
:param file_path: absolute path to file to be removed
"""
command = 'rm %s' % file_path
self.run_vios_command_as_root(command)
class IVMOperator(BaseOperator):
"""Integrated Virtualization Manager (IVM) Operator.
Runs specific commands on an IVM managed system.
"""
def __init__(self, ivm_connection):
self.command = command.IVMCommand()
BaseOperator.__init__(self, ivm_connection)
def macs_for_instance(self, instance):
"""Generates set of valid MAC addresses for an IVM instance."""
# NOTE(vish): We would prefer to use 0xfe here to ensure that linux
# bridge mac addresses don't change, but it appears to
# conflict with libvirt, so we use the next highest octet
# that has the unicast and locally administered bits set
# properly: 0xfa.
# Discussion: https://bugs.launchpad.net/nova/+bug/921838
# NOTE(mjfork): For IVM-based PowerVM, we cannot directly set a MAC
# address on an LPAR, but rather need to construct one
# that can be used. Retain the 0xfa as noted above,
# but ensure the final 2 hex values represent a value
# between 32 and 64 so we can assign as the slot id on
# the system. For future reference, the last octet
# should not exceed FF (255) since it would spill over
# into the higher-order octet.
#
# FA:xx:xx:xx:xx:[32-64]
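# For example, a randomly drawn base of [0xfa, 0x1f, 0x2e, 0x3d, 0x4c, 0x00]
# (hypothetical) yields the 32 addresses 'fa:1f:2e:3d:4c:20' through
# 'fa:1f:2e:3d:4c:3f', one per candidate slot id 32..63.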
macs = set()
mac_base = [0xfa,
random.randint(0x00, 0xff),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff),
random.randint(0x00, 0x00)]
for n in range(32, 64):
mac_base[5] = n
macs.add(':'.join(map(lambda x: "%02x" % x, mac_base)))
return macs
|
py | b4028340a29d7e00298387b030b349417082b685 | # -*- coding: utf-8 -*-
r"""
Word paths
This module implements word paths, which is an application of Combinatorics
on Words to Discrete Geometry. A word path is the representation of a word
as a discrete path in a vector space using a one-to-one correspondence
between the alphabet and a set of vectors called steps. Many problems
surrounding 2d lattice polygons (such as questions of self-intersection,
area, inertia moment, etc.) can be solved in linear time (linear in the
length of the perimeter) using theory from Combinatorics on Words.
On the square grid, the encoding of a path using a four-letter alphabet
(for East, North, West and South directions) is also known as the Freeman
chain code [1,2] (see [3] for further reading).
AUTHORS:
- Arnaud Bergeron (2008) : Initial version, path on the square grid
- Sebastien Labbe (2009-01-14) : New classes and hierarchy, doc and functions.
EXAMPLES:
The combinatorial class of all paths defined over three given steps::
sage: P = WordPaths('abc', steps=[(1,2), (-3,4), (0,-3)]); P
Word Paths over 3 steps
This defines a one-to-one correspondence between alphabet and steps::
sage: d = P.letters_to_steps()
sage: sorted(d.items())
[('a', (1, 2)), ('b', (-3, 4)), ('c', (0, -3))]
Creation of a path from the combinatorial class P defined above::
sage: p = P('abaccba'); p
Path: abaccba
Many functions can be used on p: getting the coordinates of its trajectory,
asking whether p is a closed path, plotting it, and many others::
sage: list(p.points())
[(0, 0), (1, 2), (-2, 6), (-1, 8), (-1, 5), (-1, 2), (-4, 6), (-3, 8)]
sage: p.is_closed()
False
sage: p.plot()
Graphics object consisting of 3 graphics primitives
To obtain a list of all the available word path specific functions,
use ``help(p)``::
sage: help(p)
Help on FiniteWordPath_2d_str in module sage.combinat.words.paths object:
...
Methods inherited from FiniteWordPath_2d:
...
Methods inherited from FiniteWordPath_all:
...
Since p is a finite word, many functions from the word library are available::
sage: p.crochemore_factorization()
(a, b, a, c, c, ba)
sage: p.is_palindrome()
False
sage: p[:3]
Path: aba
sage: len(p)
7
P also inherits many functions from Words::
sage: P = WordPaths('rs', steps=[(1,2), (-1,4)]); P
Word Paths over 2 steps
sage: P.alphabet()
{'r', 's'}
sage: list(P.iterate_by_length(3))
[Path: rrr,
Path: rrs,
Path: rsr,
Path: rss,
Path: srr,
Path: srs,
Path: ssr,
Path: sss]
When the number of given steps is half the size of the alphabet, the
opposites of the vectors are also used::
sage: P = WordPaths('abcd', [(1,0), (0,1)])
sage: sorted(P.letters_to_steps().items())
[('a', (1, 0)), ('b', (0, 1)), ('c', (-1, 0)), ('d', (0, -1))]
Some built-in combinatorial classes of paths::
sage: P = WordPaths('abAB', steps='square_grid'); P
Word Paths on the square grid
::
sage: D = WordPaths('()', steps='dyck'); D
Finite Dyck paths
sage: d = D('()()()(())'); d
Path: ()()()(())
sage: d.plot()
Graphics object consisting of 3 graphics primitives
::
sage: P = WordPaths('abcdef', steps='triangle_grid')
sage: p = P('babaddefadabcadefaadfafabacdefa')
sage: p.plot()
Graphics object consisting of 3 graphics primitives
Vector steps may be in more than 2 dimensions::
sage: d = [(1,0,0), (0,1,0), (0,0,1)]
sage: P = WordPaths(alphabet='abc', steps=d); P
Word Paths over 3 steps
sage: p = P('abcabcabcabcaabacabcababcacbabacacabcaccbcac')
sage: p.plot()
Graphics3d Object
::
sage: d = [(1,3,5,1), (-5,1,-6,0), (0,0,1,9), (4,2,-1,0)]
sage: P = WordPaths(alphabet='rstu', steps=d); P
Word Paths over 4 steps
sage: p = P('rtusuusususuturrsust'); p
Path: rtusuusususuturrsust
sage: p.end_point()
(5, 31, -26, 30)
::
sage: CubePaths = WordPaths('abcABC', steps='cube_grid'); CubePaths
Word Paths on the cube grid
sage: CubePaths('abcabaabcabAAAAA').plot()
Graphics3d Object
The input data may be a str, a list, a tuple,
a callable or a finite iterator::
sage: P = WordPaths([0, 1, 2, 3])
sage: P([0,1,2,3,2,1,2,3,2])
Path: 012321232
sage: P((0,1,2,3,2,1,2,3,2))
Path: 012321232
sage: P(lambda n:n%4, length=10)
Path: 0123012301
sage: P(iter([0,3,2,1]), length='finite')
Path: 0321
REFERENCES:
- [1] Freeman, H.: *On the encoding of arbitrary geometric configurations*.
IRE Trans. Electronic Computer 10 (1961) 260-268.
- [2] Freeman, H.: *Boundary encoding and processing*. In Lipkin, B., Rosenfeld,
A., eds.: Picture Processing and Psychopictorics, Academic Press, New York
(1970) 241-266.
- [3] Braquelaire, J.P., Vialard, A.: *Euclidean paths: A new representation of
boundary of discrete regions*. Graphical Models and Image Processing 61 (1999)
16-43.
- [4] :wikipedia:`Regular_tiling`
- [5] :wikipedia:`Dyck_word`
"""
# ****************************************************************************
# Copyright (C) 2008 Arnaud bergeron <[email protected]>,
# Copyright (C) 2009 Sebastien Labbe <[email protected]>,
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
from builtins import zip
from sage.structure.sage_object import SageObject
from sage.misc.cachefunc import cached_method
from sage.misc.lazy_attribute import lazy_attribute
from sage.combinat.words.words import FiniteWords
from sage.combinat.words.word import FiniteWord_class
from sage.combinat.words.alphabet import build_alphabet
from sage.misc.lazy_import import lazy_import
lazy_import("sage.plot.all", ["arrow", "line", "polygon", "point", "Graphics"])
from sage.modules.free_module_element import vector
from sage.rings.integer_ring import ZZ
from sage.rings.number_field.number_field import QuadraticField
from sage.rings.real_mpfr import RR
from .word_datatypes import (WordDatatype_str,
WordDatatype_list,
WordDatatype_tuple)
#WordDatatype_cpp_basic_string)
from .word_infinite_datatypes import (
WordDatatype_iter_with_caching,
WordDatatype_iter,
WordDatatype_callable_with_caching,
WordDatatype_callable)
from sage.matrix.constructor import vector_on_axis_rotation_matrix
#######################################################################
# #
# WordPaths function #
# #
#######################################################################
def WordPaths(alphabet, steps=None):
r"""
Returns the combinatorial class of paths of the given type of steps.
INPUT:
- ``alphabet`` - ordered alphabet
- ``steps`` - (default is None). It can be one of the following:
- an iterable ordered container of as many vectors as there are
letters in the alphabet. The vectors are associated to the letters
according to their order in steps. The vectors can be a tuple or
anything that can be passed to vector function.
- an iterable ordered container of k vectors where k is half the
size of alphabet. The vectors and their opposites are associated
to the letters according to their order in steps (given vectors
first, opposite vectors after).
- ``None``: In this case, the type of steps are guessed from the
length of alphabet.
- 'square_grid' or 'square': (default when size of alphabet is 4)
The order is : East, North, West, South.
- 'triangle_grid' or 'triangle':
- 'hexagonal_grid' or 'hexagon': (default when size of alphabet is 6)
- 'cube_grid' or 'cube':
- 'north_east', 'ne' or 'NE': (the default when size of alphabet is 2)
- 'dyck':
OUTPUT:
- The combinatorial class of all paths of the given type.
EXAMPLES:
The steps can be given explicitly::
sage: WordPaths('abc', steps=[(1,2), (-1,4), (0,-3)])
Word Paths over 3 steps
Different type of input alphabet::
sage: WordPaths(range(3), steps=[(1,2), (-1,4), (0,-3)])
Word Paths over 3 steps
sage: WordPaths(['cric','crac','croc'], steps=[(1,2), (1,4), (0,3)])
Word Paths over 3 steps
Directions can be in three dimensions as well::
sage: WordPaths('ab', steps=[(1,2,2),(-1,4,2)])
Word Paths over 2 steps
When the number of given steps is half the size of the alphabet, the
opposites of the vectors are also used::
sage: P = WordPaths('abcd', [(1,0), (0,1)])
sage: P
Word Paths over 4 steps
sage: sorted(P.letters_to_steps().items())
[('a', (1, 0)), ('b', (0, 1)), ('c', (-1, 0)), ('d', (0, -1))]
When no steps are given, default classes are returned::
sage: WordPaths('ab')
Word Paths in North and East steps
sage: WordPaths(range(4))
Word Paths on the square grid
sage: WordPaths(range(6))
Word Paths on the hexagonal grid
There are many type of built-in steps...
On a two letters alphabet::
sage: WordPaths('ab', steps='north_east')
Word Paths in North and East steps
sage: WordPaths('()', steps='dyck')
Finite Dyck paths
On a four letters alphabet::
sage: WordPaths('ruld', steps='square_grid')
Word Paths on the square grid
On a six letters alphabet::
sage: WordPaths('abcdef', steps='hexagonal_grid')
Word Paths on the hexagonal grid
sage: WordPaths('abcdef', steps='triangle_grid')
Word Paths on the triangle grid
sage: WordPaths('abcdef', steps='cube_grid')
Word Paths on the cube grid
TESTS::
sage: WordPaths(range(5))
Traceback (most recent call last):
...
TypeError: Unable to make a class WordPaths from {0, 1, 2, 3, 4}
sage: WordPaths('abAB', steps='square_gridd')
Traceback (most recent call last):
...
TypeError: Unknown type of steps : square_gridd
"""
#Construction of the alphabet
alphabet = build_alphabet(alphabet)
#If no steps are given, they are guessed from the alphabet
if steps is None:
if alphabet.cardinality() == 2:
steps = 'north_east'
elif alphabet.cardinality() == 4:
steps = 'square_grid'
elif alphabet.cardinality() == 6:
steps = 'hexagonal_grid'
else:
raise TypeError("Unable to make a class WordPaths from %s"%alphabet)
#Returns the class of WordPaths according to the given type of paths
if isinstance(steps, str):
if steps in ('square_grid', 'square'):
return WordPaths_square_grid(alphabet=alphabet)
elif steps in ('triangle_grid', 'triangle'):
return WordPaths_triangle_grid(alphabet=alphabet)
elif steps in ('hexagonal_grid', 'hexagon'):
return WordPaths_hexagonal_grid(alphabet=alphabet)
elif steps in ('cube_grid', 'cube'):
return WordPaths_cube_grid(alphabet=alphabet)
elif steps in ('north_east', 'ne', 'NE'):
return WordPaths_north_east(alphabet=alphabet)
elif steps == 'dyck':
return WordPaths_dyck(alphabet=alphabet)
else:
raise TypeError("Unknown type of steps : %s"%steps)
else:
return WordPaths_all(alphabet=alphabet, steps=steps)
#######################################################################
# #
# Combinatorial classes of word paths #
# #
#######################################################################
class WordPaths_all(FiniteWords):
r"""
The combinatorial class of all paths, i.e. of all words over
an alphabet where each letter is mapped to a step (a vector).
"""
def __init__(self, alphabet, steps):
r"""
INPUT:
- ``alphabet`` - an ordered alphabet
- ``steps`` - an iterable (of same length as alphabet or half the
length of alphabet) of ordered vectors
EXAMPLES::
sage: from sage.combinat.words.paths import WordPaths_all
sage: d = ((1,1), (-1,1), (1,-1), (-1,-1))
sage: P = WordPaths_all('abAB', d); P
Word Paths over 4 steps
sage: P == loads(dumps(P))
True
If the size of the alphabet is twice the number of steps, then the opposite
vectors are used for the second half of the alphabet.
sage: WordPaths('abcd',[(2,1),(2,4)])
Word Paths over 4 steps
sage: _.letters_to_steps()
{'a': (2, 1), 'b': (2, 4), 'c': (-2, -1), 'd': (-2, -4)}
TESTS::
sage: from sage.combinat.words.paths import WordPaths_all
sage: d = ((1,1), (-1,1), (1,-1), (-1,-1))
sage: WordPaths_all('abA', d)
Traceback (most recent call last):
...
TypeError: size of steps (=4) must equal the size of alphabet (=3) or half the size of alphabet.
sage: d = ((1,1), 1)
sage: WordPaths_all('ab', d)
Traceback (most recent call last):
...
ValueError: Can't make vectors from steps
sage: d = ((1,1), (-1,1,0))
sage: WordPaths_all('ab', d)
Traceback (most recent call last):
...
ValueError: Can't make summable vectors from steps
"""
#Construction of the words class
FiniteWords.__init__(self, alphabet)
alphabet = self.alphabet()
#Checking the size of alphabet and steps
ls = len(steps)
la = alphabet.cardinality()
if la != ls and la != 2*ls:
raise TypeError("size of steps (=%s) must equal the size \
of alphabet (=%s) or half the size of alphabet."%(len(steps),alphabet.cardinality()))
#Construction of the steps
from sage.structure.element import Vector
if all((isinstance(x, Vector) for x in steps)):
vsteps = steps
else:
try:
vsteps = [vector(s) for s in steps]
except (TypeError):
raise ValueError("Can't make vectors from steps")
try:
s = sum(vsteps)
except (TypeError, AttributeError):
raise ValueError("Can't make summable vectors from steps")
#Complete vsteps with the opposite vectors if needed
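# For example (cf. the docstring above), a 4-letter alphabet given the two
# steps [(1,0), (0,1)] ends up with vsteps [(1,0), (0,1), (-1,0), (0,-1)].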
if la == 2 * ls:
vsteps += [-v for v in vsteps]
self._steps = dict(zip(alphabet, vsteps))
self._vector_space = s.parent()
def __eq__(self, other):
r"""
TESTS::
sage: W1 = WordPaths(['a','b'], [vector((0,1)), vector((0,2))])
sage: W2 = WordPaths(['a','b'], [vector((0,1)), vector((0,2))])
sage: W3 = WordPaths(['a','b'], [vector((0,2)), vector((1,0))])
sage: W1 == W2
True
sage: W1 == W3
False
"""
return self is other or (type(self) == type(other) and \
self.alphabet() == other.alphabet() and \
self.vector_space() == other.vector_space() and \
self.letters_to_steps() == other.letters_to_steps())
def __ne__(self, other):
r"""
TESTS::
sage: W1 = WordPaths(['a','b'], [vector((0,1)), vector((0,2))])
sage: W2 = WordPaths(['a','b'], [vector((0,1)), vector((0,2))])
sage: W3 = WordPaths(['a','b'], [vector((0,2)), vector((1,0))])
sage: W1 != W2
False
sage: W1 != W3
True
"""
return not (self == other)
@lazy_attribute
def _element_classes(self):
r"""
Returns a dictionary that gives the class of the elements of self.
The word may be finite (infinite words or words of unknown length are
not supported yet).
Its data may be str, list, tuple, a callable or an iterable.
For callable and iterable, the data may be cached.
The dimension of the path may be 1, 2, 3 or more.
TESTS::
sage: d = WordPaths('ab',steps=[(1,2),(3,4)])._element_classes
sage: type(d)
<class 'dict'>
sage: len(d)
7
sage: d['tuple']
<class 'sage.combinat.words.paths.FiniteWordPath_2d_tuple'>
::
sage: d = WordPaths('ab',steps=[(1,2,3),(3,4,5)])._element_classes
sage: len(d)
7
sage: d['tuple']
<class 'sage.combinat.words.paths.FiniteWordPath_3d_tuple'>
::
sage: steps = [(1,2,3,4),(3,4,5,6)]
sage: d = WordPaths('ab',steps=steps)._element_classes
sage: len(d)
7
sage: d['tuple']
<class 'sage.combinat.words.paths.FiniteWordPath_all_tuple'>
::
sage: d = WordPaths('ab',steps=[(1,),(3,)])._element_classes
sage: len(d)
7
sage: d['tuple']
<class 'sage.combinat.words.paths.FiniteWordPath_all_tuple'>
"""
dimension = self._vector_space.dimension()
if dimension == 2:
return {
'list': FiniteWordPath_2d_list,
'str': FiniteWordPath_2d_str,
'tuple': FiniteWordPath_2d_tuple,
'callable_with_caching': FiniteWordPath_2d_callable_with_caching,
'callable': FiniteWordPath_2d_callable,
'iter_with_caching': FiniteWordPath_2d_iter_with_caching,
'iter': FiniteWordPath_2d_iter,
}
elif dimension == 3:
return {
'list': FiniteWordPath_3d_list,
'str': FiniteWordPath_3d_str,
'tuple': FiniteWordPath_3d_tuple,
'callable_with_caching': FiniteWordPath_3d_callable_with_caching,
'callable': FiniteWordPath_3d_callable,
'iter_with_caching': FiniteWordPath_3d_iter_with_caching,
'iter': FiniteWordPath_3d_iter,
}
else:
return {
'list': FiniteWordPath_all_list,
'str': FiniteWordPath_all_str,
'tuple': FiniteWordPath_all_tuple,
'callable_with_caching': FiniteWordPath_all_callable_with_caching,
'callable': FiniteWordPath_all_callable,
'iter_with_caching': FiniteWordPath_all_iter_with_caching,
'iter': FiniteWordPath_all_iter,
}
def __repr__(self):
r"""
Returns a string representation of self.
EXAMPLES::
sage: from sage.combinat.words.paths import WordPaths_all
sage: d = (vector((1,1)), vector((-1,1)), vector((1,-1)), vector((-1,-1)))
sage: WordPaths_all('abAB',d).__repr__()
'Word Paths over 4 steps'
"""
return "Word Paths over %s steps" % self.alphabet().cardinality()
def letters_to_steps(self):
r"""
Returns the dictionary mapping letters to vectors (steps).
EXAMPLES::
sage: d = WordPaths('ab').letters_to_steps()
sage: sorted(d.items())
[('a', (0, 1)), ('b', (1, 0))]
sage: d = WordPaths('abcd').letters_to_steps()
sage: sorted(d.items())
[('a', (1, 0)), ('b', (0, 1)), ('c', (-1, 0)), ('d', (0, -1))]
sage: d = WordPaths('abcdef').letters_to_steps()
sage: sorted(d.items())
[('a', (1, 0)),
('b', (1/2, 1/2*sqrt3)),
('c', (-1/2, 1/2*sqrt3)),
('d', (-1, 0)),
('e', (-1/2, -1/2*sqrt3)),
('f', (1/2, -1/2*sqrt3))]
"""
return self._steps
def vector_space(self):
r"""
Return the vector space over which the steps of the paths are defined.
EXAMPLES::
sage: WordPaths('ab',steps='dyck').vector_space()
Ambient free module of rank 2 over the principal ideal domain Integer Ring
sage: WordPaths('ab',steps='north_east').vector_space()
Ambient free module of rank 2 over the principal ideal domain Integer Ring
sage: WordPaths('abcd',steps='square_grid').vector_space()
Ambient free module of rank 2 over the principal ideal domain Integer Ring
sage: WordPaths('abcdef',steps='hexagonal_grid').vector_space()
Vector space of dimension 2 over Number Field in sqrt3 with defining polynomial x^2 - 3 with sqrt3 = 1.732050807568878?
sage: WordPaths('abcdef',steps='cube_grid').vector_space()
Ambient free module of rank 3 over the principal ideal domain Integer Ring
sage: WordPaths('abcdef',steps='triangle_grid').vector_space()
Vector space of dimension 2 over Number Field in sqrt3 with defining polynomial x^2 - 3 with sqrt3 = 1.732050807568878?
"""
return self._vector_space
class WordPaths_square_grid(WordPaths_all):
r"""
The combinatorial class of all paths on the square grid.
"""
def __init__(self, alphabet):
r"""
The combinatorial class of all finite paths on the square grid.
INPUT:
- ``alphabet`` - ordered alphabet of length 4. The order for the steps
is : East, North, West, South.
EXAMPLES::
sage: from sage.combinat.words.paths import WordPaths_square_grid
sage: P = WordPaths_square_grid('abAB'); P
Word Paths on the square grid
sage: P == loads(dumps(P))
True
"""
#Construction of the steps
d = [(1 ,0), (0,1), (-1,0), (0,-1)]
#Construction of the class
super(WordPaths_square_grid, self).__init__(alphabet, steps=d)
@lazy_attribute
def _element_classes(self):
r"""
Returns a dictionary that gives the class of the elements of self.
The word may be finite (infinite words or words of unknown length are
not supported yet).
Its data may be str, list, tuple, a callable or an iterable.
For callable and iterable, the data may be cached.
TESTS::
sage: d = WordPaths('abcd')._element_classes
sage: type(d)
<class 'dict'>
sage: len(d)
7
sage: d['tuple']
<class 'sage.combinat.words.paths.FiniteWordPath_square_grid_tuple'>
"""
return {
'list': FiniteWordPath_square_grid_list,
'str': FiniteWordPath_square_grid_str,
'tuple': FiniteWordPath_square_grid_tuple,
'callable_with_caching': FiniteWordPath_square_grid_callable_with_caching,
'callable': FiniteWordPath_square_grid_callable,
'iter_with_caching': FiniteWordPath_square_grid_iter_with_caching,
'iter': FiniteWordPath_square_grid_iter,
}
def __repr__(self):
r"""
EXAMPLES::
sage: from sage.combinat.words.paths import WordPaths_square_grid
sage: WordPaths_square_grid('abAB').__repr__()
'Word Paths on the square grid'
"""
return "Word Paths on the square grid"
class WordPaths_triangle_grid(WordPaths_all):
r"""
The combinatorial class of all paths on the triangle grid.
"""
def __init__(self, alphabet):
r"""
The combinatorial class of all finite paths on the triangle grid.
INPUT:
- ``alphabet`` - ordered alphabet of length 6. The order for the steps
is : Right, Up-Right, Up-Left, Left, Down-Left, Down-Right.
EXAMPLES::
sage: from sage.combinat.words.paths import WordPaths_triangle_grid
sage: P = WordPaths_triangle_grid('abcdef'); P
Word Paths on the triangle grid
sage: P == loads(dumps(P))
True
"""
K = QuadraticField(3, 'sqrt3')
sqrt3 = K.gen()
#Construction of the steps
d = (vector(K, (1 ,0 )),
vector(K, (ZZ(1)/ZZ(2), sqrt3/2)),
vector(K, (ZZ(-1)/ZZ(2), sqrt3/2)),
vector(K, (-1 , 0 )),
vector(K, (ZZ(-1)/ZZ(2), -sqrt3/2 )),
vector(K, (ZZ(1)/ZZ(2), -sqrt3/2 )))
#Construction of the class
super(WordPaths_triangle_grid, self).__init__(alphabet, steps=d)
self._infinite_word_class = None
self._finite_word_class = FiniteWordPath_triangle_grid
@lazy_attribute
def _element_classes(self):
r"""
Returns a dictionary that gives the class of the elements of self.
The word may be finite (infinite words or words of unknown length are
not supported yet).
Its data may be str, list, tuple, a callable or an iterable.
For callable and iterable, the data may be cached.
TESTS::
sage: d = WordPaths('abcdef', steps='triangle')._element_classes
sage: len(d)
7
sage: type(d)
<class 'dict'>
sage: d['tuple']
<class 'sage.combinat.words.paths.FiniteWordPath_triangle_grid_tuple'>
"""
return {
'list': FiniteWordPath_triangle_grid_list,
'str': FiniteWordPath_triangle_grid_str,
'tuple': FiniteWordPath_triangle_grid_tuple,
'callable_with_caching': FiniteWordPath_triangle_grid_callable_with_caching,
'callable': FiniteWordPath_triangle_grid_callable,
'iter_with_caching': FiniteWordPath_triangle_grid_iter_with_caching,
'iter': FiniteWordPath_triangle_grid_iter,
}
def __repr__(self):
r"""
EXAMPLES::
sage: from sage.combinat.words.paths import WordPaths_triangle_grid
sage: WordPaths_triangle_grid('abcdef').__repr__()
'Word Paths on the triangle grid'
"""
return "Word Paths on the triangle grid"
class WordPaths_hexagonal_grid(WordPaths_triangle_grid):
r"""
The combinatorial class of all paths on the hexagonal grid.
"""
def __init__(self, alphabet):
r"""
The combinatorial class of all finite paths on the hexagonal grid.
INPUT:
- ``alphabet`` - ordered alphabet of length 6. The order for the steps
is : Right, Up-Right, Up-Left, Left, Down-Left, Down-Right.
EXAMPLES::
sage: from sage.combinat.words.paths import WordPaths_hexagonal_grid
sage: P = WordPaths_hexagonal_grid('abcdef'); P
Word Paths on the hexagonal grid
sage: P == loads(dumps(P))
True
"""
#Construction of the class
super(WordPaths_hexagonal_grid, self).__init__(alphabet)
self._infinite_word_class = None
self._finite_word_class = FiniteWordPath_hexagonal_grid
@lazy_attribute
def _element_classes(self):
r"""
Returns a dictionary that gives the class of the elements of self.
The word may be finite (infinite words or words of unknown length are
not supported yet).
Its data may be str, list, tuple, a callable or an iterable.
For callable and iterable, the data may be cached.
TESTS::
sage: d = WordPaths('abcdef', steps='hexagon')._element_classes
sage: type(d)
<class 'dict'>
sage: len(d)
7
sage: d['tuple']
<class 'sage.combinat.words.paths.FiniteWordPath_hexagonal_grid_tuple'>
"""
return {
'list': FiniteWordPath_hexagonal_grid_list,
'str': FiniteWordPath_hexagonal_grid_str,
'tuple': FiniteWordPath_hexagonal_grid_tuple,
'callable_with_caching': FiniteWordPath_hexagonal_grid_callable_with_caching,
'callable': FiniteWordPath_hexagonal_grid_callable,
'iter_with_caching': FiniteWordPath_hexagonal_grid_iter_with_caching,
'iter': FiniteWordPath_hexagonal_grid_iter,
}
def __repr__(self):
r"""
EXAMPLES::
sage: from sage.combinat.words.paths import WordPaths_hexagonal_grid
sage: WordPaths_hexagonal_grid('abcdef').__repr__()
'Word Paths on the hexagonal grid'
"""
return "Word Paths on the hexagonal grid"
class WordPaths_cube_grid(WordPaths_all):
r"""
The combinatorial class of all paths on the cube grid.
"""
def __init__(self, alphabet):
r"""
The combinatorial class of all finite paths on the cube grid.
INPUT:
- ``alphabet`` -- ordered alphabet of length 6. The order for
the steps is `e_x, e_y, e_z, -e_x, -e_y, -e_z`, where `e_v`
denotes the canonical basis.
EXAMPLES::
sage: from sage.combinat.words.paths import WordPaths_cube_grid
sage: P = WordPaths_cube_grid('abcABC'); P
Word Paths on the cube grid
sage: P == loads(dumps(P))
True
"""
#Construction of the class
d = [(1,0,0), (0,1,0), (0,0,1), (-1,0,0), (0,-1,0), (0,0,-1)]
super(WordPaths_cube_grid, self).__init__(alphabet, steps=d)
self._infinite_word_class = None
self._finite_word_class = FiniteWordPath_cube_grid
@lazy_attribute
def _element_classes(self):
r"""
Returns a dictionary that gives the class of the elements of self.
The word may be finite (infinite words or words of unknown length are
not supported yet).
Its data may be str, list, tuple, a callable or an iterable.
For callable and iterable, the data may be cached.
TESTS::
sage: d = WordPaths('abcdef', steps='cube')._element_classes
sage: type(d)
<class 'dict'>
sage: len(d)
7
sage: d['tuple']
<class 'sage.combinat.words.paths.FiniteWordPath_cube_grid_tuple'>
"""
return {'list': FiniteWordPath_cube_grid_list,
'str': FiniteWordPath_cube_grid_str,
'tuple': FiniteWordPath_cube_grid_tuple,
'callable_with_caching': FiniteWordPath_cube_grid_callable_with_caching,
'callable': FiniteWordPath_cube_grid_callable,
'iter_with_caching': FiniteWordPath_cube_grid_iter_with_caching,
'iter': FiniteWordPath_cube_grid_iter,
}
def __repr__(self):
r"""
EXAMPLES::
sage: from sage.combinat.words.paths import WordPaths_cube_grid
sage: WordPaths_cube_grid('abcABC').__repr__()
'Word Paths on the cube grid'
"""
return "Word Paths on the cube grid"
class WordPaths_dyck(WordPaths_all):
r"""
The combinatorial class of all Dyck paths.
"""
def __init__(self, alphabet):
r"""
The combinatorial class of all finite Dyck paths.
INPUT:
- ``alphabet`` - ordered alphabet of length 2. The order for the steps
is : (1,1), (1,-1)
EXAMPLES::
sage: from sage.combinat.words.paths import WordPaths_dyck
sage: P = WordPaths_dyck('[]'); P
Finite Dyck paths
sage: P == loads(dumps(P))
True
"""
#Construction of the class
d = [(1,1), (1,-1)]
super(WordPaths_dyck, self).__init__(alphabet, steps=d)
self._infinite_word_class = None
self._finite_word_class = FiniteWordPath_dyck
@lazy_attribute
def _element_classes(self):
r"""
Returns a dictionary that gives the class of the elements of self.
The word may be finite (infinite words or words of unknown length are
not supported yet).
Its data may be str, list, tuple, a callable or an iterable.
For callable and iterable, the data may be cached.
TESTS::
sage: d = WordPaths('ab', steps='dyck')._element_classes
sage: type(d)
<class 'dict'>
sage: len(d)
7
sage: d['tuple']
<class 'sage.combinat.words.paths.FiniteWordPath_dyck_tuple'>
"""
return {'list': FiniteWordPath_dyck_list,
'str': FiniteWordPath_dyck_str,
'tuple': FiniteWordPath_dyck_tuple,
'callable_with_caching': FiniteWordPath_dyck_callable_with_caching,
'callable': FiniteWordPath_dyck_callable,
'iter_with_caching': FiniteWordPath_dyck_iter_with_caching,
'iter': FiniteWordPath_dyck_iter,
}
def __repr__(self):
r"""
EXAMPLES::
sage: from sage.combinat.words.paths import WordPaths_dyck
sage: WordPaths_dyck('()').__repr__()
'Finite Dyck paths'
"""
return "Finite Dyck paths"
class WordPaths_north_east(WordPaths_all):
r"""
The combinatorial class of all paths using North and East directions.
"""
def __init__(self, alphabet):
r"""
The combinatorial class of all finite paths using only north and east
steps on the square grid.
INPUT:
- ``alphabet`` - ordered alphabet of length 2. The order for the steps
is North, East
EXAMPLES::
sage: from sage.combinat.words.paths import WordPaths_north_east
sage: P = WordPaths_north_east('ab'); P
Word Paths in North and East steps
sage: P == loads(dumps(P))
True
"""
#Construction of the class
d = [(0,1), (1,0)]
super(WordPaths_north_east, self).__init__(alphabet, steps=d)
self._infinite_word_class = None
self._finite_word_class = FiniteWordPath_north_east
@lazy_attribute
def _element_classes(self):
r"""
Returns a dictionary that gives the class of the elements of self.
The word may be finite (infinite words or words of unknown length are
not supported yet).
Its data may be str, list, tuple, a callable or an iterable.
For callable and iterable, the data may be cached.
TESTS::
sage: d = WordPaths('ab', steps='NE')._element_classes
sage: type(d)
<class 'dict'>
sage: len(d)
7
sage: d['tuple']
<class 'sage.combinat.words.paths.FiniteWordPath_north_east_tuple'>
"""
return {'list': FiniteWordPath_north_east_list,
'str': FiniteWordPath_north_east_str,
'tuple': FiniteWordPath_north_east_tuple,
'callable_with_caching': FiniteWordPath_north_east_callable_with_caching,
'callable': FiniteWordPath_north_east_callable,
'iter_with_caching': FiniteWordPath_north_east_iter_with_caching,
'iter': FiniteWordPath_north_east_iter,
}
def __repr__(self):
r"""
EXAMPLES::
sage: from sage.combinat.words.paths import WordPaths_north_east
sage: WordPaths_north_east('ab').__repr__()
'Word Paths in North and East steps'
"""
return "Word Paths in North and East steps"
#######################################################################
# #
# Abstract word path classes #
# (all, 2d, 3d, ...) #
# #
#######################################################################
class FiniteWordPath_all(SageObject):
def _repr_(self):
r"""
Returns a string representation of this path.
EXAMPLES::
sage: F = WordPaths('ab',[(1,0,0,0),(0,1,0,0)]); F
Word Paths over 2 steps
sage: f = F('ababab')
sage: f._repr_()
'Path: ababab'
"""
return "Path: %s"%self.string_rep()
def points(self, include_last=True):
r"""
Returns an iterator yielding a list of points used to draw the path
represented by this word.
INPUT:
- ``include_last`` - bool (default: True) whether to include the
last point
EXAMPLES:
A simple closed square::
sage: P = WordPaths('abAB')
sage: list(P('abAB').points())
[(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)]
A simple closed square without the last point::
sage: list(P('abAB').points(include_last=False))
[(0, 0), (1, 0), (1, 1), (0, 1)]
::
sage: list(P('abaB').points())
[(0, 0), (1, 0), (1, 1), (2, 1), (2, 0)]
"""
curpt = self.start_point()
yield curpt
end = len(self) if include_last else -1
for l in self[:end]:
curpt += self.parent().letters_to_steps()[l]
yield curpt
def start_point(self):
r"""
Return the starting point of self.
OUTPUT:
vector
EXAMPLES::
sage: WordPaths('abcdef')('abcdef').start_point()
(0, 0)
sage: WordPaths('abcdef', steps='cube_grid')('abcdef').start_point()
(0, 0, 0)
sage: P = WordPaths('ab', steps=[(1,0,0,0),(0,1,0,0)])
sage: P('abbba').start_point()
(0, 0, 0, 0)
"""
return self.parent().vector_space()(0)
@cached_method
def end_point(self):
r"""
Returns the end point of the path.
EXAMPLES::
sage: WordPaths('abcdef')('abababab').end_point()
(6, 2*sqrt3)
sage: WordPaths('abAB')('abababab').end_point()
(4, 4)
sage: P = WordPaths('abcABC', steps='cube_grid')
sage: P('ababababCC').end_point()
(4, 4, -2)
sage: WordPaths('abcdef')('abcdef').end_point()
(0, 0)
sage: P = WordPaths('abc', steps=[(1,3,7,9),(-4,1,0,0),(0,32,1,8)])
sage: P('abcabababacaacccbbcac').end_point()
(-16, 254, 63, 128)
"""
last = None
for pt in self.points():
last = pt
return last
def directive_vector(self):
r"""
Returns the directive vector of self.
The directive vector is the vector starting at the start point
and ending at the end point of the path self.
EXAMPLES::
sage: WordPaths('abcdef')('abababab').directive_vector()
(6, 2*sqrt3)
sage: WordPaths('abAB')('abababab').directive_vector()
(4, 4)
sage: P = WordPaths('abcABC', steps='cube_grid')
sage: P('ababababCC').directive_vector()
(4, 4, -2)
sage: WordPaths('abcdef')('abcdef').directive_vector()
(0, 0)
sage: P = WordPaths('abc', steps=[(1,3,7,9),(-4,1,0,0),(0,32,1,8)])
sage: P('abcabababacaacccbbcac').directive_vector()
(-16, 254, 63, 128)
"""
return self.end_point() - self.start_point()
def is_closed(self):
r"""
Returns True if the path is closed, i.e. if the origin and the end of
the path are equal.
EXAMPLES::
sage: P = WordPaths('abcd', steps=[(1,0),(0,1),(-1,0),(0,-1)])
sage: P('abcd').is_closed()
True
sage: P('abc').is_closed()
False
sage: P().is_closed()
True
sage: P('aacacc').is_closed()
True
"""
return self.start_point() == self.end_point()
def is_simple(self):
r"""
Returns True if the path is simple, i.e. if all its points are
distinct.
If the path is closed, the last point is not considered.
EXAMPLES::
sage: P = WordPaths('abcdef',steps='triangle_grid');P
Word Paths on the triangle grid
sage: P('abc').is_simple()
True
sage: P('abcde').is_simple()
True
sage: P('abcdef').is_simple()
True
sage: P('ad').is_simple()
True
sage: P('aabdee').is_simple()
False
"""
n = 0
s = set()
include_last = not self.is_closed()
for p in self.points(include_last=include_last):
# We need the elements to have a common parent,
# so we convert the points to immutable vectors.
v = vector(p)
v.set_immutable()
s.add(v)
n += 1
if len(s) != n:
return False
return True
def tikz_trajectory(self):
r"""
Returns the trajectory of self as a tikz str.
EXAMPLES::
sage: P = WordPaths('abcdef')
sage: p = P('abcde')
sage: p.tikz_trajectory()
'(0.000, 0.000) -- (1.00, 0.000) -- (1.50, 0.866) -- (1.00, 1.73) -- (0.000, 1.73) -- (-0.500, 0.866)'
"""
from sage.all import n
f = lambda x: n(x,digits=3)
l = [str(tuple(map(f, pt))) for pt in self.points()]
return ' -- '.join(l)
def projected_point_iterator(self, v=None, ring=None):
r"""
Return an iterator of the projection of the orbit points of the
path into the space orthogonal to the given vector.
INPUT:
- ``v`` - vector (optional, default: None) If None, the directive
vector (i.e. the end point minus starting point) of the path is
considered.
- ``ring`` - ring (optional, default: None) where to do the
computations. If None, RealField(53) is used.
OUTPUT:
iterator of points
EXAMPLES:
Projected points of the Rauzy fractal::
sage: s = WordMorphism('1->12,2->13,3->1')
sage: D = s.fixed_point('1')
sage: v = s.pisot_eigenvector_right()
sage: P = WordPaths('123',[(1,0,0),(0,1,0),(0,0,1)])
sage: w = P(D[:200])
sage: it = w.projected_point_iterator(v)
sage: for i in range(6): next(it)
(0.000000000000000, 0.000000000000000)
(-0.526233343362516, 0.000000000000000)
(0.220830337618112, -0.477656250512816)
(-0.305403005744404, -0.477656250512816)
(0.100767309386062, 0.400890564600664)
(-0.425466033976454, 0.400890564600664)
Projected points of a 2d path::
sage: P = WordPaths('ab','ne')
sage: p = P('aabbabbab')
sage: it = p.projected_point_iterator(ring=RealField(20))
sage: for i in range(8): next(it)
(0.00000)
(0.78087)
(1.5617)
(0.93704)
(0.31235)
(1.0932)
(0.46852)
(-0.15617)
"""
if v is None:
v = self.directive_vector()
if ring is None:
ring = RR
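# R is the rotation sending v onto the first coordinate axis, with its first
# row dropped, so R * q gives the coordinates of q in the hyperplane
# orthogonal to v.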
R = vector_on_axis_rotation_matrix(v, 0, ring=ring)[1:]
for q in self.points():
yield R * q
def plot_projection(self, v=None, letters=None, color=None, ring=None,
size=12, kind='right'):
r"""
Return an image of the projection of the successive points of the
path into the space orthogonal to the given vector.
INPUT:
- ``self`` - a word path in a 3 or 4 dimension vector space
- ``v`` - vector (optional, default: None) If None, the directive
vector (i.e. the end point minus starting point) of the path is
considered.
- ``letters`` - iterable (optional, default: None) of the letters
to be projected. If None, then all the letters are considered.
- ``color`` - dictionary (optional, default: None) of the letters
mapped to colors. If None, automatic colors are chosen.
- ``ring`` - ring (optional, default: None) where to do the
computations. If None, RealField(53) is used.
- ``size`` - number (optional, default: ``12``) size of the points.
- ``kind`` - string (optional, default ``'right'``) either
``'right'`` or ``'left'``. The color of a letter is given to the
projected prefix to the right or the left of the letter.
OUTPUT:
2d or 3d Graphic object.
EXAMPLES:
The Rauzy fractal::
sage: s = WordMorphism('1->12,2->13,3->1')
sage: D = s.fixed_point('1')
sage: v = s.pisot_eigenvector_right()
sage: P = WordPaths('123',[(1,0,0),(0,1,0),(0,0,1)])
sage: w = P(D[:200])
sage: w.plot_projection(v) # long time (2s)
Graphics object consisting of 200 graphics primitives
In this case, the abelianized vector doesn't give a good
projection::
sage: w.plot_projection() # long time (2s)
Graphics object consisting of 200 graphics primitives
You can project only the letters you want::
sage: w.plot_projection(v, letters='12') # long time (2s)
Graphics object consisting of 168 graphics primitives
You can increase or decrease the precision of the computations by
changing the ring of the projection matrix::
sage: w.plot_projection(v, ring=RealField(20)) # long time (2s)
Graphics object consisting of 200 graphics primitives
You can change the size of the points::
sage: w.plot_projection(v, size=30) # long time (2s)
Graphics object consisting of 200 graphics primitives
You can assign the color of a letter to the projected prefix to the
right or the left of the letter::
sage: w.plot_projection(v, kind='left') # long time (2s)
Graphics object consisting of 200 graphics primitives
To remove the axis, do like this::
sage: r = w.plot_projection(v)
sage: r.axes(False)
sage: r # long time (2s)
Graphics object consisting of 200 graphics primitives
You can assign different colors to each letter::
sage: color = {'1':'purple', '2':(.2,.3,.4), '3': 'magenta'}
sage: w.plot_projection(v, color=color) # long time (2s)
Graphics object consisting of 200 graphics primitives
The 3d-Rauzy fractal::
sage: s = WordMorphism('1->12,2->13,3->14,4->1')
sage: D = s.fixed_point('1')
sage: v = s.pisot_eigenvector_right()
sage: P = WordPaths('1234',[(1,0,0,0), (0,1,0,0), (0,0,1,0), (0,0,0,1)])
sage: w = P(D[:200])
sage: w.plot_projection(v)
Graphics3d Object
The dimension of vector space of the parent must be 3 or 4::
sage: P = WordPaths('ab', [(1, 0), (0, 1)])
sage: p = P('aabbabbab')
sage: p.plot_projection()
Traceback (most recent call last):
...
TypeError: The dimension of the vector space (=2) must be 3 or 4
"""
dimension = self.parent().vector_space().dimension()
if dimension not in (3, 4):
msg = "The dimension of the vector space (=%s) must be 3 or 4" % dimension
raise TypeError(msg)
if letters is None:
letters = self.parent().alphabet()
if color is None:
from sage.plot.all import hue
A = self.parent().alphabet()
color = {a: hue(A.rank(a)/float(A.cardinality())) for a in A}
it = self.projected_point_iterator(v, ring=ring)
if kind == 'right':
next(it)
elif kind != 'left':
raise ValueError('unknown value for kind (=%s)' % kind)
tout = [point([c], color=color[a], size=size)
for a, c in zip(self, it) if a in letters]
return sum(tout)
def projected_path(self, v=None, ring=None):
r"""
Return the path projected into the space orthogonal to the given
vector.
INPUT:
- ``v`` - vector (optional, default: None) If None, the directive
vector (i.e. the end point minus starting point) of the path is
considered.
- ``ring`` - ring (optional, default: None) where to do the
computations. If None, RealField(53) is used.
OUTPUT:
word path
EXAMPLES:
The projected path of the tribonacci word::
sage: s = WordMorphism('1->12,2->13,3->1')
sage: D = s.fixed_point('1')
sage: v = s.pisot_eigenvector_right()
sage: P = WordPaths('123',[(1,0,0),(0,1,0),(0,0,1)])
sage: w = P(D[:1000])
sage: p = w.projected_path(v)
sage: p
Path: 1213121121312121312112131213121121312121...
sage: p[:20].plot()
Graphics object consisting of 3 graphics primitives
The ``ring`` argument allows to change the precision of the
projected steps::
sage: p = w.projected_path(v, RealField(10))
sage: p
Path: 1213121121312121312112131213121121312121...
sage: p.parent().letters_to_steps()
{'1': (-0.53, 0.00), '2': (0.75, -0.48), '3': (0.41, 0.88)}
"""
if v is None:
v = self.directive_vector()
if ring is None:
ring = RR
R = vector_on_axis_rotation_matrix(v, 0, ring=ring)[1:]
d = self.parent().letters_to_steps()
A = self.parent().alphabet()
nvvectors = [R*d[a] for a in A]
projected_parent = WordPaths(A, nvvectors)
return projected_parent(self)
def is_tangent(self):
r"""
The is_tangent() method, which is implemented for words, has
an extended meaning for word paths, which is not implemented yet.
TESTS::
sage: WordPaths('ab')('abbab').is_tangent()
Traceback (most recent call last):
...
NotImplementedError
AUTHOR:
- Thierry Monteil
"""
raise NotImplementedError
class FiniteWordPath_2d(FiniteWordPath_all):
def plot(self, pathoptions=dict(rgbcolor='red',thickness=3),
fill=True, filloptions=dict(rgbcolor='red',alpha=0.2),
startpoint=True, startoptions=dict(rgbcolor='red',pointsize=100),
endarrow=True, arrowoptions=dict(rgbcolor='red',arrowsize=20,width=3),
gridlines=False, gridoptions=dict()):
r"""
Returns a 2d Graphics illustrating the path.
INPUT:
- ``pathoptions`` - (dict,
default:dict(rgbcolor='red',thickness=3)), options for the
path drawing
- ``fill`` - (boolean, default: True), if fill is True and if
the path is closed, the inside is colored
- ``filloptions`` - (dict,
default:dict(rgbcolor='red',alpha=0.2)), options for the
inside filling
- ``startpoint`` - (boolean, default: True), draw the start point?
- ``startoptions`` - (dict,
default:dict(rgbcolor='red',pointsize=100)) options for the
start point drawing
- ``endarrow`` - (boolean, default: True), draw an arrow end at the end?
- ``arrowoptions`` - (dict,
default:dict(rgbcolor='red',arrowsize=20, width=3)) options
for the end point arrow
- ``gridlines``- (boolean, default: False), show gridlines?
- ``gridoptions`` - (dict, default: {}), options for the gridlines
EXAMPLES:
A non closed path on the square grid::
sage: P = WordPaths('abAB')
sage: P('abababAABAB').plot()
Graphics object consisting of 3 graphics primitives
A closed path on the square grid::
sage: P('abababAABABB').plot()
Graphics object consisting of 4 graphics primitives
A Dyck path::
sage: P = WordPaths('()', steps='dyck')
sage: P('()()()((()))').plot()
Graphics object consisting of 3 graphics primitives
A path in the triangle grid::
sage: P = WordPaths('abcdef', steps='triangle_grid')
sage: P('abcdedededefab').plot()
Graphics object consisting of 3 graphics primitives
A polygon of length 220 that tiles the plane in two ways::
sage: P = WordPaths('abAB')
sage: P('aBababAbabaBaBABaBabaBaBABAbABABaBabaBaBABaBababAbabaBaBABaBabaBaBABAbABABaBABAbAbabAbABABaBABAbABABaBabaBaBABAbABABaBABAbAbabAbABAbAbabaBababAbABAbAbabAbABABaBABAbAbabAbABAbAbabaBababAbabaBaBABaBababAbabaBababAbABAbAbab').plot()
Graphics object consisting of 4 graphics primitives
With gridlines::
sage: P('ababababab').plot(gridlines=True)
TESTS::
sage: P = WordPaths('abAB')
sage: P().plot()
Graphics object consisting of 3 graphics primitives
sage: sum(map(plot,map(P,['a','A','b','B'])))
Graphics object consisting of 12 graphics primitives
"""
G = Graphics()
pts = list(self.points())
####################
####################
# FIXME Bug: plot needs float for coordinates
####################
####################
pts = [[RR(i) for i in x] for x in pts]
#Inside
if fill and self.is_closed():
G += polygon(pts, **filloptions)
#Startpoint
if startpoint:
G += point(pts[0], **startoptions)
#The path itself
if endarrow and not self.is_empty():
G += line(pts[:-1], **pathoptions)
G += arrow(pts[-2], pts[-1], **arrowoptions)
else:
G += line(pts, **pathoptions)
G.axes(False)
G.set_aspect_ratio(1)
#gridlines
###############BUG##############
#Gridlines doesn't work fine.
#It should be gridlines="integers"
###############BUG##############
if gridlines:
G = G.show(gridlines=True, **gridoptions)
return G
def animate(self):
r"""
Returns an animation object illustrating the path growing step by step.
EXAMPLES::
sage: P = WordPaths('abAB')
sage: p = P('aaababbb')
sage: a = p.animate(); a # optional -- ImageMagick
Animation with 9 frames
sage: show(a) # optional -- ImageMagick
sage: a.gif(delay=35, iterations=3) # optional -- ImageMagick
::
sage: P = WordPaths('abcdef',steps='triangle')
sage: p = P('abcdef')
sage: p.animate() # optional -- ImageMagick
Animation with 8 frames
If the path is closed, the plain polygon is added at the end of the
animation::
sage: P = WordPaths('abAB')
sage: p = P('ababAbABABaB')
sage: a = p.animate(); a # optional -- ImageMagick
Animation with 14 frames
Another example illustrating a Fibonacci tile::
sage: w = words.fibonacci_tile(2)
sage: show(w.animate()) # optional -- ImageMagick
The first 4 Fibonacci tiles in an animation::
sage: a = words.fibonacci_tile(0).animate()
sage: b = words.fibonacci_tile(1).animate()
sage: c = words.fibonacci_tile(2).animate()
sage: d = words.fibonacci_tile(3).animate()
sage: (a*b*c*d).show() # optional -- ImageMagick
.. note::
If ImageMagick is not installed, you will get an error
message like this::
/usr/local/share/sage/local/bin/sage-native-execute: 8: convert:
not found
Error: ImageMagick does not appear to be installed. Saving an
animation to a GIF file or displaying an animation requires
ImageMagick, so please install it and try again.
See www.imagemagick.org, for example.
"""
from sage.plot.all import line, polygon, animate
pts = list(self.points())
####################
####################
#Bug: plot needs float for coordinates
####################
####################
pts = [[RR(i) for i in x] for x in pts]
images = [line(pts[:i]) for i in range(1,len(pts)+1)]
if self.is_closed():
images.append(polygon(pts))
#Get the window of the last image
last_image = images[-1]
kwds = {}
kwds['xmin'] = last_image.xmin()
kwds['xmax'] = last_image.xmax()
kwds['ymin'] = last_image.ymin()
kwds['ymax'] = last_image.ymax()
kwds['aspect_ratio'] = 1
kwds['axes'] = False
return animate(images, **kwds)
def plot_directive_vector(self, options=dict(rgbcolor='blue')):
r"""
Returns an arrow 2d graphics that goes from the start of the path
to the end.
INPUT:
- ``options`` - dictionary, default: {'rgbcolor': 'blue'} graphic
options for the arrow
If the start is the same as the end, a single point is returned.
EXAMPLES::
sage: P = WordPaths('abcd'); P
Word Paths on the square grid
sage: p = P('aaaccaccacacacaccccccbbdd'); p
Path: aaaccaccacacacaccccccbbdd
sage: R = p.plot() + p.plot_directive_vector()
sage: R.axes(False)
sage: R.set_aspect_ratio(1)
sage: R.plot()
Graphics object consisting of 4 graphics primitives
TESTS:
A closed path::
sage: P('acbd').plot_directive_vector()
Graphics object consisting of 1 graphics primitive
"""
start = self.start_point()
end = self.end_point()
if (start == end):
G = point(start, pointsize=10, **options)
else:
G = arrow(start, end, **options)
G.axes(False)
G.set_aspect_ratio(1)
return G
def area(self):
r"""
Returns the area of a closed path.
INPUT:
- ``self`` - a closed path
EXAMPLES::
sage: P = WordPaths('abcd',steps=[(1,1),(-1,1),(-1,-1),(1,-1)])
sage: p = P('abcd')
sage: p.area() #todo: not implemented
2
"""
if not self.is_closed():
raise TypeError("the path must be closed to compute its area")
return NotImplemented
def height(self):
r"""
Returns the height of self.
The height of a `2d`-path is merely the difference
between the highest and the lowest `y`-coordinate of the
points traced by it.
OUTPUT:
non-negative real number
EXAMPLES::
sage: Freeman = WordPaths('abAB')
sage: Freeman('aababaabbbAA').height()
5
The function is well-defined even if self is not simple or not closed::
sage: Freeman('aabAAB').height()
1
sage: Freeman('abbABa').height()
2
This works for any `2d`-paths::
sage: Paths = WordPaths('ab', steps=[(1,0),(1,1)])
sage: p = Paths('abbaa')
sage: p.height()
2
sage: DyckPaths = WordPaths('ab', steps='dyck')
sage: p = DyckPaths('abaabb')
sage: p.height()
2
sage: w = WordPaths('abcABC', steps='triangle')('ababcaaBC')
sage: w.height()
2.59807621135332
"""
return self.ymax() - self.ymin()
def width(self):
r"""
Returns the width of self.
The width of a `2d`-path is merely the difference
between the rightmost and the leftmost `x`-coordinate of the
points traced by it.
OUTPUT:
non-negative real number
EXAMPLES::
sage: Freeman = WordPaths('abAB')
sage: Freeman('aababaabbbAA').width()
5
The function is well-defined even if self is not simple or not closed::
sage: Freeman('aabAAB').width()
2
sage: Freeman('abbABa').width()
1
This works for any `2d`-paths::
sage: Paths = WordPaths('ab', steps=[(1,0),(1,1)])
sage: p = Paths('abbaa')
sage: p.width()
5
sage: DyckPaths = WordPaths('ab', steps='dyck')
sage: p = DyckPaths('abaabb')
sage: p.width()
6
sage: w = WordPaths('abcABC', steps='triangle')('ababcaaBC')
sage: w.width()
4.50000000000000
"""
return self.xmax() - self.xmin()
def xmin(self):
r"""
Returns the minimum of the x-coordinates of the path.
EXAMPLES::
sage: P = WordPaths('0123')
sage: p = P('0101013332')
sage: p.xmin()
0
This works for any `2d`-paths::
sage: Paths = WordPaths('ab', steps=[(1,0),(-1,1)])
sage: p = Paths('abbba')
sage: p.xmin()
-2
sage: DyckPaths = WordPaths('ab', steps='dyck')
sage: p = DyckPaths('abaabb')
sage: p.xmin()
0
sage: w = WordPaths('abcABC', steps='triangle')('ababcaaBC')
sage: w.xmin()
0.000000000000000
"""
return min(x for (x,_) in self.points())
def ymin(self):
r"""
Returns the minimum of the y-coordinates of the path.
EXAMPLES::
sage: P = WordPaths('0123')
sage: p = P('0101013332')
sage: p.ymin()
0
This works for any `2d`-paths::
sage: Paths = WordPaths('ab', steps=[(1,-1),(-1,1)])
sage: p = Paths('ababa')
sage: p.ymin()
-1
sage: DyckPaths = WordPaths('ab', steps='dyck')
sage: p = DyckPaths('abaabb')
sage: p.ymin()
0
sage: w = WordPaths('abcABC', steps='triangle')('ababcaaBC')
sage: w.ymin()
0.000000000000000
"""
return min(y for (_,y) in self.points())
def xmax(self):
r"""
Returns the maximum of the x-coordinates of the path.
EXAMPLES::
sage: P = WordPaths('0123')
sage: p = P('0101013332')
sage: p.xmax()
3
This works for any `2d`-paths::
sage: Paths = WordPaths('ab', steps=[(1,-1),(-1,1)])
sage: p = Paths('ababa')
sage: p.xmax()
1
sage: DyckPaths = WordPaths('ab', steps='dyck')
sage: p = DyckPaths('abaabb')
sage: p.xmax()
6
sage: w = WordPaths('abcABC', steps='triangle')('ababcaaBC')
sage: w.xmax()
4.50000000000000
"""
return max(x for (x,_) in self.points())
def ymax(self):
r"""
Returns the maximum of the y-coordinates of the path.
EXAMPLES::
sage: P = WordPaths('0123')
sage: p = P('0101013332')
sage: p.ymax()
3
This works for any `2d`-paths::
sage: Paths = WordPaths('ab', steps=[(1,-1),(-1,1)])
sage: p = Paths('ababa')
sage: p.ymax()
0
sage: DyckPaths = WordPaths('ab', steps='dyck')
sage: p = DyckPaths('abaabb')
sage: p.ymax()
2
sage: w = WordPaths('abcABC', steps='triangle')('ababcaaBC')
sage: w.ymax()
2.59807621135332
"""
return max(y for (_,y) in self.points())
class FiniteWordPath_3d(FiniteWordPath_all):
def plot(self, pathoptions=dict(rgbcolor='red',arrow_head=True,thickness=3),
startpoint=True, startoptions=dict(rgbcolor='red',size=10)):
r"""
INPUT:
- ``pathoptions`` - (dict, default:dict(rgbcolor='red',arrow_head=True,
thickness=3)), options for the path drawing
- ``startpoint`` - (boolean, default: True), draw the start point?
- ``startoptions`` - (dict, default:dict(rgbcolor='red',size=10))
options for the start point drawing
EXAMPLES::
sage: d = ( vector((1,3,2)), vector((2,-4,5)) )
sage: P = WordPaths(alphabet='ab', steps=d); P
Word Paths over 2 steps
sage: p = P('ababab'); p
Path: ababab
sage: p.plot()
Graphics3d Object
sage: P = WordPaths('abcABC', steps='cube_grid')
sage: p = P('abcabcAABBC')
sage: p.plot()
Graphics3d Object
"""
#The following line seems not to work for 3d
#G = Graphics()
#so, to start, we draw a small, almost invisible point instead:
G = point([self.start_point()], size=1)
pts = list(self.points())
if startpoint:
G += point([pts[0]], **startoptions)
G += line(pts, **pathoptions)
return G
#######################################################################
# #
# Abstract word path classes #
# (square grid, hexagonal grid, etc.) #
# #
#######################################################################
class FiniteWordPath_square_grid(FiniteWordPath_2d):
def is_closed(self):
r"""
Returns True if self represents a closed path and False otherwise.
EXAMPLES::
sage: P = WordPaths('abAB', steps='square_grid')
sage: P('aA').is_closed()
True
sage: P('abAB').is_closed()
True
sage: P('ababAABB').is_closed()
True
sage: P('aaabbbAABB').is_closed()
False
sage: P('ab').is_closed()
False
"""
tab = self.abelian_vector()
return tab[0] == tab[2] and tab[1] == tab[3]
def area(self):
r"""
Returns the area of a closed path.
INPUT:
- ``self`` - a closed path
EXAMPLES::
sage: P = WordPaths('abAB', steps='square_grid')
sage: P('abAB').area()
1
sage: P('aabbAABB').area()
4
sage: P('aabbABAB').area()
3
The area of the Fibonacci tiles::
sage: [words.fibonacci_tile(i).area() for i in range(6)]
[1, 5, 29, 169, 985, 5741]
sage: [words.dual_fibonacci_tile(i).area() for i in range(6)]
[1, 5, 29, 169, 985, 5741]
sage: oeis(_)[0] # optional -- internet
A001653: Numbers k such that 2*k^2 - 1 is a square.
sage: _.first_terms() # optional -- internet
(1,
5,
29,
169,
985,
5741,
33461,
195025,
1136689,
6625109,
38613965,
225058681,
1311738121,
7645370045,
44560482149,
259717522849,
1513744654945,
8822750406821,
51422757785981,
299713796309065,
1746860020068409,
10181446324101389,
59341817924539925)
TESTS::
sage: P = WordPaths('abAB', steps='square_grid')
sage: P('a').area()
Traceback (most recent call last):
...
TypeError: the path must be closed to compute its area
"""
if not self.is_closed():
raise TypeError("the path must be closed to compute its area")
return abs(self._area_vh())
def _area_vh(self, x=0, y=0):
r"""
Return the area of ``self``, with starting point (x,y).
This uses the VH algorithm.
INPUT:
- x, y -- starting point (optional, default (0, 0))
EXAMPLES::
sage: P = WordPaths('abAB', steps='square_grid')
sage: P('abAB')._area_vh()
-1
sage: P('aabbAABB')._area_vh()
-4
sage: P('aabbABAB')._area_vh()
-3
REFERENCES:
Annie Lacasse Memoire.
"""
area = 0
a, b, A, B = self.parent().alphabet()
for move in self:
if move == b:
area -= x
y += 1
elif move == B:
area += x
y -= 1
elif move == a:
area += y
x += 1
elif move == A:
area -= y
x -= 1
return area // 2
def is_simple(self):
r"""
Returns True if the path is simple, i.e. if all its points are
distinct.
If the path is closed, the last point is not considered.
.. note::
The linear algorithm described in the thesis of Xavier Provençal
should be implemented here.
EXAMPLES::
sage: P = WordPaths('abAB', steps='square_grid')
sage: P('abab').is_simple()
True
sage: P('abAB').is_simple()
True
sage: P('abA').is_simple()
True
sage: P('aabABB').is_simple()
False
sage: P().is_simple()
True
sage: P('A').is_simple()
True
sage: P('aA').is_simple()
True
sage: P('aaA').is_simple()
False
REFERENCES:
- Provençal, X., *Combinatoires des mots, géometrie discrète et
pavages*, Thèse de doctorat en Mathématiques, Montréal, UQAM,
septembre 2008, 115 pages.
"""
return super(FiniteWordPath_square_grid,self).is_simple()
def tikz_trajectory(self):
r"""
Returns the trajectory of self as a tikz str.
EXAMPLES::
sage: f = words.fibonacci_tile(1)
sage: f.tikz_trajectory()
'(0, 0) -- (0, -1) -- (-1, -1) -- (-1, -2) -- (0, -2) -- (0, -3) -- (1, -3) -- (1, -2) -- (2, -2) -- (2, -1) -- (1, -1) -- (1, 0) -- (0, 0)'
"""
return ' -- '.join(map(str,self.points()))
class FiniteWordPath_triangle_grid(FiniteWordPath_2d):
# Triangle grid paths are implemented with quadratic fields,
# and the ordering of such elements is currently problematic:
#
# sage: Q.<sqrt3> = QuadraticField(3)
# sage: sqrt3 > 0
# True
# sage: 0 < sqrt3
# False
# sage: max(2*sqrt3, sqrt3/10)
# 1/10*sqrt3
#
# Therefore, the functions xmin(), xmax(), ymin() and ymax() are
# redefined here with conversion to RR in order to avoid this problem
def xmin(self):
r"""
Returns the minimum of the x-coordinates of the path.
EXAMPLES::
sage: w = WordPaths('abcABC', steps='triangle')('ababcaaBC')
sage: w.xmin()
0.000000000000000
sage: w = WordPaths('abcABC', steps='triangle')('ABAcacacababababcbcbAC')
sage: w.xmin()
-3.00000000000000
"""
return min(RR(x) for (x,_) in self.points())
def ymin(self):
r"""
Returns the minimum of the y-coordinates of the path.
EXAMPLES::
sage: w = WordPaths('abcABC', steps='triangle')('ababcaaBC')
sage: w.ymin()
0.000000000000000
sage: w = WordPaths('abcABC', steps='triangle')('ABAcacacababababcbcbAC')
sage: w.ymin()
-0.866025403784439
"""
return min(RR(y) for (_,y) in self.points())
def xmax(self):
r"""
Returns the maximum of the x-coordinates of the path.
EXAMPLES::
sage: w = WordPaths('abcABC', steps='triangle')('ababcaaBC')
sage: w.xmax()
4.50000000000000
sage: w = WordPaths('abcABC', steps='triangle')('ABAcacacababababcbcbAC')
sage: w.xmax()
4.00000000000000
"""
return max(RR(x) for (x,_) in self.points())
def ymax(self):
r"""
Returns the maximum of the y-coordinates of the path.
EXAMPLES::
sage: w = WordPaths('abcABC', steps='triangle')('ababcaaBC')
sage: w.ymax()
2.59807621135332
sage: w = WordPaths('abcABC', steps='triangle')('ABAcacacababababcbcbAC')
sage: w.ymax()
8.66025403784439
"""
return max(RR(y) for (_,y) in self.points())
#TODO: add a check that the word actually stays on the hexagonal grid
class FiniteWordPath_hexagonal_grid(FiniteWordPath_triangle_grid):
def __init__(self, parent, *args, **kwds):
r"""
INPUT:
- ``parent`` - a parent object inheriting from Words_all
that has the alphabet attribute defined
- ``*args, **kwds`` - arguments accepted by AbstractWord
EXAMPLES::
sage: F = WordPaths('abcdef', steps='hexagon'); F
Word Paths on the hexagonal grid
sage: f = F('aaabbbccddef'); f
Path: aaabbbccddef
::
sage: f == loads(dumps(f))
True
"""
super(FiniteWordPath_hexagonal_grid, self).__init__(parent, *args, **kwds)
class FiniteWordPath_cube_grid(FiniteWordPath_3d):
pass
class FiniteWordPath_north_east(FiniteWordPath_2d):
pass
class FiniteWordPath_dyck(FiniteWordPath_2d):
pass
#######################################################################
# #
# Concrete word path classes #
# #
# It would be nice if those were created inline... #
# We must ask if Nicolas Thiery was able to convince Sage #
# people about this. #
# #
#######################################################################
##### Finite paths #####
class FiniteWordPath_all_list(WordDatatype_list, FiniteWordPath_all, FiniteWord_class):
r"""
TESTS::
sage: P = WordPaths(['a','b'],[(1,2,0,0),(3,4,0,0)])
sage: p = P(['a','b','a']);p
Path: aba
sage: type(p)
<class 'sage.combinat.words.paths.FiniteWordPath_all_list'>
sage: p == loads(dumps(p))
True
"""
pass
class FiniteWordPath_all_str(WordDatatype_str, FiniteWordPath_all, FiniteWord_class):
r"""
TESTS::
sage: P = WordPaths('ab',[(1,2,0,0),(3,4,0,0)])
sage: p = P('aabbb'); p
Path: aabbb
sage: type(p)
<class 'sage.combinat.words.paths.FiniteWordPath_all_str'>
sage: p == loads(dumps(p))
True
"""
pass
class FiniteWordPath_all_tuple(WordDatatype_tuple, FiniteWordPath_all, FiniteWord_class):
r"""
TESTS::
sage: P = WordPaths('ab',[(1,2,0,0),(3,4,0,0)])
sage: p = P( ('a','b','b') ); p
Path: abb
sage: type(p)
<class 'sage.combinat.words.paths.FiniteWordPath_all_tuple'>
sage: p == loads(dumps(p))
True
"""
pass
class FiniteWordPath_all_iter_with_caching(WordDatatype_iter_with_caching, FiniteWordPath_all, FiniteWord_class):
pass
class FiniteWordPath_all_iter(WordDatatype_iter, FiniteWordPath_all, FiniteWord_class):
pass
class FiniteWordPath_all_callable_with_caching(WordDatatype_callable_with_caching, FiniteWordPath_all, FiniteWord_class):
pass
class FiniteWordPath_all_callable(WordDatatype_callable, FiniteWordPath_all, FiniteWord_class):
pass
##### Finite paths on 2d #####
class FiniteWordPath_2d_list(WordDatatype_list, FiniteWordPath_2d, FiniteWord_class):
r"""
TESTS::
sage: P = WordPaths(['a','b'],[(1,2),(3,4)])
sage: p = P(['a','b','a']);p
Path: aba
sage: type(p)
<class 'sage.combinat.words.paths.FiniteWordPath_2d_list'>
sage: p == loads(dumps(p))
True
"""
pass
class FiniteWordPath_2d_str(WordDatatype_str, FiniteWordPath_2d, FiniteWord_class):
r"""
TESTS::
sage: P = WordPaths(['a','b'],[(1,2),(3,4)])
sage: p = P('aba'); p
Path: aba
sage: type(p)
<class 'sage.combinat.words.paths.FiniteWordPath_2d_str'>
sage: p == loads(dumps(p))
True
"""
pass
class FiniteWordPath_2d_tuple(WordDatatype_tuple, FiniteWordPath_2d, FiniteWord_class):
r"""
TESTS::
sage: P = WordPaths(['a','b'],[(1,2),(3,4)])
sage: p = P(('a','b','a'));p
Path: aba
sage: type(p)
<class 'sage.combinat.words.paths.FiniteWordPath_2d_tuple'>
sage: p == loads(dumps(p))
True
"""
pass
class FiniteWordPath_2d_iter_with_caching(WordDatatype_iter_with_caching, FiniteWordPath_2d, FiniteWord_class):
pass
class FiniteWordPath_2d_iter(WordDatatype_iter, FiniteWordPath_2d, FiniteWord_class):
pass
class FiniteWordPath_2d_callable_with_caching(WordDatatype_callable_with_caching, FiniteWordPath_2d, FiniteWord_class):
pass
class FiniteWordPath_2d_callable(WordDatatype_callable, FiniteWordPath_2d, FiniteWord_class):
pass
##### Finite paths on 3d #####
class FiniteWordPath_3d_list(WordDatatype_list, FiniteWordPath_3d, FiniteWord_class):
r"""
TESTS::
sage: P = WordPaths(['a','b'],[(1,2,0),(3,4,0)])
sage: p = P(['a','b','a']);p
Path: aba
sage: type(p)
<class 'sage.combinat.words.paths.FiniteWordPath_3d_list'>
sage: p == loads(dumps(p))
True
"""
pass
class FiniteWordPath_3d_str(WordDatatype_str, FiniteWordPath_3d, FiniteWord_class):
r"""
TESTS::
sage: P = WordPaths(['a','b'],[(1,2,0),(3,4,0)])
sage: p = P('aba'); p
Path: aba
sage: type(p)
<class 'sage.combinat.words.paths.FiniteWordPath_3d_str'>
sage: p == loads(dumps(p))
True
"""
pass
class FiniteWordPath_3d_tuple(WordDatatype_tuple, FiniteWordPath_3d, FiniteWord_class):
r"""
TESTS::
sage: P = WordPaths(['a','b'],[(1,2,0),(3,4,0)])
sage: p = P(('a','b','a'));p
Path: aba
sage: type(p)
<class 'sage.combinat.words.paths.FiniteWordPath_3d_tuple'>
sage: p == loads(dumps(p))
True
"""
pass
class FiniteWordPath_3d_iter_with_caching(WordDatatype_iter_with_caching, FiniteWordPath_3d, FiniteWord_class):
pass
class FiniteWordPath_3d_iter(WordDatatype_iter, FiniteWordPath_3d, FiniteWord_class):
pass
class FiniteWordPath_3d_callable_with_caching(WordDatatype_callable_with_caching, FiniteWordPath_3d, FiniteWord_class):
pass
class FiniteWordPath_3d_callable(WordDatatype_callable, FiniteWordPath_3d, FiniteWord_class):
pass
##### Finite paths on square grid #####
class FiniteWordPath_square_grid_list(WordDatatype_list, FiniteWordPath_square_grid, FiniteWord_class):
r"""
TESTS::
sage: P = WordPaths('abcd', steps='square')
sage: p = P(['a','b','b']); p
Path: abb
sage: type(p)
<class 'sage.combinat.words.paths.FiniteWordPath_square_grid_list'>
sage: p == loads(dumps(p))
True
"""
pass
class FiniteWordPath_square_grid_str(WordDatatype_str, FiniteWordPath_square_grid, FiniteWord_class):
r"""
TESTS::
sage: P = WordPaths('abcd', steps='square')
sage: p = P('abccc'); p
Path: abccc
sage: type(p)
<class 'sage.combinat.words.paths.FiniteWordPath_square_grid_str'>
sage: p == loads(dumps(p))
True
"""
pass
class FiniteWordPath_square_grid_tuple(WordDatatype_tuple, FiniteWordPath_square_grid, FiniteWord_class):
r"""
TESTS::
sage: P = WordPaths('abcd', steps='square')
sage: p = P(('a','b','b')); p
Path: abb
sage: type(p)
<class 'sage.combinat.words.paths.FiniteWordPath_square_grid_tuple'>
sage: p == loads(dumps(p))
True
"""
pass
class FiniteWordPath_square_grid_iter_with_caching(WordDatatype_iter_with_caching, FiniteWordPath_square_grid, FiniteWord_class):
pass
class FiniteWordPath_square_grid_iter(WordDatatype_iter, FiniteWordPath_square_grid, FiniteWord_class):
pass
class FiniteWordPath_square_grid_callable_with_caching(WordDatatype_callable_with_caching, FiniteWordPath_square_grid, FiniteWord_class):
pass
class FiniteWordPath_square_grid_callable(WordDatatype_callable, FiniteWordPath_square_grid, FiniteWord_class):
pass
##### Unknown length paths on square grid (experimental) #####
#class WordPath_square_grid_iter_with_caching(WordDatatype_iter_with_caching, FiniteWordPath_square_grid, Word_class):
# pass
##### Finite paths on triangle grid #####
class FiniteWordPath_triangle_grid_list(WordDatatype_list, FiniteWordPath_triangle_grid, FiniteWord_class):
r"""
TESTS::
sage: P = WordPaths('abcdef', steps='triangle')
sage: p = P(['a','b','b']); p
Path: abb
sage: type(p)
<class 'sage.combinat.words.paths.FiniteWordPath_triangle_grid_list'>
sage: p == loads(dumps(p))
True
"""
pass
class FiniteWordPath_triangle_grid_str(WordDatatype_str, FiniteWordPath_triangle_grid, FiniteWord_class):
r"""
TESTS::
sage: P = WordPaths('abcdef', steps='triangle')
sage: p = P('abb'); p
Path: abb
sage: type(p)
<class 'sage.combinat.words.paths.FiniteWordPath_triangle_grid_str'>
sage: p == loads(dumps(p))
True
"""
pass
class FiniteWordPath_triangle_grid_tuple(WordDatatype_tuple, FiniteWordPath_triangle_grid, FiniteWord_class):
r"""
TESTS::
sage: P = WordPaths('abcdef', steps='triangle')
sage: p = P(('a','b','b')); p
Path: abb
sage: type(p)
<class 'sage.combinat.words.paths.FiniteWordPath_triangle_grid_tuple'>
sage: p == loads(dumps(p))
True
"""
pass
class FiniteWordPath_triangle_grid_iter_with_caching(WordDatatype_iter_with_caching, FiniteWordPath_triangle_grid, FiniteWord_class):
pass
class FiniteWordPath_triangle_grid_iter(WordDatatype_iter, FiniteWordPath_triangle_grid, FiniteWord_class):
pass
class FiniteWordPath_triangle_grid_callable_with_caching(WordDatatype_callable_with_caching, FiniteWordPath_triangle_grid, FiniteWord_class):
pass
class FiniteWordPath_triangle_grid_callable(WordDatatype_callable, FiniteWordPath_triangle_grid, FiniteWord_class):
pass
##### Finite paths on hexagonal grid #####
class FiniteWordPath_hexagonal_grid_list(WordDatatype_list, FiniteWordPath_hexagonal_grid, FiniteWord_class):
r"""
TESTS::
sage: P = WordPaths('abcdef', steps='hexagon')
sage: p = P(['a','b','b']); p
Path: abb
sage: type(p)
<class 'sage.combinat.words.paths.FiniteWordPath_hexagonal_grid_list'>
sage: p == loads(dumps(p))
True
"""
pass
class FiniteWordPath_hexagonal_grid_str(WordDatatype_str, FiniteWordPath_hexagonal_grid, FiniteWord_class):
r"""
TESTS::
sage: P = WordPaths('abcdef', steps='hexagon')
sage: p = P('abb'); p
Path: abb
sage: type(p)
<class 'sage.combinat.words.paths.FiniteWordPath_hexagonal_grid_str'>
sage: p == loads(dumps(p))
True
"""
pass
class FiniteWordPath_hexagonal_grid_tuple(WordDatatype_tuple, FiniteWordPath_hexagonal_grid, FiniteWord_class):
r"""
TESTS::
sage: P = WordPaths('abcdef', steps='hexagon')
sage: p = P(('a','b','b')); p
Path: abb
sage: type(p)
<class 'sage.combinat.words.paths.FiniteWordPath_hexagonal_grid_tuple'>
sage: p == loads(dumps(p))
True
"""
pass
class FiniteWordPath_hexagonal_grid_iter_with_caching(WordDatatype_iter_with_caching, FiniteWordPath_hexagonal_grid, FiniteWord_class):
pass
class FiniteWordPath_hexagonal_grid_iter(WordDatatype_iter, FiniteWordPath_hexagonal_grid, FiniteWord_class):
pass
class FiniteWordPath_hexagonal_grid_callable_with_caching(WordDatatype_callable_with_caching, FiniteWordPath_hexagonal_grid, FiniteWord_class):
pass
class FiniteWordPath_hexagonal_grid_callable(WordDatatype_callable, FiniteWordPath_hexagonal_grid, FiniteWord_class):
pass
##### Finite paths on cube grid #####
class FiniteWordPath_cube_grid_list(WordDatatype_list, FiniteWordPath_cube_grid, FiniteWord_class):
r"""
TESTS::
sage: P = WordPaths('abcdef', steps='cube')
sage: p = P(['a','b','b']); p
Path: abb
sage: type(p)
<class 'sage.combinat.words.paths.FiniteWordPath_cube_grid_list'>
sage: p == loads(dumps(p))
True
"""
pass
class FiniteWordPath_cube_grid_str(WordDatatype_str, FiniteWordPath_cube_grid, FiniteWord_class):
r"""
TESTS::
sage: P = WordPaths('abcdef', steps='cube')
sage: p = P('abb'); p
Path: abb
sage: type(p)
<class 'sage.combinat.words.paths.FiniteWordPath_cube_grid_str'>
sage: p == loads(dumps(p))
True
"""
pass
class FiniteWordPath_cube_grid_tuple(WordDatatype_tuple, FiniteWordPath_cube_grid, FiniteWord_class):
r"""
TESTS::
sage: P = WordPaths('abcdef', steps='cube')
sage: p = P(('a','b','b')); p
Path: abb
sage: type(p)
<class 'sage.combinat.words.paths.FiniteWordPath_cube_grid_tuple'>
sage: p == loads(dumps(p))
True
"""
pass
class FiniteWordPath_cube_grid_iter_with_caching(WordDatatype_iter_with_caching, FiniteWordPath_cube_grid, FiniteWord_class):
pass
class FiniteWordPath_cube_grid_iter(WordDatatype_iter, FiniteWordPath_cube_grid, FiniteWord_class):
pass
class FiniteWordPath_cube_grid_callable_with_caching(WordDatatype_callable_with_caching, FiniteWordPath_cube_grid, FiniteWord_class):
pass
class FiniteWordPath_cube_grid_callable(WordDatatype_callable, FiniteWordPath_cube_grid, FiniteWord_class):
pass
##### Finite paths on north_east #####
class FiniteWordPath_north_east_list(WordDatatype_list, FiniteWordPath_north_east, FiniteWord_class):
r"""
TESTS::
sage: P = WordPaths('ab', steps='ne')
sage: p = P(['a','b','b']); p
Path: abb
sage: type(p)
<class 'sage.combinat.words.paths.FiniteWordPath_north_east_list'>
sage: p == loads(dumps(p))
True
"""
pass
class FiniteWordPath_north_east_str(WordDatatype_str, FiniteWordPath_north_east, FiniteWord_class):
r"""
TESTS::
sage: P = WordPaths('ab', steps='ne')
sage: p = P('abb'); p
Path: abb
sage: type(p)
<class 'sage.combinat.words.paths.FiniteWordPath_north_east_str'>
sage: p == loads(dumps(p))
True
"""
pass
class FiniteWordPath_north_east_tuple(WordDatatype_tuple, FiniteWordPath_north_east, FiniteWord_class):
r"""
TESTS::
sage: P = WordPaths('ab', steps='ne')
sage: p = P(('a','b','b')); p
Path: abb
sage: type(p)
<class 'sage.combinat.words.paths.FiniteWordPath_north_east_tuple'>
sage: p == loads(dumps(p))
True
"""
pass
class FiniteWordPath_north_east_iter_with_caching(WordDatatype_iter_with_caching, FiniteWordPath_north_east, FiniteWord_class):
pass
class FiniteWordPath_north_east_iter(WordDatatype_iter, FiniteWordPath_north_east, FiniteWord_class):
pass
class FiniteWordPath_north_east_callable_with_caching(WordDatatype_callable_with_caching, FiniteWordPath_north_east, FiniteWord_class):
pass
class FiniteWordPath_north_east_callable(WordDatatype_callable, FiniteWordPath_north_east, FiniteWord_class):
pass
##### Finite paths on dyck #####
class FiniteWordPath_dyck_list(WordDatatype_list, FiniteWordPath_dyck, FiniteWord_class):
r"""
TESTS::
sage: P = WordPaths('ab', steps='dyck')
sage: p = P(['a','b','b']); p
Path: abb
sage: type(p)
<class 'sage.combinat.words.paths.FiniteWordPath_dyck_list'>
sage: p == loads(dumps(p))
True
"""
pass
class FiniteWordPath_dyck_str(WordDatatype_str, FiniteWordPath_dyck, FiniteWord_class):
r"""
TESTS::
sage: P = WordPaths('ab', steps='dyck')
sage: p = P('abb'); p
Path: abb
sage: type(p)
<class 'sage.combinat.words.paths.FiniteWordPath_dyck_str'>
sage: p == loads(dumps(p))
True
"""
pass
class FiniteWordPath_dyck_tuple(WordDatatype_tuple, FiniteWordPath_dyck, FiniteWord_class):
r"""
TESTS::
sage: P = WordPaths('ab', steps='dyck')
sage: p = P(('a','b','b')); p
Path: abb
sage: type(p)
<class 'sage.combinat.words.paths.FiniteWordPath_dyck_tuple'>
sage: p == loads(dumps(p))
True
"""
pass
class FiniteWordPath_dyck_iter_with_caching(WordDatatype_iter_with_caching, FiniteWordPath_dyck, FiniteWord_class):
pass
class FiniteWordPath_dyck_iter(WordDatatype_iter, FiniteWordPath_dyck, FiniteWord_class):
pass
class FiniteWordPath_dyck_callable_with_caching(WordDatatype_callable_with_caching, FiniteWordPath_dyck, FiniteWord_class):
pass
class FiniteWordPath_dyck_callable(WordDatatype_callable, FiniteWordPath_dyck, FiniteWord_class):
pass
|
py | b4028460ca3e52b014406018354052750545dd66 | # -*- coding: utf-8 -*-
import tempfile
import numpy as np
from pydefect.core.defect_entry import (
DefectType, DefectEntry, determine_defect_type, anchor_atom_index,
divide_defect_name)
from pydefect.util.testing import PydefectTest
from pydefect.core.config import CUTOFF_FACTOR
class DefectTypeTest(PydefectTest):
def setUp(self) -> None:
self.vacancy = DefectType.from_string("vacancy")
self.substituted = DefectType.substituted
def tests_str(self):
self.assertEqual("vacancy", str(self.vacancy))
def test__raise_error(self):
with self.assertRaises(AttributeError):
DefectType.from_string("antisite")
def test_is_defect_center_atom(self):
self.assertTrue(self.substituted.is_defect_center_atom)
class DetermineDefectTypeTest(PydefectTest):
def setUp(self) -> None:
self.vacancy = determine_defect_type(
inserted_atoms=[], removed_atoms=[{"coords": [0, 0, 0]}])
self.interstitial = determine_defect_type(
inserted_atoms=[{"coords": [0, 0, 0]}], removed_atoms=[])
self.substituted = determine_defect_type(
inserted_atoms=[{"coords": [0, 0, 0]}],
removed_atoms=[{"coords": [0, 0, 0]}])
self.complex = determine_defect_type(
inserted_atoms=[{"coords": [0, 0, 0.5]}],
removed_atoms=[{"coords": [0, 0, 0]}])
self.complex2 = determine_defect_type(
inserted_atoms=[{"coords": [0, 0, 0]}],
removed_atoms=[{"coords": [0, 0, 0]}, {"coords": [0, 0, 0.5]}])
def test(self):
self.assertEqual(DefectType.vacancy, self.vacancy)
self.assertEqual(DefectType.interstitial, self.interstitial)
self.assertEqual(DefectType.substituted, self.substituted)
self.assertEqual(DefectType.complex, self.complex)
self.assertEqual(DefectType.complex, self.complex2)
def test_raise_error(self):
with self.assertRaises(ValueError):
determine_defect_type(inserted_atoms=[], removed_atoms=[])
class DefectEntryTest(PydefectTest):
def setUp(self):
""" """
# DefectEntry class object for a single vacancy
name = "Va_O1"
defect_type = DefectType.from_string("vacancy")
self.initial_structure_vacancy = \
self.get_structure_by_name(name="MgO64atoms-Va_O_0-unrelaxed")
perturbed_initial_structure = self.initial_structure_vacancy.copy()
removed_atoms = [{"element": "O",
"index": 32,
"coords": [0.25, 0, 0]}]
inserted_atoms = []
changes_of_num_elements = {"O": -1}
charge = 2
initial_site_symmetry = "m-3m"
multiplicity = 32
neighboring_sites = [0, 4, 16, 17, 24, 26]
cutoff = round(8.419456 / 4 * CUTOFF_FACTOR, 2)
self.MgO_Va_O1_2 = \
DefectEntry(name=name,
defect_type=defect_type,
initial_structure=self.initial_structure_vacancy,
perturbed_initial_structure=perturbed_initial_structure,
removed_atoms=removed_atoms,
inserted_atoms=inserted_atoms,
changes_of_num_elements=changes_of_num_elements,
charge=charge,
initial_site_symmetry=initial_site_symmetry,
cutoff=cutoff,
neighboring_sites=neighboring_sites,
multiplicity=multiplicity)
name = "complex"
defect_type = DefectType.from_string("complex")
self.initial_structure_complex = \
self.get_structure_by_name(name="MgO64atoms-Va_Mg+Va_O+Ca_i")
perturbed_initial_structure = self.initial_structure_complex.copy()
removed_atoms = [{"element": "Mg",
"index": 0,
"coords": [0, 0, 0]},
{"element": "O",
"index": 32,
"coords": [0.25, 0, 0]}]
inserted_atoms = [{"element": "Ca",
"index": 0,
"coords": [0.125, 0, 0]}]
changes_of_num_elements = {"Mg": -1, "Ca": 1, "O": -1}
charge = 2
initial_site_symmetry = "4mm"
multiplicity = 192
neighboring_sites = [16, 17, 24, 26, 47, 48, 55, 57]
cutoff = round(8.419456 / 4 * CUTOFF_FACTOR, 2)
self.MgO_complex = \
DefectEntry(name=name,
defect_type=defect_type,
initial_structure=self.initial_structure_complex,
perturbed_initial_structure=perturbed_initial_structure,
removed_atoms=removed_atoms,
inserted_atoms=inserted_atoms,
changes_of_num_elements=changes_of_num_elements,
charge=charge,
initial_site_symmetry=initial_site_symmetry,
cutoff=cutoff,
neighboring_sites=neighboring_sites,
multiplicity=multiplicity)
def test_from_defect_structure(self):
perfect_structure = self.get_structure_by_name(name="MgO64atoms")
expected = self.MgO_Va_O1_2.as_dict()
actual = DefectEntry.from_defect_structure(
defect_structure=self.initial_structure_vacancy,
perfect_structure=perfect_structure,
defect_name="Va_O1_2").as_dict()
for d in expected:
self.assertEqual(expected[d], actual[d])
def test_from_defect_structure_complex(self):
perfect_structure = self.get_structure_by_name(name="MgO64atoms")
expected = self.MgO_complex.as_dict()
actual = DefectEntry.from_defect_structure(
defect_structure=self.initial_structure_complex,
perfect_structure=perfect_structure,
defect_name="complex_2").as_dict()
for d in expected:
self.assertEqual(expected[d], actual[d])
def test_msonable(self):
self.assertMSONable(self.MgO_Va_O1_2)
self.assertMSONable(self.MgO_complex)
def test_dict_round_trip(self):
""" round trip test of as_dict and from_dict """
expected = self.MgO_Va_O1_2.as_dict()
actual = DefectEntry.from_dict(expected).as_dict()
self.assertEqual(expected, actual)
def test_json_round_trip(self):
""" round trip test of to_json and from_json """
tmp_file = tempfile.NamedTemporaryFile()
self.MgO_Va_O1_2.to_json_file(tmp_file.name)
expected = self.MgO_Va_O1_2.as_dict()
actual = DefectEntry.load_json(tmp_file.name).as_dict()
self.assertTrue(expected, actual)
def test_atom_mapping_to_perfect(self):
expected = list(range(64))
expected.pop(32)
actual = self.MgO_Va_O1_2.atom_mapping_to_perfect
self.assertEqual(expected, actual)
def test_atom_mapping_to_perfect_complex(self):
expected = list(range(64))
expected.pop(32)
expected.pop(0)
expected = [None] + expected
actual = self.MgO_complex.atom_mapping_to_perfect
self.assertEqual(expected, actual)
def test_defect_center(self):
expected = [0.25, 0, 0]
actual = self.MgO_Va_O1_2.defect_center_coords
self.assertArrayEqual(expected, actual)
def test_defect_center_complex(self):
expected = [0.125, 0, 0]
actual = self.MgO_complex.defect_center_coords
self.assertArrayEqual(expected, actual)
def test_anchor_atom_index(self):
expected = 38 # Line 47 [0.75, 0.5, 0.5]
actual = self.MgO_Va_O1_2.anchor_atom_index
self.assertEqual(actual, expected)
def test_anchor_atom_index_complex(self):
expected = [8, 38] # 8 or 38
actual = self.MgO_complex.anchor_atom_index
self.assertTrue(actual in expected)
class AnchorAtomIndexTest(PydefectTest):
def test(self):
structure = self.get_structure_by_name(name="KZn4P3")
actual = anchor_atom_index(structure, [0.5, 0.5, 0.5])
self.assertEqual(7, actual)
class DivideDirnameTest(PydefectTest):
def setUp(self):
# DefectEntry class object for a single vacancy
self.dirname1 = "Va_Mg1_2"
self.dirname2 = "Va_O1_2_inward"
self.dirname3 = "Mg_i+Va_O1*2_-2_coord1"
def test_dirname1(self):
self.assertEqual(("Va_Mg1", 2, None), divide_defect_name(self.dirname1))
def test_dirname2(self):
self.assertEqual(("Va_O1", 2, "inward"),
divide_defect_name(self.dirname2))
def test_dirname3(self):
self.assertEqual(("Mg_i+Va_O1*2", -2, "coord1"),
divide_defect_name(self.dirname3))
|
py | b4028465e34747d60fac8c6a1339f7badcfa091b | import os
# Django settings for fvserver project.
PROJECT_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), os.path.pardir))
ENCRYPTED_FIELD_KEYS_DIR = os.path.join(PROJECT_DIR, 'keyset')
DEBUG = False
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': os.path.join(PROJECT_DIR, 'crypt.db'), # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/London'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-gb'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_DIR, 'static')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
# deprecated in Django 1.4, but django_wsgiserver still looks for it
# when serving admin media
ADMIN_MEDIA_PREFIX = '/static_admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_DIR, 'site_static'),
)
LOGIN_URL='/login/'
LOGIN_REDIRECT_URL='/'
ALLOWED_HOSTS = ['*']
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '6%y8=x5(#ufxd*+d+-ohwy0b$5z^cla@7tvl@n55_h_cex0qat'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
# insert your TEMPLATE_DIRS here
os.path.join(PROJECT_DIR, 'templates'),
],
'APP_DIRS':True,
'OPTIONS': {
'context_processors': [
# Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
# list if you haven't customized them:
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
'debug': DEBUG,
},
},
]
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'fvserver.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'fvserver.wsgi.application'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
'django.contrib.admindocs',
'server',
'bootstrap3',
'django_extensions',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
py | b40284e829aaa940c6d9ac35bbf37427f87e3b98 | """
To serve with uwsgi:
uwsgi --http 0.0.0.0:8002 --manage-script-name --mount /=server_flask:app
To serve with python:
python server_flask.py 8002
(then visit http://localhost:8002/static/index.html in your browser)
"""
import os, sys, posixpath
import traceback
import logging
import pkg_resources
from flask import Flask, request, make_response, redirect, send_from_directory
from werkzeug.exceptions import HTTPException
import msgpack as msgpack_converter
def create_app(config=None):
from web_gui import api
RPC_ENDPOINT = '/RPC2'
STATIC_PATH = pkg_resources.resource_filename('web_gui', 'static/')
app = Flask(__name__, static_folder=STATIC_PATH, static_url_path='/static')
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
@app.route('/')
def root():
return redirect("static/index.html")
@app.route('/robots.txt')
def static_from_root():
return send_from_directory(app.static_folder, request.path[1:])
@app.errorhandler(Exception)
def handle_error(e):
code = 500
if isinstance(e, HTTPException):
code = e.code
content = {'exception': repr(e), 'traceback': traceback.format_exc()}
logging.info(content['traceback'])
return make_response(msgpack_converter.packb(content, use_bin_type=True), code)
def wrap_method(mfunc):
def wrapper(*args, **kwargs):
real_kwargs = request.get_json() if request.get_data() else {}
content = mfunc(*args, **real_kwargs)
packed = msgpack_converter.packb(content, use_bin_type=True)
response = make_response(packed)
response.headers['Content-Type'] = 'application/msgpack'
return response
return wrapper
api.initialize(config)
for method in api.api_methods:
mfunc = getattr(api, method)
wrapped = wrap_method(mfunc)
path = posixpath.join(RPC_ENDPOINT, method)
shortpath = posixpath.join("/", method)
app.add_url_rule(path, path, wrapped, methods=["POST"])
app.add_url_rule(shortpath, shortpath, wrapped, methods=["POST"])
from dataflow.rev import print_revision
print_revision()
return app
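# A minimal client-side sketch (illustrative only, not part of the original
# module): it assumes the server above is running on localhost and that
# ``method`` is one of the names in ``api.api_methods`` -- no particular
# method name is implied. Keyword arguments travel as the JSON body and the
# reply is msgpack-encoded, mirroring wrap_method() above.
def example_rpc_call(method, port=8002, **kwargs):
    import requests
    url = "http://localhost:%d/RPC2/%s" % (port, method)
    response = requests.post(url, json=kwargs)  # kwargs become the JSON body
    response.raise_for_status()
    return msgpack_converter.unpackb(response.content, raw=False)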
if __name__ == '__main__':
logging.basicConfig(level=logging.WARNING)
port = 8002
if len(sys.argv) > 1:
port = int(sys.argv[1])
app = create_app()
app.run(port=port)
|
py | b402866175f1e35cf6fc24bcbee8f5e3a15f18cc | """
ASGI config for YAInstagram project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'YAInstagram.settings')
application = get_asgi_application()
|
py | b40286f162eaa421c70ed3df68fce5ad5648516a | #!/usr/bin/env python
# coding=utf-8
from __future__ import division, print_function, unicode_literals
from sacred.config.config_scope import ConfigScope
SIX = 6
@ConfigScope
def cfg():
answer = 7 * SIX
@ConfigScope
def cfg2():
answer = 6 * SEVEN
|
py | b40287e16e63629f066a7c5a22a1d35258226e29 | # Copyright (c) 2021 PPViT Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration
Configuration for data, model archtecture, and training, etc.
Config can be set by .yaml file or by argparser(limited usage)
"""
import os
from yacs.config import CfgNode as CN
import yaml
_C = CN()
_C.BASE = ['']
# data settings
_C.DATA = CN()
_C.DATA.BATCH_SIZE = 256 #256 # train batch_size for single GPU
_C.DATA.BATCH_SIZE_EVAL = 8 #64 # val batch_size for single GPU
_C.DATA.DATA_PATH = '/dataset/imagenet/' # path to dataset
_C.DATA.DATASET = 'imagenet2012' # dataset name
_C.DATA.IMAGE_SIZE = 224 # input image size: 224 for pretrain, 384 for finetune
_C.DATA.CROP_PCT = 0.875 # input image scale ratio, scale is applied before centercrop in eval mode
_C.DATA.NUM_WORKERS = 2 # number of data loading threads
# model settings
_C.MODEL = CN()
_C.MODEL.TYPE = 'VOLO'
_C.MODEL.NAME = 'VOLO'
_C.MODEL.RESUME = None
_C.MODEL.PRETRAINED = None
_C.MODEL.NUM_CLASSES = 1000
_C.MODEL.DROPOUT = 0.1
_C.MODEL.DROPPATH = 0.1
_C.MODEL.ATTENTION_DROPOUT = 0.0
_C.MODEL.STEM_HIDDEN_DIM = 128
# transformer settings
_C.MODEL.TRANS = CN()
_C.MODEL.TRANS.PATCH_SIZE = 32
_C.MODEL.TRANS.LAYERS = [12, 12, 20, 4]
_C.MODEL.TRANS.EMBED_DIMS = [384, 768, 768, 768]
_C.MODEL.TRANS.MLP_RATIOS = [4, 4, 4, 4]
_C.MODEL.TRANS.DOWNSAMPLES = [True, False, False, False]
_C.MODEL.TRANS.OUTLOOK_ATTENTION = [True, False, False, False]
_C.MODEL.TRANS.NUM_HEADS = [12, 16, 16, 16]
_C.MODEL.TRANS.QKV_BIAS = False
_C.MODEL.TRANS.QK_SCALE = False
# training settings
_C.TRAIN = CN()
_C.TRAIN.LAST_EPOCH = 0
_C.TRAIN.NUM_EPOCHS = 300
_C.TRAIN.WARMUP_EPOCHS = 3 #34 # ~ 10k steps for 4096 batch size
_C.TRAIN.WEIGHT_DECAY = 0.05 #0.3 # 0.0 for finetune
_C.TRAIN.BASE_LR = 0.001 #0.003 for pretrain # 0.03 for finetune
_C.TRAIN.WARMUP_START_LR = 1e-6 #0.0
_C.TRAIN.END_LR = 5e-4
_C.TRAIN.GRAD_CLIP = 1.0
_C.TRAIN.ACCUM_ITER = 2 #1
_C.TRAIN.LR_SCHEDULER = CN()
_C.TRAIN.LR_SCHEDULER.NAME = 'warmupcosine'
_C.TRAIN.LR_SCHEDULER.MILESTONES = "30, 60, 90" # only used in StepLRScheduler
_C.TRAIN.LR_SCHEDULER.DECAY_EPOCHS = 30 # only used in StepLRScheduler
_C.TRAIN.LR_SCHEDULER.DECAY_RATE = 0.1 # only used in StepLRScheduler
_C.TRAIN.OPTIMIZER = CN()
_C.TRAIN.OPTIMIZER.NAME = 'AdamW'
_C.TRAIN.OPTIMIZER.EPS = 1e-8
_C.TRAIN.OPTIMIZER.BETAS = (0.9, 0.999) # for adamW
_C.TRAIN.OPTIMIZER.MOMENTUM = 0.9
# misc
_C.SAVE = "./output"
_C.TAG = "default"
_C.SAVE_FREQ = 10 # freq to save chpt
_C.REPORT_FREQ = 50 # freq to logging info
_C.VALIDATE_FREQ = 100 # freq to do validation
_C.SEED = 0
_C.EVAL = False # run evaluation only
_C.LOCAL_RANK = 0
_C.NGPUS = -1
def _update_config_from_file(config, cfg_file):
config.defrost()
with open(cfg_file, 'r') as infile:
yaml_cfg = yaml.load(infile, Loader=yaml.FullLoader)
for cfg in yaml_cfg.setdefault('BASE', ['']):
if cfg:
_update_config_from_file(
config, os.path.join(os.path.dirname(cfg_file), cfg)
)
print('merging config from {}'.format(cfg_file))
config.merge_from_file(cfg_file)
config.freeze()
def update_config(config, args):
"""Update config by ArgumentParser
Args:
args: ArgumentParser contains options
Return:
config: updated config
"""
if args.cfg:
_update_config_from_file(config, args.cfg)
config.defrost()
if args.dataset:
config.DATA.DATASET = args.dataset
if args.batch_size:
config.DATA.BATCH_SIZE = args.batch_size
if args.image_size:
config.DATA.IMAGE_SIZE = args.image_size
if args.data_path:
config.DATA.DATA_PATH = args.data_path
if args.ngpus:
config.NGPUS = args.ngpus
if args.eval:
config.EVAL = True
config.DATA.BATCH_SIZE_EVAL = args.batch_size
if args.pretrained:
config.MODEL.PRETRAINED = args.pretrained
if args.resume:
config.MODEL.RESUME = args.resume
if args.last_epoch:
config.TRAIN.LAST_EPOCH = args.last_epoch
#config.freeze()
return config
def get_config(cfg_file=None):
"""Return a clone of config or load from yaml file"""
config = _C.clone()
if cfg_file:
_update_config_from_file(config, cfg_file)
return config
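# ----------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module). The yaml file name and override keys below are assumptions.
#
#   config = get_config()                            # clone of the defaults above
#   config = get_config('configs/volo_d1_224.yaml')  # or merge a yaml file
#
#   A minimal override yaml could look like:
#     DATA:
#       BATCH_SIZE: 128
#     MODEL:
#       TRANS:
#         PATCH_SIZE: 16
#
#   After argparse parsing, update_config(config, args) layers CLI options
#   such as --batch_size, --eval or --pretrained on top of these values.
# ----------------------------------------------------------------------------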
|
py | b402883ba0163968f84dc957a08c56e7f97cede8 | from django.apps import AppConfig
class EmployeeappConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'EmployeeApp'
|
py | b402889f28b904e9ad8f26033145ad9f7a4dd806 | expressao = str(input('Digite uma expressão qualquer que use parenteses: ')).strip()
# lista = expressao.split()
# print(lista)
if '(' in expressao or ')' in expressao:
if expressao.count('(') == expressao.count(')'):
print('OK')
else:
print('Sua expressão está errada!') |
py | b402891b58e1edf33d9d13358b1c63a696f48737 | from RFEM.initModel import Model, clearAtributes, ConvertToDlString
class Opening():
def __init__(self,
no: int = 1,
lines_no: str = '1 2 3 4',
comment: str = '',
params: dict = None,
model = Model):
'''
Args:
no (int): Opening Tag
lines_no (str): Tags of Lines defining Opening
comment (str, optional): Comments
params (dict, optional): Any WS Parameter relevant to the object and its value in form of a dictionary
'''
# Client model | Opening
clientObject = model.clientModel.factory.create('ns0:opening')
        # Clears object attributes | Sets all attributes to None
clearAtributes(clientObject)
# Opening No.
clientObject.no = no
# Boundary Lines No.
clientObject.boundary_lines = ConvertToDlString(lines_no)
# Comment
clientObject.comment = comment
# Adding optional parameters via dictionary
if params:
for key in params:
clientObject[key] = params[key]
# Add Opening to client model
model.clientModel.service.set_opening(clientObject)
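
# Hedged usage sketch (added for illustration; not part of the original module).
# It assumes a running RFEM instance with an active Model() connection; the
# import path and line tags below are assumptions.
#
#   from RFEM.initModel import Model
#   from RFEM.BasicObjects.opening import Opening
#
#   Model()                            # attach to the open RFEM model
#   Opening(no=1, lines_no='1 2 3 4')  # opening bounded by lines 1, 2, 3 and 4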
|
py | b4028a9b00ece3179c0e955536f7b4de6ebf9c4b | # -*- coding: utf-8 -*-
"""
Combine the stresses
@revision: 0
@author: etodorov
@date: 14 Feb 16
"""
#software imports
import sys
from base import TestProblem, Solver
#mathematical imports
import numpy as np
inputList=[ 'sigma_z_r', 'sigma_z_f',
'tau_totr', 'tau_totf',
'xr', 'yr', 'z']
outputList = ["vonMisesStress_ring", 'vonMisesStress_floor' ]
class MisesStress(Solver):
def solve(self,Problem):
"""
**Stress combiner**
\n
        sigma_z is a function of (x,y,z);
tau_tot is a 2d array in directions (theta, z);
\n
vonMisesStresses are 2D arrays (z, xsectional)
"""
for i in self.inputList:
setattr(sys.modules[__name__], i, getattr(Problem, i))
#YOUR CODE STARTS HERE
vonMisesStress_ring = np.sqrt(sigma_z_r**2+3*tau_totr**2)
vonMisesStress_floor= np.sqrt(sigma_z_f**2+3*tau_totf**2)
#YOUR CODE ENDS HERE
for o in self.outputList:
if o in locals():
setattr(Problem, o, locals()[o])
else:
print "WARNING: missing output ",o
mohr = MisesStress(inputList,outputList)
#TODO: add unit tests
if __name__ == "__main__":  # executes only when you run THIS file
tp1 = TestProblem()
# provide all inputs in this shape
tp1.sigma_z_r = 526000000
tp1.sigma_z_f = 0
tp1.tau_totr = 100
tp1.tau_totf = 0
tp1.xr = 0
tp1.yr = 0
tp1.z = 0
#execute the solving routine
mohr.solve(tp1)
#verify that outputs agree with expectation
    print(tp1.vonMisesStress_ring)
    print(tp1.vonMisesStress_floor)
|
py | b4028aa6e517ea8a26077c174cda301370df696e | from .registry import (Serialiser, Registry)
import base64
import pickle
class PickleString(Serialiser):
def __init__(self, cls):
super(PickleString, self).__init__(cls)
def encode(self, obj, make_rec):
data = base64.b64encode(pickle.dumps(obj)).decode('ascii')
return make_rec(data)
def decode(self, cls, data):
return pickle.loads(
base64.b64decode(data.encode('ascii')))
def registry():
"""Returns a serialisation registry that "just pickles everything".
This registry can be used to bolt-on other registries and keep the
pickle as the default. The objects are first pickled to a byte-array,
which is subsequently encoded with base64."""
return Registry(
default=PickleString(object)
)
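
# Hedged usage sketch (added for illustration; not part of the original
# module). It demonstrates the base64 + pickle round trip described above;
# passing an identity function as make_rec is an assumption made only to keep
# the example self-contained.
if __name__ == "__main__":
    _ps = PickleString(object)
    _rec = _ps.encode({'a': [1, 2, 3]}, make_rec=lambda data: data)
    assert _ps.decode(object, _rec) == {'a': [1, 2, 3]}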
|
py | b4028aaf9eebf99f6293ba1b3b44712208992dae | # Generated by Django 2.0.2 on 2018-02-19 14:14
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('interface', '0006_auto_20180216_1355'),
]
operations = [
migrations.RemoveField(
model_name='device',
name='user',
),
migrations.AddField(
model_name='device',
name='users',
field=models.ManyToManyField(db_index=True, default=(), to=settings.AUTH_USER_MODEL),
),
]
|
py | b4028ab1616ed4e42c54dee977ec853c2ac53669 | # You can external libraris, as long as they are in the 'requirements.txt' file
import numpy as np
# This is for syntax highlighting and auto completion of RedisGears stuff
# during development.
# I.e: 'GearsBuilder' and its functions.
# It can be removed for final deployments, but can also be left as is.
from redgrease import GearsBuilder
def foo(_):
pass
rg = np.random.default_rng(1)
# import matplotlib.pyplot as plt
# Build a vector of 10000 normal deviates with variance 0.5^2 and mean 2
mu, sigma = 2, 0.5
v = rg.normal(mu, sigma, 10000)
# Plot a normalized histogram with 50 bins
# plt.hist(v, bins=50, density=1) # matplotlib version (plot)
# Compute the histogram with numpy and then plot it
(n, bins) = np.histogram(v, bins=50, density=True) # NumPy version (no plot)
# plt.plot(.5*(bins[1:]+bins[:-1]), n)
gb = GearsBuilder("CommandReader")
gb.map(foo)
gb.register(trigger="video")
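# Hedged usage note (added for illustration; not part of the original script).
# Once this gear is deployed to a RedisGears-enabled Redis server, the
# registered CommandReader can typically be invoked from redis-cli with the
# trigger name used above, e.g.:
#
#   redis-cli RG.TRIGGER video
#
# The exact deployment workflow (gears-cli, redgrease CLI, etc.) is assumed
# and depends on the environment.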
|
py | b4028ba26d4f8028818d5eb405a516a8e17aed57 | """
Description - Week 1 homework for 1mwtt program
Author - Cristina Tarantino
Date - July 2018
"""
from datetime import datetime
from random import randint
days_in_year = 365
# year leap. Source https://en.wikipedia.org/wiki/Year#Variation_in_the_length_of_the_year_and_the_day
days_in_year_leap = 365.2422
# 60 minutes/hour * 24 hours/day
minutes_in_a_day = 60 * 24
# 60 seconds/minute * 60 minutes/hour * 24 hours/day
seconds_in_a_day = 60 * 60 * 24
seconds_in_a_year = seconds_in_a_day * days_in_year
seconds_in_a_year_leap = seconds_in_a_day * days_in_year_leap
seconds_in_milliseconds = 1000
# 1. Hours in a year. How many hours are in a year?
# a day has 24h and a year has 365 days
# therefore
# 1 common year = 365 days = (365 days) times (24 hours/day) = 8760 hours
print("\nHours in a year: " + str(24 * days_in_year))
# 2. Minutes in a decade. How many minutes are in a decade?
# 60 (minutes in 1 hour) times 24 (hours in a day) times 365 times 10 = 5256000 (Integer)
print("\nMinutes in a decade: " + str(minutes_in_a_day * days_in_year * 10))
# If we want to be more accurate though we should know that
# a year is actually 365.2422 days, making the calculation equal to 5259487.68 (Float)
# source https://en.wikipedia.org/wiki/Year#Variation_in_the_length_of_the_year_and_the_day
print("\nMinutes in a decade considering leaps: " + str(minutes_in_a_day * days_in_year_leap * 10))
# 3. Your age in seconds. How many seconds old are you?
# 60 seconds/minutes * 60 minutes/hours * 24 hours/days * 365.2422 days/year * 32 year
my_age = 32
print("\nMy age in seconds: " + str(seconds_in_a_year_leap * my_age))
# 4. Andreea is 48618000 seconds old. Calculate her age
# example showing use of escape characters
andreea_seconds_old = 48618000
print('\nAndreea\'s age: ' + str(andreea_seconds_old / seconds_in_a_year_leap))
# https://github.com/1millionwomentotech/toolkitten/issues/35
print("\nAndreea's corrected age: " + str(andreea_seconds_old / seconds_in_a_year_leap * 24))
# 5. How many days does it take for a 32-bit system to timeout, if it has a bug with integer overflow?
# The Binary Register Width of a processor determines the range of values that can be represented.
# The maximum representable value for a 32-bit system will be 2^32-1
# When an arithmetic operation (in this case the increment of a millisecond in the time)
# produces a result larger than the above we will have an `integer overflow`
# To calculate the days it will take to reach that situation for a 32-bit system
# we need to convert 2^32 milliseconds in days by dividing by 1000s then 60s then 60m 24h
# source https://en.wikipedia.org/wiki/Integer_overflow
max_value_32 = pow(2, 32)
print("\nDays it will take for a 32-bit system to timeout: " + str(
max_value_32 / seconds_in_milliseconds / seconds_in_a_day))
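# Worked check (added for clarity): 2**32 ms = 4,294,967,296 ms
# ≈ 4,294,967.3 s ≈ 49.7 days, which matches the value printed above.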
# 6. How many days does it take for a 64-bit system to timeout, if it has a bug with integer overflow?
# The Binary Register Width of a processor determines the range of values that can be represented.
# The maximum representable value for a 64-bit system will be 2^64-1
# When an arithmetic operation (in this case the increment of a millisecond in the time)
# produces a result larger than the above we will have an `integer overflow`
# To calculate the days it will take to reach that situation for a 64-bit system
# we need to convert 2^64 milliseconds in days by dividing by 1000s then 60s then 60m 24h
# source https://en.wikipedia.org/wiki/Integer_overflow
max_value_64 = pow(2, 64)
print("\nDays it will take for a 64-bit system to timeout: " + str(
max_value_64 / seconds_in_milliseconds / seconds_in_a_day))
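# Worked check (added for clarity): 2**64 ms ≈ 1.845e19 ms ≈ 1.845e16 s
# ≈ 2.135e11 days (roughly 585 million years), which matches the value printed above.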
# 7. Calculate your age accurately based on your birthday
# https://docs.python.org/3/library/datetime.html#datetime.timedelta.total_seconds
# https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting
# example showing %s string variable
delta = datetime.now() - datetime(1986, 12, 8, 18, 45)
print("\nMy age is %d seconds" % delta.total_seconds())
# or
# print("\nMy age is {} seconds".format(delta.total_seconds()))
# 8. Full name greeting. Write a program that asks for a person's first name, then middle, and then last.
# Finally, it should greet the person using their full name.
name = input("\nCould you please type your first name: ")
middle_name = input("Could you please type your middle name: ")
last_name = input("Could you please type your last name: ")
print("\nHello %s %s %s! A really warm welcome to you!" % (name, middle_name, last_name))
# 9. Bigger, better favorite number. Write a program that asks for a person's favorite number.
# Have your program add 1 to the number, and then suggest the result as a bigger and better favorite number.
print("\nEXERCISE Bigger, better favorite number")
# infinite loop
while True:
# try to convert the input in an integer
try:
favorite_number = int(input("\n" + name + " could you please type your favourite number? "))
# if it is not possible acknowledge the user and continue to prompt him to insert a number
except ValueError:
print("That wasn't a number!")
continue
# else execute the input manipulation and break the infinite loop
else:
big_favorite_number = str(favorite_number + 1)
print("I have for you a bigger and better favourite number. What about a nice %s." % big_favorite_number)
break
# 10. Angry boss. Write an angry boss program that rudely asks what you want.
# Whatever you answer, the angry boss should yell it back to you and then fire you.
print("\nEXERCISE Angry boss")
answer = input("\n" + name + " what the hell do you want? ")
print(("whaddaya mean \"" + answer + "\"?!? you're fired!!").upper())
# 11. Table of contents. Here's something for you to do in order to play around more with center, ljust, and rjust:
# write a program that will display a table of contents so that it looks like this:
# Table of Contents
#
# Chapter 1: Getting Started page 1
# Chapter 2: Numbers page 9
# Chapter 3: Letters page 13
print("\nEXERCISE Table of contents")
rows = [
["\nTable of Contents", "\n"],
["Chapter 1: Getting Started", "page 1"],
["Chapter 2: Numbers", "page 9"],
["Chapter 3: Letters", "page 13"]
]
# get the length of the longest world from each row in rows and for each word in row + some padding
col_width = max(len(r[0]) for r in rows) + 10 # padding
# for every row in rows
for r in rows:
# print the first word of the row leaving empty spaces to fill up the column width and then print the second element
print(r[0].ljust(col_width) + r[1])
# 12. Write a program that prints out the lyrics to that beloved classic, "99 Bottles of Beer on the Wall."
# source http://www.99-bottles-of-beer.net/lyrics.html
print("\nEXERCISE \"99 Bottles of Beer on the Wall.\"")
BEER_TOTAL = 99
# print the lyrics title
print("\n", (" Lyrics of the song %s Bottles of Beer " % BEER_TOTAL).center(50, "🍺"), "\n")
for i in range(BEER_TOTAL, 0, -1):
# print the lyrics in the loop from 99 to 0
print("\n", i, "bottles of beer on the wall,", i, "bottles of beer."
"\nTake one down and pass it around,", i - 1, "bottles of beer on the wall.\n")
# print the end of the lyrics
print("No more bottles of beer on the wall, no more bottles of beer."
"\nGo to the store and buy some more,", BEER_TOTAL, "bottles of beer on the wall.")
# 13. Deaf grandma.
# Whatever you say to Grandma (whatever you type in), she should respond with this: HUH?! SPEAK UP, GIRL!
# unless you shout it (type in all capitals). If you shout, she can hear you (or at least she thinks so) and yells back:
# NO, NOT SINCE 1938!
# To make your program really believable, have Grandma shout a different year each time,
# maybe any year at random between 1930 and 1950.
# You can't stop talking to Grandma until you shout BYE.
print("\nEXERCISE Deaf grandma")
tell_grandma = ""
while tell_grandma != "BYE":
tell_grandma = input("Tell something to Grandma: ")
# if tell_grandma.isupper() and not tell_grandma.islower(): => this would be semantically more correct however
# I think that the above method will scan the string tell_grandma twice whilst the one below only once
if tell_grandma == tell_grandma.upper():
random_year = randint(1930, 1950)
print("NO, NOT SINCE %s" % random_year)
else:
print("HUH?! SPEAK UP, GIRL!")
# 14. Deaf grandma extended. What if Grandma doesn't want you to leave?
# When you shout BYE, she could pretend not to hear you.
# Change your previous program so that you have to shout BYE three times in a row.
# Make sure to test your program: if you shout BYE three times but not in a row, you should still be talking to Grandma.
print("\nEXERCISE Deaf grandma extended")
tell_grandma_extended = ""
num_bye = 0
while num_bye < 3:
tell_grandma_extended = input("Tell something to Grandma: ")
# if tell_grandma.isupper() and not tell_grandma.islower(): => this would be semantically more correct however
# I think that the above method will scan the string tell_grandma twice whilst the one below only once
if tell_grandma_extended == tell_grandma_extended.upper():
if tell_grandma_extended == "BYE":
num_bye = num_bye + 1
else:
num_bye = 0
random_year = randint(1930, 1950)
print("NO, NOT SINCE %s" % random_year)
else:
num_bye = 0
print("HUH?! SPEAK UP, GIRL!")
if num_bye == 3:
print("GOODBYE HONEY!!! SEE YOU SOON! I LOVE YOU!")
# 15. Leap years.
# Write a program that asks for a starting year and an ending year and
# then puts all the leap years between them (and including them,
# if they are also leap years). Leap years are years divisible by 4 (like 1984 and 2004).
# However, years divisible by 100 are not leap years (such as 1800 and 1900)
# unless they are also divisible by 400 (such as 1600 and 2000, which were in fact leap years). What a mess!
print("\nEXERCISE Leap years ")
print("\nLet's find leap years. Type a range of two years to find all the leap years in the range.")
loop_years = []
# infinite loop
while True:
# try to convert the input in an integer
try:
year_one = int(input("\nPlease type the starting year: "))
year_two = int(input("\nPlease type the ending year: "))
        # check that the starting year is less than the ending year
        if year_one >= year_two:
            raise ValueError("\nThe starting year must be less than the ending year!")
# if it is not possible acknowledge the user and continue to prompt her to insert a number
except ValueError as error:
if error:
print(error)
else:
print("\nThat wasn't a valid year!")
continue
# else execute the input manipulation and break the infinite loop
else:
current_year = year_one
while current_year <= year_two:
if current_year % 400 == 0 or (current_year % 4 == 0 and current_year % 100 != 0):
loop_years.append(current_year)
current_year += 1
for y in loop_years:
print(y)
break
# 16. Find something today in your life, that is a calculation.
# I had a JavaScript interview today and the question was to perform a left rotation operation on an array.
# For example, if 2 left rotations are performed on an array [1, 2, 3, 4, 5],
# then the array would become [3, 4, 5, 1, 2].
# Here is my algorithm:
print("\nEXERCISE FROM YOUR LIFE")
def rotate_left(array, rotations_num):
return array[rotations_num:] + array[:rotations_num]
# O(n) complexity alternative
# def rotate_left(array, rotations_num):
# a_length = len(array)
# new_array = [None]*a_length
# pos_to_left = rotations_num
#
# i = 0
# while i < a_length:
# pos_to_left = pos_to_left if pos_to_left != 0 else a_length
# to_index = a_length - pos_to_left
#         new_array[to_index] = array[i]
# pos_to_left -= 1
# i += 1
#
# return new_array
# O(n) complexity alternative suggested by mentor
# The method above is the mere translation from JS to Python.
# In Python array[-2] is a valid operation because negative indices count from the end of the list
# In JS array[-2] is not possible so you have to reset the index
# In Python therefore the function would be the below
# def rotate_left(array, rotations_num):
# a_length = len(array)
# new_array = [None] * a_length
# for i in range(a_length):
# print(i - rotations_num)
# new_array[i-rotations_num] = array[i]
# return new_array
print("\nRotate the following array [1, 2, 3, 4, 5] of 2 position to the left")
print(rotate_left([1, 2, 3, 4, 5], 2))
print("\nRotate the following array [1, 2, 3, 4, 5] of 4 position to the left")
print(rotate_left([1, 2, 3, 4, 5], 4))
print("\nRotate the following array [1, 2, 3, 4, 5] of 5 position to the left")
print(rotate_left([1, 2, 3, 4, 5], 5))
print("\nRotate the following array [1, 2, 3, 4, 5] of 6 position to the left")
print(rotate_left([1, 2, 3, 4, 5], 6))
# 17. Building and sorting an array. Write the program that asks us to type as many words as we want
# (one word per line, continuing until we just press Enter on an empty line)
# and then repeats the words back to us in alphabetical order. Make sure to test your program thoroughly; for example,
# does hitting Enter on an empty line always exit your program? Even on the first line? And the second?
# Hint: There’s a lovely array method that will give you a sorted version of an array: sorted(). Use it!
print("\nEXERCISE Building and sorting an array")
word_list = []
user_word = input("\nPlease type as many words as you want one word per line, "
"continuing until you press Enter on an empty line "
"and I will repeat them to you in alphabetical order: ")
while user_word != '':
word_list.append(user_word)
user_word = input()
print(sorted(word_list))
# 18. Table of contents. Write a table of contents program here.
# Start the program with a list holding all of the information for your table of contents
# (chapter names, page numbers, and so on).
# Then print out the information from the list in a beautifully formatted table of contents.
# Use string formatting such as left align, right align, center.
print("\nEXERCISE Table of contents with function and info array")
def print_contents(contents_list):
# get the length of the longest world from each row in rows and for each word in row + some padding
col_width = max(len(r[1]) for r in contents_list) + 10 # padding
print("\nTable of Contents\n")
for c in contents_list:
print("Chapter " + c[0] + ": " + c[1].ljust(col_width) + "page " + c[2])
contents_table = [
["1", "Getting Started", "1"],
["2", "Numbers", "9"],
["3", "Letters", "13"],
]
print_contents(contents_table)
# 19. Write a function that prints out "moo" n times.
def get_input_number(callback, msg):
# try to convert the input in an integer
try:
user_number = int(input(msg))
    # if the input is not a valid number, tell the user and prompt again recursively
except ValueError:
print("\nThat wasn't a valid number!")
return get_input_number(callback, msg)
    # otherwise pass the validated number to the callback
else:
return callback(user_number)
print("\nEXERCISE moo")
def moo(number):
print("\nmoo" * number)
get_input_number(moo, "\nPlease type how many times you want to 'moo': ")
# 20. Old-school Roman numerals. In the early days of Roman numerals,
# the Romans didn't bother with any of this new-fangled subtraction “IX” nonsense.
# No Mylady, it was straight addition, biggest to littlest—so 9 was written “VIIII,” and so on.
# Write a method that when passed an integer between 1 and 3000 (or so) returns a string containing
# the proper old-school Roman numeral. In other words, old_roman_numeral 4 should return 'IIII'.
# Make sure to test your method on a bunch of different numbers.
#
# Hint: Use the integer division and modulus methods.
#
# For reference, these are the values of the letters used:
# I = 1
# V = 5
# X = 10
# L = 50
# C = 100
# D = 500
# M = 1000
print("\nEXERCISE Old-school Roman numerals")
def old_romans(number):
result = ''
decimal = [1000, 500, 100, 50, 10, 5, 1]
roman = ["M", "D", "C", "L", "X", "V", "I"]
# looping over every element of our arrays
for i in range(len(decimal)):
# keep trying the same number until we need to move to a smaller one
while number%decimal[i] < number:
# add the matching roman number to our result string
result += roman[i]
# subtract the decimal value of the roman number from our number
number -= decimal[i]
return result
print(get_input_number(old_romans, "\nPlease type a number between 1 and 3000: "))
# 21. “Modern” Roman numerals.
# Eventually, someone thought it would be terribly clever if putting a smaller number before a larger one meant you
# had to subtract the smaller one. As a result of this development, you must now suffer.
# Rewrite your previous method to return the new-style Roman numerals so when someone calls roman_numeral 4,
# it should return 'IV', 90 should be 'XC' etc.
print("\nEXERCISE “Modern” Roman numerals.")
def modern_romans(number):
result = ''
decimal = [1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1]
roman = ["M", "CM", "D", "CD", "C", "XC", "L", "XL", "X", "IX", "V", "IV", "I"]
# looping over every element of our arrays
for i in range(len(decimal)):
# keep trying the same number until we need to move to a smaller one
while number%decimal[i] < number:
# add the matching roman number to our result string
result += roman[i]
# subtract the decimal value of the roman number from our number
number -= decimal[i]
return result
print(get_input_number(modern_romans, "\nPlease type a number between 1 and 3000: "))
|
py | b4028bb9d5b79d8b6a12b1556ecc1346457c8e9a | import time
from requests import Response
import subprocess
import os
import signal
import tempfile
import Helpers
import SettingsStorage
def create_ssh_tunnel(tun: dict):
tunnel_id: int = tun["id"]
port_to_tunnel: int = tun["port_to_tunnel"]
timeout_time: str = tun["timeout_time"]
temporaray_pubkey_to_accept: str = tun["temporary_pubkey_for_agent_ssh"]
remote_ssh_server: str = tun["remote_ssh_server"]
reverse_port: int = tun["reverse_port"]
remote_ssh_port: int = tun["remote_ssh_port"]
remote_ssh_fingerprint: str = tun["remote_ssh_fingerprint"]
temporary_tunnel_privkey: str = tun["temporary_tunnel_privkey"]
if len(remote_ssh_server) < 1:
remote_ssh_server = SettingsStorage.server_domain_ip
for t in SettingsStorage.datajson["tunnels"]:
if t["id"] == tun["id"]:
Helpers.log_that("The tunnel id {} is already running, this should not happened!".format(t["id"]))
return
    # First of all, we need to add the server SSH pubkey to known_hosts
Helpers.add_known_host(remote_ssh_server, remote_ssh_port, remote_ssh_fingerprint)
# Ok now try to connect, the SSH server should have been prepared for a long time
Helpers.log_that("Trying to create a tunnel to port {} with a reverse {} on {} ...".format(port_to_tunnel, reverse_port, remote_ssh_server))
tf = tempfile.NamedTemporaryFile(mode="w", delete=False)
tf.write(temporary_tunnel_privkey)
tf.close()
tunnel_process = subprocess.Popen(
["ssh", "-T", "-o ServerAliveInterval 30", "-o ServerAliveCountMax 3", "-o PasswordAuthentication=no",
"-R" + str(reverse_port) + ":localhost:" + str(port_to_tunnel),
"-i" + tf.name,
remote_ssh_server,
"-p" + str(remote_ssh_port)]
)
time.sleep(5)
os.remove(tf.name)
    # poll() returns None while the ssh process is still running; after the
    # 5 second wait this should mean the tunnel was established successfully
    if tunnel_process.poll() is None:
Helpers.log_that("TUNNEL SUCCESSFULLY CREATED")
tun["pid"] = tunnel_process.pid
tun["connection_state"] = Helpers.ConnectionStateEnum.connected
# Adding the tunnel so we can remember
SettingsStorage.datajson["tunnels"].append(tun)
# Send the confirmation to the API:
resp: Response = Helpers.ReqSession.post(SettingsStorage.server_url + "/agents/tunnel_changed", json=Helpers.get_tunnel_changed_json(tunnel_id, Helpers.ConnectionStateEnum.connected))
if resp.status_code == 200:
Helpers.log_that("API now knows that the tunnel is connected")
else:
mes = ""
if "detail" in resp.json().keys():
mes = resp.json()["detail"]
Helpers.log_that("ERROR: The API response for tunnel_connected {} with a message {}".format(resp.status_code, mes))
# We can save the public key of a support personnel if any
if len(temporaray_pubkey_to_accept) > 0:
Helpers.set_ssh_auth_key(timeout_time, temporaray_pubkey_to_accept)
Helpers.log_that("Auth key is set!!")
else:
Helpers.log_that("TUNNEL COULD NOT BE CREATED")
#os.kill(tunnel_process.pid, signal.SIGTERM)
# datajson["tunnels"].append(tun)
# log_that("Appending tunnel to ")
def destroy_ssh_tunnel(tun: dict):
# we want the tunnel from the storage:
for t in SettingsStorage.datajson["tunnels"]:
if t["id"] == tun["id"]:
tun = t
break
Helpers.log_that("Trying to kill Tunnel id {}".format(tun["id"]))
try:
os.kill(int(tun["pid"]), signal.SIGTERM)
except OSError as e:
Helpers.log_that("Process not there")
except KeyError as e:
Helpers.log_that("Process ID not in the structure :O")
try:
SettingsStorage.datajson["tunnels"].remove(tun)
except KeyError as e:
Helpers.log_that("We have no information about such tunnel")
resp: Response = Helpers.ReqSession.post(
SettingsStorage.server_url + "/agents/tunnel_changed", json=Helpers.get_tunnel_changed_json(tun["id"], Helpers.ConnectionStateEnum.disconnected))
if resp.status_code == 200:
Helpers.log_that("API now knows that the tunnel is disconnected")
else:
mes = ""
if "detail" in resp.json().keys():
mes = resp.json()["detail"]
Helpers.log_that(
"ERROR: The API response for tunnel_connected {} with a message {}".format(resp.status_code, mes)) |
py | b4028c35fdaec5a936bb3d84c81fa71f7c59fba1 | from fontTools.misc.py23 import *
from . import DefaultTable
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval
import struct
VDMX_HeaderFmt = """
> # big endian
version: H # Version number (0 or 1)
numRecs: H # Number of VDMX groups present
numRatios: H # Number of aspect ratio groupings
"""
# the VDMX header is followed by an array of RatRange[numRatios] (i.e. aspect
# ratio ranges);
VDMX_RatRangeFmt = """
> # big endian
bCharSet: B # Character set
xRatio: B # Value to use for x-Ratio
yStartRatio: B # Starting y-Ratio value
yEndRatio: B # Ending y-Ratio value
"""
# followed by an array of offset[numRatios] from start of VDMX table to the
# VDMX Group for this ratio range (offsets will be re-calculated on compile);
# followed by an array of Group[numRecs] records;
VDMX_GroupFmt = """
> # big endian
recs: H # Number of height records in this group
startsz: B # Starting yPelHeight
endsz: B # Ending yPelHeight
"""
# followed by an array of vTable[recs] records.
VDMX_vTableFmt = """
> # big endian
yPelHeight: H # yPelHeight to which values apply
yMax: h # Maximum value (in pels) for this yPelHeight
yMin: h # Minimum value (in pels) for this yPelHeight
"""
class table_V_D_M_X_(DefaultTable.DefaultTable):
def decompile(self, data, ttFont):
pos = 0 # track current position from to start of VDMX table
dummy, data = sstruct.unpack2(VDMX_HeaderFmt, data, self)
pos += sstruct.calcsize(VDMX_HeaderFmt)
self.ratRanges = []
for i in range(self.numRatios):
ratio, data = sstruct.unpack2(VDMX_RatRangeFmt, data)
pos += sstruct.calcsize(VDMX_RatRangeFmt)
# the mapping between a ratio and a group is defined further below
ratio['groupIndex'] = None
self.ratRanges.append(ratio)
lenOffset = struct.calcsize('>H')
_offsets = [] # temporarily store offsets to groups
for i in range(self.numRatios):
offset = struct.unpack('>H', data[0:lenOffset])[0]
data = data[lenOffset:]
pos += lenOffset
_offsets.append(offset)
self.groups = []
for groupIndex in range(self.numRecs):
# the offset to this group from beginning of the VDMX table
currOffset = pos
group, data = sstruct.unpack2(VDMX_GroupFmt, data)
            # the group length and bounding sizes are re-calculated on compile
recs = group.pop('recs')
startsz = group.pop('startsz')
endsz = group.pop('endsz')
pos += sstruct.calcsize(VDMX_GroupFmt)
for j in range(recs):
vTable, data = sstruct.unpack2(VDMX_vTableFmt, data)
vTableLength = sstruct.calcsize(VDMX_vTableFmt)
pos += vTableLength
# group is a dict of (yMax, yMin) tuples keyed by yPelHeight
group[vTable['yPelHeight']] = (vTable['yMax'], vTable['yMin'])
# make sure startsz and endsz match the calculated values
minSize = min(group.keys())
maxSize = max(group.keys())
assert startsz == minSize, \
"startsz (%s) must equal min yPelHeight (%s): group %d" % \
                (startsz, minSize, groupIndex)
assert endsz == maxSize, \
"endsz (%s) must equal max yPelHeight (%s): group %d" % \
                (endsz, maxSize, groupIndex)
self.groups.append(group)
# match the defined offsets with the current group's offset
for offsetIndex, offsetValue in enumerate(_offsets):
            # when numRecs < numRatios there can be more than one ratio range
# sharing the same VDMX group
if currOffset == offsetValue:
# map the group with the ratio range thas has the same
# index as the offset to that group (it took me a while..)
self.ratRanges[offsetIndex]['groupIndex'] = groupIndex
# check that all ratio ranges have a group
for i in range(self.numRatios):
ratio = self.ratRanges[i]
if ratio['groupIndex'] is None:
from fontTools import ttLib
raise ttLib.TTLibError(
"no group defined for ratRange %d" % i)
def _getOffsets(self):
"""
Calculate offsets to VDMX_Group records.
For each ratRange return a list of offset values from the beginning of
the VDMX table to a VDMX_Group.
"""
lenHeader = sstruct.calcsize(VDMX_HeaderFmt)
lenRatRange = sstruct.calcsize(VDMX_RatRangeFmt)
lenOffset = struct.calcsize('>H')
lenGroupHeader = sstruct.calcsize(VDMX_GroupFmt)
lenVTable = sstruct.calcsize(VDMX_vTableFmt)
# offset to the first group
pos = lenHeader + self.numRatios*lenRatRange + self.numRatios*lenOffset
groupOffsets = []
for group in self.groups:
groupOffsets.append(pos)
lenGroup = lenGroupHeader + len(group) * lenVTable
pos += lenGroup # offset to next group
offsets = []
for ratio in self.ratRanges:
groupIndex = ratio['groupIndex']
offsets.append(groupOffsets[groupIndex])
return offsets
def compile(self, ttFont):
if not(self.version == 0 or self.version == 1):
from fontTools import ttLib
raise ttLib.TTLibError(
"unknown format for VDMX table: version %s" % self.version)
data = sstruct.pack(VDMX_HeaderFmt, self)
for ratio in self.ratRanges:
data += sstruct.pack(VDMX_RatRangeFmt, ratio)
# recalculate offsets to VDMX groups
for offset in self._getOffsets():
data += struct.pack('>H', offset)
for group in self.groups:
recs = len(group)
startsz = min(group.keys())
endsz = max(group.keys())
gHeader = {'recs': recs, 'startsz': startsz, 'endsz': endsz}
data += sstruct.pack(VDMX_GroupFmt, gHeader)
for yPelHeight, (yMax, yMin) in sorted(group.items()):
vTable = {'yPelHeight': yPelHeight, 'yMax': yMax, 'yMin': yMin}
data += sstruct.pack(VDMX_vTableFmt, vTable)
return data
def toXML(self, writer, ttFont):
writer.simpletag("version", value=self.version)
writer.newline()
writer.begintag("ratRanges")
writer.newline()
for ratio in self.ratRanges:
groupIndex = ratio['groupIndex']
writer.simpletag(
"ratRange",
bCharSet=ratio['bCharSet'],
xRatio=ratio['xRatio'],
yStartRatio=ratio['yStartRatio'],
yEndRatio=ratio['yEndRatio'],
groupIndex=groupIndex
)
writer.newline()
writer.endtag("ratRanges")
writer.newline()
writer.begintag("groups")
writer.newline()
for groupIndex in range(self.numRecs):
group = self.groups[groupIndex]
recs = len(group)
startsz = min(group.keys())
endsz = max(group.keys())
writer.begintag("group", index=groupIndex)
writer.newline()
writer.comment("recs=%d, startsz=%d, endsz=%d" %
(recs, startsz, endsz))
writer.newline()
for yPelHeight, (yMax, yMin) in sorted(group.items()):
writer.simpletag(
"record",
[('yPelHeight', yPelHeight), ('yMax', yMax), ('yMin', yMin)])
writer.newline()
writer.endtag("group")
writer.newline()
writer.endtag("groups")
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
if name == "version":
self.version = safeEval(attrs["value"])
elif name == "ratRanges":
if not hasattr(self, "ratRanges"):
self.ratRanges = []
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name == "ratRange":
if not hasattr(self, "numRatios"):
self.numRatios = 1
else:
self.numRatios += 1
ratio = {
"bCharSet": safeEval(attrs["bCharSet"]),
"xRatio": safeEval(attrs["xRatio"]),
"yStartRatio": safeEval(attrs["yStartRatio"]),
"yEndRatio": safeEval(attrs["yEndRatio"]),
"groupIndex": safeEval(attrs["groupIndex"])
}
self.ratRanges.append(ratio)
elif name == "groups":
if not hasattr(self, "groups"):
self.groups = []
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name == "group":
if not hasattr(self, "numRecs"):
self.numRecs = 1
else:
self.numRecs += 1
group = {}
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name == "record":
yPelHeight = safeEval(attrs["yPelHeight"])
yMax = safeEval(attrs["yMax"])
yMin = safeEval(attrs["yMin"])
group[yPelHeight] = (yMax, yMin)
self.groups.append(group)
|
py | b4028e7e6be0d66626b5b7469f9b60939e0bfcc7 | """
.. py:module:: test_xspf
:platform: Unix, Windows
:synopsis: tests the xspf-related functions for the playlister utility.
"""
import pytest
from .context import xspf
class TestXSPF(object):
"""Groups the tests of the xspf-related functions"""
def test_to_xspf_track(self):
"""Tests converting a track record to an xspf string"""
test_track = {
"Location": "/foo/bar/baz.mp3",
"Total Time": "192000",
"Name": "testing123",
"Artist": "Kool Kat",
"Album": "For the road"
}
result = """ <track>
<location>file:///foo/bar/baz.mp3</location>
<title>testing123</title>
<creator>Kool Kat</creator>
<album>For the road</album>
<duration>192000</duration>
</track>"""
assert(xspf.to_xspf_track(test_track) == result) |
py | b4028ece5bd95f114f4216a26a8ee36a5ff8199b | #!python
"""
wflow_prepare_step2
===================
wflow data preparation script. Data preparation can be done by hand or using
the two scripts. This script does the resampling. This scripts need the pcraster and gdal executables to be
available in you search path.
Usage::
wflow_prepare_step2 [-W workdir][-f][-h] -I inifile
::
-f force recreation of ldd if it already exists
-h show this information
-W set the working directory, default is current dir
-I name of the ini file with settings
$Id: $
"""
import wflow.wflow_lib as tr
import os
import os.path
import getopt
import configparser
import sys
import numpy as np
tr.Verbose = 1
def usage(*args):
sys.stdout = sys.stderr
for msg in args:
print(msg)
print(__doc__)
sys.exit(0)
def configget(config, section, var, default):
"""
"""
try:
ret = config.get(section, var)
except:
print("returning default (" + default + ") for " + section + ":" + var)
ret = default
return ret
def OpenConf(fn):
config = configparser.ConfigParser()
config.optionxform = str
if os.path.exists(fn):
config.read(fn)
else:
print("Cannot open config file: " + fn)
sys.exit(1)
return config
def resamplemaps(step1dir, step2dir):
"""
Resample the maps from step1 and rename them in the process
"""
os.system(
"resample --clone "
+ step2dir
+ "/cutout.map "
+ step1dir
+ "/dem10.map "
+ step2dir
+ "/wflow_dem10.map"
)
os.system(
"resample --clone "
+ step2dir
+ "/cutout.map "
+ step1dir
+ "/dem25.map "
+ step2dir
+ "/wflow_dem25.map"
)
os.system(
"resample --clone "
+ step2dir
+ "/cutout.map "
+ step1dir
+ "/dem33.map "
+ step2dir
+ "/wflow_dem33.map"
)
os.system(
"resample --clone "
+ step2dir
+ "/cutout.map "
+ step1dir
+ "/dem50.map "
+ step2dir
+ "/wflow_dem50.map"
)
os.system(
"resample --clone "
+ step2dir
+ "/cutout.map "
+ step1dir
+ "/dem66.map "
+ step2dir
+ "/wflow_dem66.map"
)
os.system(
"resample --clone "
+ step2dir
+ "/cutout.map "
+ step1dir
+ "/dem75.map "
+ step2dir
+ "/wflow_dem75.map"
)
os.system(
"resample --clone "
+ step2dir
+ "/cutout.map "
+ step1dir
+ "/dem90.map "
+ step2dir
+ "/wflow_dem90.map"
)
os.system(
"resample --clone "
+ step2dir
+ "/cutout.map "
+ step1dir
+ "/demavg.map "
+ step2dir
+ "/wflow_dem.map"
)
os.system(
"resample --clone "
+ step2dir
+ "/cutout.map "
+ step1dir
+ "/demmin.map "
+ step2dir
+ "/wflow_demmin.map"
)
os.system(
"resample --clone "
+ step2dir
+ "/cutout.map "
+ step1dir
+ "/demmax.map "
+ step2dir
+ "/wflow_demmax.map"
)
os.system(
"resample --clone "
+ step2dir
+ "/cutout.map "
+ step1dir
+ "/riverlength_fact.map "
+ step2dir
+ "/wflow_riverlength_fact.map"
)
os.system(
"resample --clone "
+ step2dir
+ "/cutout.map "
+ step1dir
+ "/catchment_overall.map "
+ step2dir
+ "/catchment_cut.map"
)
os.system(
"resample --clone "
+ step2dir
+ "/cutout.map "
+ step1dir
+ "/rivers.map "
+ step2dir
+ "/wflow_riverburnin.map"
)
def main():
"""
"""
workdir = "."
inifile = "wflow_prepare.ini"
try:
opts, args = getopt.getopt(sys.argv[1:], "W:hI:f")
except getopt.error as msg:
usage(msg)
for o, a in opts:
if o == "-W":
workdir = a
if o == "-I":
inifile = a
if o == "-h":
usage()
if o == "-f":
recreate = True
os.chdir(workdir)
config = OpenConf(workdir + "/" + inifile)
step1dir = configget(config, "directories", "step1dir", "step1")
step2dir = configget(config, "directories", "step2dir", "step2")
snapgaugestoriver = bool(
int(configget(config, "settings", "snapgaugestoriver", "1"))
)
# make the directories to save results in
if not os.path.isdir(step1dir + "/"):
os.makedirs(step1dir)
if not os.path.isdir(step2dir):
os.makedirs(step2dir)
##first make the clone map
try:
Xul = float(config.get("settings", "Xul"))
Yul = float(config.get("settings", "Yul"))
Xlr = float(config.get("settings", "Xlr"))
Ylr = float(config.get("settings", "Ylr"))
except:
print("Xul, Xul, Xlr and Ylr are required entries in the ini file")
sys.exit(1)
csize = float(configget(config, "settings", "cellsize", "1"))
try:
gauges_x = config.get("settings", "gauges_x")
gauges_y = config.get("settings", "gauges_y")
except:
print("gauges_x and gauges_y are required entries in the ini file")
sys.exit(1)
strRiver = int(configget(config, "settings", "riverorder_step2", "4"))
corevolume = float(configget(config, "settings", "corevolume", "1E35"))
catchmentprecipitation = float(
configget(config, "settings", "catchmentprecipitation", "1E35")
)
corearea = float(configget(config, "settings", "corearea", "1E35"))
outflowdepth = float(configget(config, "settings", "lddoutflowdepth", "1E35"))
lddmethod = configget(config, "settings", "lddmethod", "dem")
lddglobaloption = configget(config, "settings", "lddglobaloption", "lddout")
tr.setglobaloption(lddglobaloption)
nrrow = round(abs(Yul - Ylr) / csize)
nrcol = round(abs(Xlr - Xul) / csize)
mapstr = (
"mapattr -s -S -R "
+ str(nrrow)
+ " -C "
+ str(nrcol)
+ " -l "
+ str(csize)
+ " -x "
+ str(Xul)
+ " -y "
+ str(Yul)
+ " -P yb2t "
+ step2dir
+ "/cutout.map"
)
os.system(mapstr)
tr.setclone(step2dir + "/cutout.map")
lu_water = configget(config, "files", "lu_water", "")
lu_paved = configget(config, "files", "lu_paved", "")
if lu_water:
os.system(
"resample --clone "
+ step2dir
+ "/cutout.map "
+ lu_water
+ " "
+ step2dir
+ "/wflow_waterfrac.map"
)
if lu_paved:
os.system(
"resample --clone "
+ step2dir
+ "/cutout.map "
+ lu_paved
+ " "
+ step2dir
+ "/PathFrac.map"
)
#
try:
lumap = config.get("files", "landuse")
except:
print("no landuse map...creating uniform map")
clone = tr.readmap(step2dir + "/cutout.map")
tr.report(tr.nominal(clone), step2dir + "/wflow_landuse.map")
else:
os.system(
"resample --clone "
+ step2dir
+ "/cutout.map "
+ lumap
+ " "
+ step2dir
+ "/wflow_landuse.map"
)
try:
soilmap = config.get("files", "soil")
except:
print("no soil map..., creating uniform map")
clone = tr.readmap(step2dir + "/cutout.map")
tr.report(tr.nominal(clone), step2dir + "/wflow_soil.map")
else:
os.system(
"resample --clone "
+ step2dir
+ "/cutout.map "
+ soilmap
+ " "
+ step2dir
+ "/wflow_soil.map"
)
resamplemaps(step1dir, step2dir)
dem = tr.readmap(step2dir + "/wflow_dem.map")
demmin = tr.readmap(step2dir + "/wflow_demmin.map")
demmax = tr.readmap(step2dir + "/wflow_demmax.map")
# catchcut = tr.readmap(step2dir + "/catchment_cut.map")
catchcut = tr.readmap(step2dir + "/cutout.map")
# now apply the area of interest (catchcut) to the DEM
# dem=tr.ifthen(catchcut >=1 , dem)
#
# See if there is a shape file of the river to burn in
try:
rivshp = config.get("files", "river")
except:
print("no river file specified")
riverburn = tr.readmap(step2dir + "/wflow_riverburnin.map")
else:
print("river file speficied.....")
# rivshpattr = config.get("files","riverattr")
tr.report(dem * 0.0, step2dir + "/nilmap.map")
thestr = (
"gdal_translate -of GTiff "
+ step2dir
+ "/nilmap.map "
+ step2dir
+ "/wflow_riverburnin.tif"
)
os.system(thestr)
rivshpattr = os.path.splitext(os.path.basename(rivshp))[0]
os.system(
"gdal_rasterize -burn 1 -l "
+ rivshpattr
+ " "
+ rivshp
+ " "
+ step2dir
+ "/wflow_riverburnin.tif"
)
thestr = (
"gdal_translate -of PCRaster "
+ step2dir
+ "/wflow_riverburnin.tif "
+ step2dir
+ "/wflow_riverburnin.map"
)
os.system(thestr)
riverburn = tr.readmap(step2dir + "/wflow_riverburnin.map")
# ldddem = tr.ifthenelse(riverburn >= 1.0, dem -1000 , dem)
# Only burn within the original catchment
riverburn = tr.ifthen(tr.scalar(catchcut) >= 1, riverburn)
# Now setup a very high wall around the catchment that is scale
# based on the distance to the catchment so that it slopes away from the
# catchment
if lddmethod != "river":
print("Burning in highres-river ...")
disttocatch = tr.spread(tr.nominal(catchcut), 0.0, 1.0)
demmax = tr.ifthenelse(
tr.scalar(catchcut) >= 1.0,
demmax,
demmax + (tr.celllength() * 100.0) / disttocatch,
)
tr.setglobaloption("unitcell")
# demregional=tr.windowaverage(demmin,100)
demburn = tr.cover(tr.ifthen(tr.boolean(riverburn), demmin - 100.0), demmax)
else:
print("using average dem..")
demburn = dem
ldd = tr.lddcreate_save(
step2dir + "/wflow_ldd.map",
demburn,
True,
outflowdepth=outflowdepth,
corevolume=corevolume,
catchmentprecipitation=catchmentprecipitation,
corearea=corearea,
)
# Find catchment (overall)
outlet = tr.find_outlet(ldd)
sub = tr.subcatch(ldd, outlet)
tr.report(sub, step2dir + "/wflow_catchment.map")
tr.report(outlet, step2dir + "/wflow_outlet.map")
# make river map
strorder = tr.streamorder(ldd)
tr.report(strorder, step2dir + "/wflow_streamorder.map")
river = tr.ifthen(tr.boolean(strorder >= strRiver), strorder)
tr.report(river, step2dir + "/wflow_river.map")
# make subcatchments
# os.system("col2map --clone " + step2dir + "/cutout.map gauges.col " + step2dir + "/wflow_gauges.map")
exec("X=np.array(" + gauges_x + ")")
exec("Y=np.array(" + gauges_y + ")")
tr.setglobaloption("unittrue")
outlmap = tr.points_to_map(dem, X, Y, 0.5)
tr.report(outlmap, step2dir + "/wflow_gauges_.map")
if snapgaugestoriver:
print("Snapping gauges to river")
tr.report(outlmap, step2dir + "/wflow_orggauges.map")
outlmap = tr.snaptomap(outlmap, river)
outlmap = tr.ifthen(outlmap > 0, outlmap)
tr.report(outlmap, step2dir + "/wflow_gauges.map")
scatch = tr.subcatch(ldd, outlmap)
tr.report(scatch, step2dir + "/wflow_subcatch.map")
if __name__ == "__main__":
main()
|
py | b4028f1117d89192e9dbb8f8a037ce704dc95257 | import inspect
import warnings
import dask
import numpy as np
import xarray as xr
from climpred.constants import CLIMPRED_DIMS, CONCAT_KWARGS, PM_CALENDAR_STR
from .checks import (
has_dims,
has_valid_lead_units,
warn_if_chunking_would_increase_performance,
)
from .comparisons import (
ALL_COMPARISONS,
COMPARISON_ALIASES,
HINDCAST_COMPARISONS,
__m2o,
)
from .exceptions import KeywordError
from .metrics import ALL_METRICS, METRIC_ALIASES
from .prediction import compute_hindcast, compute_perfect_model
from .reference import compute_persistence
from .stats import dpp, varweighted_mean_period
from .utils import (
_transpose_and_rechunk_to,
assign_attrs,
convert_time_index,
find_start_dates_for_given_init,
get_comparison_class,
get_lead_cftime_shift_args,
get_metric_class,
lead_units_equal_control_time_stride,
rechunk_to_single_chunk_if_more_than_one_chunk_along_dim,
shift_cftime_singular,
)
def _resample(hind, resample_dim):
"""Resample with replacement in dimension ``resample_dim``.
Args:
hind (xr.object): input xr.object to be resampled.
resample_dim (str): dimension to resample along.
Returns:
xr.object: resampled along ``resample_dim``.
"""
to_be_resampled = hind[resample_dim].values
smp = np.random.choice(to_be_resampled, len(to_be_resampled))
smp_hind = hind.sel({resample_dim: smp})
    # when resampling over 'init', keep the resampled init labels; otherwise restore the original coordinate values
if resample_dim != "init":
smp_hind[resample_dim] = hind[resample_dim].values
return smp_hind
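
# Hedged usage sketch (added for illustration; not part of the original module).
# The toy shapes and coordinate values below are assumptions.
#
#   import numpy as np
#   import xarray as xr
#   toy = xr.DataArray(
#       np.random.rand(3, 4, 5),
#       dims=("member", "init", "lead"),
#       coords={"member": [1, 2, 3]},
#   )
#   smp = _resample(toy, "member")  # same shape; members drawn with replacement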
def _resample_iterations(init, iterations, dim="member", dim_max=None, replace=True):
"""Resample over ``dim`` by index ``iterations`` times.
.. note::
This gives the same result as `_resample_iterations_idx`. When using dask, the
number of tasks in `_resample_iterations` will scale with iterations but
constant chunksize, whereas the tasks in `_resample_iterations_idx` will stay
constant with increasing chunksize.
Args:
init (xr.DataArray, xr.Dataset): Initialized prediction ensemble.
iterations (int): Number of bootstrapping iterations.
dim (str): Dimension name to bootstrap over. Defaults to ``'member'``.
dim_max (int): Number of items to select in `dim`.
replace (bool): Bootstrapping with or without replacement. Defaults to ``True``.
Returns:
xr.DataArray, xr.Dataset: Bootstrapped data with additional dim ```iteration```
"""
if dim_max is not None and dim_max <= init[dim].size:
# select only dim_max items
select_dim_items = dim_max
new_dim = init[dim].isel({dim: slice(None, dim_max)})
else:
select_dim_items = init[dim].size
new_dim = init[dim]
if replace:
idx = np.random.randint(0, init[dim].size, (iterations, select_dim_items))
elif not replace:
# create 2d np.arange()
idx = np.linspace(
(np.arange(select_dim_items)),
(np.arange(select_dim_items)),
iterations,
dtype="int",
)
# shuffle each line
for ndx in np.arange(iterations):
np.random.shuffle(idx[ndx])
idx_da = xr.DataArray(
idx,
dims=("iteration", dim),
coords=({"iteration": range(iterations), dim: new_dim}),
)
init_smp = []
for i in np.arange(iterations):
idx = idx_da.sel(iteration=i).data
init_smp2 = init.isel({dim: idx}).assign_coords({dim: new_dim})
init_smp.append(init_smp2)
init_smp = xr.concat(init_smp, dim="iteration", **CONCAT_KWARGS)
init_smp["iteration"] = np.arange(1, 1 + iterations)
return init_smp
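
# Hedged usage sketch (added for illustration; not part of the original module),
# continuing the toy DataArray from the sketch above:
#
#   boot = _resample_iterations(toy, iterations=100, dim="member")
#   # boot gains an 'iteration' dimension of size 100; each iteration holds a
#   # different with-replacement draw of the 'member' labels.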
def _resample_iterations_idx(
init, iterations, dim="member", replace=True, chunk=True, dim_max=None
):
"""Resample over ``dim`` by index ``iterations`` times.
.. note::
This is a much faster way to bootstrap than resampling each iteration
individually and applying the function to it. However, this will create a
DataArray with dimension ``iteration`` of size ``iterations``. It is probably
best to do this out-of-memory with ``dask`` if you are doing a large number
of iterations or using spatial output (i.e., not time series data).
Args:
init (xr.DataArray, xr.Dataset): Initialized prediction ensemble.
iterations (int): Number of bootstrapping iterations.
dim (str): Dimension name to bootstrap over. Defaults to ``'member'``.
replace (bool): Bootstrapping with or without replacement. Defaults to ``True``.
chunk: (bool): Auto-chunk along chunking_dims to get optimal blocksize
dim_max (int): Number of indices from `dim` to return. Not implemented.
Returns:
xr.DataArray, xr.Dataset: Bootstrapped data with additional dim ```iteration```
"""
if dask.is_dask_collection(init):
init = init.chunk({"lead": -1, "member": -1})
init = init.copy(deep=True)
def select_bootstrap_indices_ufunc(x, idx):
"""Selects multi-level indices ``idx`` from xarray object ``x`` for all
iterations."""
# `apply_ufunc` sometimes adds a singleton dimension on the end, so we squeeze
# it out here. This leverages multi-level indexing from numpy, so we can
# select a different set of, e.g., ensemble members for each iteration and
# construct one large DataArray with ``iterations`` as a dimension.
return np.moveaxis(x.squeeze()[idx.squeeze().transpose()], 0, -1)
if dask.is_dask_collection(init):
if chunk:
chunking_dims = [d for d in init.dims if d not in CLIMPRED_DIMS]
init = _chunk_before_resample_iterations_idx(
init, iterations, chunking_dims
)
# resample with or without replacement
if replace:
idx = np.random.randint(0, init[dim].size, (iterations, init[dim].size))
elif not replace:
# create 2d np.arange()
idx = np.linspace(
(np.arange(init[dim].size)),
(np.arange(init[dim].size)),
iterations,
dtype="int",
)
# shuffle each line
for ndx in np.arange(iterations):
np.random.shuffle(idx[ndx])
idx_da = xr.DataArray(
idx,
dims=("iteration", dim),
coords=({"iteration": range(iterations), dim: init[dim]}),
)
transpose_kwargs = (
{"transpose_coords": False} if isinstance(init, xr.DataArray) else {}
)
return xr.apply_ufunc(
select_bootstrap_indices_ufunc,
init.transpose(dim, ..., **transpose_kwargs),
idx_da,
dask="parallelized",
output_dtypes=[float],
)
def _distribution_to_ci(ds, ci_low, ci_high, dim="iteration"):
"""Get confidence intervals from bootstrapped distribution.
Needed for bootstrapping confidence intervals and p_values of a metric.
Args:
ds (xarray object): distribution.
ci_low (float): low confidence interval.
ci_high (float): high confidence interval.
dim (str): dimension to apply xr.quantile to. Default: 'iteration'
Returns:
        xarray object: quantiles of the distribution at ci_low and ci_high.
"""
ds = rechunk_to_single_chunk_if_more_than_one_chunk_along_dim(ds, dim)
return ds.quantile(q=[ci_low, ci_high], dim=dim, skipna=False)
def _pvalue_from_distributions(simple_fct, init, metric=None):
"""Get probability that skill of a reference forecast (e.g., persistence or
uninitialized skill) is larger than initialized skill.
Needed for bootstrapping confidence intervals and p_values of a metric in
the hindcast framework. Checks whether a simple forecast like persistence
or uninitialized performs better than initialized forecast. Need to keep in
mind the orientation of metric (whether larger values are better or worse
than smaller ones.)
Args:
simple_fct (xarray object): persistence or uninitialized skill.
init (xarray object): hindcast skill.
metric (Metric): metric class Metric
Returns:
pv (xarray object): probability that simple forecast performs better
than initialized forecast.
"""
pv = ((simple_fct - init) > 0).sum("iteration") / init.iteration.size
if not metric.positive:
pv = 1 - pv
return pv
def bootstrap_uninitialized_ensemble(hind, hist):
"""Resample uninitialized hindcast from historical members.
Note:
Needed for bootstrapping confidence intervals and p_values of a metric in
the hindcast framework. Takes hind.lead.size timesteps from historical at
same forcing and rearranges them into ensemble and member dimensions.
Args:
hind (xarray object): hindcast.
hist (xarray object): historical uninitialized.
Returns:
        uninit_hind (xarray object): uninitialized hindcast with hind.coords.
"""
has_dims(hist, "member", "historical ensemble")
has_dims(hind, "member", "initialized hindcast ensemble")
# Put this after `convert_time_index` since it assigns 'years' attribute if the
# `init` dimension is a `float` or `int`.
has_valid_lead_units(hind)
# find range for bootstrapping
first_init = max(hist.time.min(), hind["init"].min())
n, freq = get_lead_cftime_shift_args(hind.lead.attrs["units"], hind.lead.size)
hist_last = shift_cftime_singular(hist.time.max(), -1 * n, freq)
last_init = min(hist_last, hind["init"].max())
hind = hind.sel(init=slice(first_init, last_init))
uninit_hind = []
for init in hind.init.values:
# take uninitialized members from hist at init forcing
# (Goddard et al. allows 5 year forcing range here)
uninit_at_one_init_year = hist.sel(
time=slice(
shift_cftime_singular(init, 1, freq),
shift_cftime_singular(init, n, freq),
),
).rename({"time": "lead"})
uninit_at_one_init_year["lead"] = np.arange(
1, 1 + uninit_at_one_init_year["lead"].size
)
uninit_hind.append(uninit_at_one_init_year)
uninit_hind = xr.concat(uninit_hind, "init")
uninit_hind["init"] = hind["init"].values
uninit_hind.lead.attrs["units"] = hind.lead.attrs["units"]
uninit_hind["member"] = hist["member"].values
return (
_transpose_and_rechunk_to(
uninit_hind, hind.isel(member=[0] * uninit_hind.member.size)
)
if dask.is_dask_collection(uninit_hind)
else uninit_hind
)
def bootstrap_uninit_pm_ensemble_from_control_cftime(init_pm, control):
"""Create a pseudo-ensemble from control run.
Bootstrap random numbers for years to construct an uninitialized ensemble from.
    This assumes a continuous control simulation without gaps.
Note:
Needed for block bootstrapping a metric in perfect-model framework. Takes
random segments of length ``block_length`` from control based on ``dayofyear``
(and therefore assumes a constant climate control simulation) and rearranges
them into ensemble and member dimensions.
Args:
init_pm (xarray object): initialized ensemble simulation.
control (xarray object): control simulation.
Returns:
uninit_pm (xarray object): uninitialized ensemble generated from control run.
"""
lead_units_equal_control_time_stride(init_pm, control)
# short cut if annual leads
if init_pm.lead.attrs["units"] == "years":
return _bootstrap_by_stacking(init_pm, control)
block_length = init_pm.lead.size
freq = get_lead_cftime_shift_args(init_pm.lead.attrs["units"], block_length)[1]
nmember = init_pm.member.size
# start and end years possible to resample the actual uninitialized ensembles from
c_start_year = control.time.min().dt.year.astype("int")
    # don't resample from years for which control won't have timesteps for all leads
c_end_year = (
shift_cftime_singular(control.time.max(), -block_length, freq).dt.year.astype(
"int"
)
- 1
)
def sel_time(start_year_int, suitable_start_dates):
"""Select time segments from control from ``suitable_start_dates`` based on
year ``start_year_int``."""
start_time = suitable_start_dates.time.sel(time=str(start_year_int))
end_time = shift_cftime_singular(start_time, block_length - 1, freq)
new = control.sel(time=slice(*start_time, *end_time))
new["time"] = init_pm.lead.values
return new
def create_pseudo_members(init):
"""For every initialization take a different set of start years."""
startlist = np.random.randint(c_start_year, c_end_year, nmember)
suitable_start_dates = find_start_dates_for_given_init(control, init)
return xr.concat(
(sel_time(start, suitable_start_dates) for start in startlist),
dim="member",
**CONCAT_KWARGS,
)
uninit = xr.concat(
(create_pseudo_members(init) for init in init_pm.init),
dim="init",
**CONCAT_KWARGS,
).rename({"time": "lead"})
uninit["member"] = init_pm.member.values
uninit["lead"] = init_pm.lead
# chunk to same dims
transpose_kwargs = (
{"transpose_coords": False} if isinstance(init_pm, xr.DataArray) else {}
)
uninit = uninit.transpose(*init_pm.dims, **transpose_kwargs)
return (
_transpose_and_rechunk_to(uninit, init_pm)
if dask.is_dask_collection(uninit)
else uninit
)
def _bootstrap_by_stacking(init_pm, control):
"""Bootstrap member, lead, init from control by reshaping. Fast track of function
`bootstrap_uninit_pm_ensemble_from_control_cftime` when lead units is 'years'."""
assert type(init_pm) == type(control)
lead_unit = init_pm.lead.attrs["units"]
if isinstance(init_pm, xr.Dataset):
init_pm = init_pm.to_array()
init_was_dataset = True
else:
init_was_dataset = False
if isinstance(control, xr.Dataset):
control = control.to_array()
init_size = init_pm.init.size * init_pm.member.size * init_pm.lead.size
# select random start points
new_time = np.random.randint(
0, control.time.size - init_pm.lead.size, init_size // (init_pm.lead.size)
)
new_time = np.array(
[np.arange(s, s + init_pm.lead.size) for s in new_time]
).flatten()[:init_size]
larger = control.isel(time=new_time)
fake_init = init_pm.stack(time=tuple(d for d in init_pm.dims if d in CLIMPRED_DIMS))
# exchange values
transpose_kwargs = (
{"transpose_coords": False} if isinstance(init_pm, xr.DataArray) else {}
)
larger = larger.transpose(*fake_init.dims, **transpose_kwargs)
fake_init.data = larger.data
fake_uninit = fake_init.unstack()
if init_was_dataset:
fake_uninit = fake_uninit.to_dataset(dim="variable")
fake_uninit["lead"] = init_pm["lead"]
fake_uninit.lead.attrs["units"] = lead_unit
return fake_uninit
def _bootstrap_hindcast_over_init_dim(
hind,
hist,
verif,
dim,
reference,
resample_dim,
iterations,
metric,
comparison,
compute,
reference_compute,
resample_uninit,
**metric_kwargs,
):
"""Bootstrap hindcast skill over the ``init`` dimension.
When bootstrapping over the ``member`` dimension, an additional dimension
``iteration`` can be added and skill can be computed over that entire
dimension in parallel, since all members are aligned the same way.
However, to our knowledge, when bootstrapping over the ``init`` dimension,
one must evaluate each iteration independently. I.e., in a looped fashion,
since alignment of initializations and target dates is unique to each
iteration.
See ``bootstrap_compute`` for explanation of inputs.
"""
pers_skill = []
bootstrapped_init_skill = []
bootstrapped_uninit_skill = []
for i in range(iterations):
# resample with replacement
smp_hind = _resample(hind, resample_dim)
# compute init skill
init_skill = compute(
smp_hind,
verif,
metric=metric,
comparison=comparison,
add_attrs=False,
dim=dim,
**metric_kwargs,
)
# reset inits when probabilistic, otherwise tests fail
if (
resample_dim == "init"
and metric.probabilistic
and "init" in init_skill.coords
):
init_skill["init"] = hind.init.values
bootstrapped_init_skill.append(init_skill)
if "uninitialized" in reference:
# generate uninitialized ensemble from hist
uninit_hind = resample_uninit(hind, hist)
# compute uninit skill
bootstrapped_uninit_skill.append(
compute(
uninit_hind,
verif,
metric=metric,
comparison=comparison,
dim=dim,
add_attrs=False,
**metric_kwargs,
)
)
if "persistence" in reference:
# compute persistence skill
# impossible for probabilistic
if not metric.probabilistic:
pers_skill.append(
reference_compute(
smp_hind,
verif,
metric=metric,
dim=dim,
add_attrs=False,
**metric_kwargs,
)
)
bootstrapped_init_skill = xr.concat(
bootstrapped_init_skill, dim="iteration", **CONCAT_KWARGS
)
if "uninitialized" in reference:
bootstrapped_uninit_skill = xr.concat(
bootstrapped_uninit_skill, dim="iteration", **CONCAT_KWARGS
)
else:
bootstrapped_uninit_skill = None
if "persistence" in reference:
if pers_skill != []:
bootstrapped_pers_skill = xr.concat(
pers_skill, dim="iteration", **CONCAT_KWARGS
)
else:
bootstrapped_pers_skill = None
return (
bootstrapped_init_skill,
bootstrapped_uninit_skill,
bootstrapped_pers_skill,
)
def _get_resample_func(ds):
"""Decide for resample function based on input `ds`.
Returns:
callable: `_resample_iterations`: if big and chunked `ds`
`_resample_iterations_idx`: else (if small and eager `ds`)
"""
resample_func = (
_resample_iterations
if (
dask.is_dask_collection(ds)
and len(ds.dims) > 3
# > 2MB
and ds.nbytes > 2000000
)
else _resample_iterations_idx
)
return resample_func
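# Hedged sketch (not from the original source): quick illustration of the
# heuristic above; a small, eager DataArray should take the
# `_resample_iterations_idx` fast path. The array created here is hypothetical.
def _example_resample_func_choice():
    """Illustrative only: returns True if the eager/small path is selected."""
    small = xr.DataArray(np.zeros((3, 4)), dims=("init", "member"))
    return _get_resample_func(small) is _resample_iterations_idx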
def _maybe_auto_chunk(ds, dims):
"""Auto-chunk on dimension `dims`.
Args:
ds (xr.object): input data.
dims (list of str or str): Dimensions to auto-chunk in.
Returns:
xr.object: auto-chunked along `dims`
"""
if dask.is_dask_collection(ds) and dims != []:
if isinstance(dims, str):
dims = [dims]
chunks = [d for d in dims if d in ds.dims]
chunks = {key: "auto" for key in chunks}
ds = ds.chunk(chunks)
return ds
def _chunk_before_resample_iterations_idx(
ds, iterations, chunking_dims, optimal_blocksize=100000000
):
"""Chunk ds so small that after _resample_iteration_idx chunks have optimal size
`optimal_blocksize`.
Args:
ds (xr.object): input data.
iterations (int): number of bootstrap iterations in `_resample_iterations_idx`.
chunking_dims (list of str or str): Dimension(s) to chunk in.
optimal_blocksize (int): dask blocksize to aim at in bytes.
Defaults to 100000000.
Returns:
xr.object: chunked to have blocksize: optimal_blocksize/iterations.
"""
if isinstance(chunking_dims, str):
chunking_dims = [chunking_dims]
# size of CLIMPRED_DIMS
climpred_dim_chunksize = 8 * np.product(
np.array([ds[d].size for d in CLIMPRED_DIMS if d in ds.dims])
)
# remaining blocksize for remaining dims considering iteration
spatial_dim_blocksize = optimal_blocksize / (climpred_dim_chunksize * iterations)
# size of remaining dims
chunking_dims_size = np.product(
np.array([ds[d].size for d in ds.dims if d not in CLIMPRED_DIMS])
) # ds.lat.size*ds.lon.size
# chunks needed to get to optimal blocksize
chunks_needed = chunking_dims_size / spatial_dim_blocksize
# get the number of chunks along the spatial dims (e.g. clon, clat)
cdim = [1 for i in chunking_dims]
nchunks = np.product(cdim)
stepsize = 1
counter = 0
while nchunks < chunks_needed:
for i, d in enumerate(chunking_dims):
c = cdim[i]
if c <= ds[d].size:
c = c + stepsize
cdim[i] = c
nchunks = np.product(cdim)
counter += 1
if counter == 100:
break
# convert number of chunks to chunksize
chunks = dict()
for i, d in enumerate(chunking_dims):
chunksize = ds[d].size // cdim[i]
if chunksize < 1:
chunksize = 1
chunks[d] = chunksize
ds = ds.chunk(chunks)
return ds
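# Hedged worked example (not part of the original module): sanity-check the
# arithmetic above with hypothetical sizes. With 8-byte values, 10 members x 52
# inits x 10 leads give a CLIMPRED-dim chunksize of 8 * 10 * 52 * 10 = 41,600 B;
# for 500 iterations and the default optimal_blocksize of 1e8 B, the remaining
# (spatial) dims should then be split into roughly
# spatial_size / (1e8 / (41_600 * 500)) chunks.
def _example_chunks_needed(
    climpred_sizes=(10, 52, 10),
    spatial_size=192 * 288,
    iterations=500,
    optimal_blocksize=100000000,
):
    """Illustrative only: number of spatial chunks implied by the formula above."""
    climpred_dim_chunksize = 8 * np.product(np.array(climpred_sizes))
    spatial_dim_blocksize = optimal_blocksize / (climpred_dim_chunksize * iterations)
    return spatial_size / spatial_dim_blocksize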
def bootstrap_compute(
hind,
verif,
hist=None,
alignment="same_verifs",
metric="pearson_r",
comparison="m2e",
dim="init",
reference=["uninitialized", "persistence"],
resample_dim="member",
sig=95,
iterations=500,
pers_sig=None,
compute=compute_hindcast,
resample_uninit=bootstrap_uninitialized_ensemble,
reference_compute=compute_persistence,
**metric_kwargs,
):
"""Bootstrap compute with replacement.
Args:
hind (xr.Dataset): prediction ensemble.
verif (xr.Dataset): Verification data.
hist (xr.Dataset): historical/uninitialized simulation.
metric (str): `metric`. Defaults to 'pearson_r'.
comparison (str): `comparison`. Defaults to 'm2e'.
dim (str or list): dimension(s) to apply metric over. default: 'init'.
reference (str, list of str): Type of reference forecasts with which to
verify. One or more of ['persistence', 'uninitialized'].
If None or empty, returns no p value.
resample_dim (str): dimension to resample from. default: 'member'::
- 'member': select a different set of members from hind
- 'init': select a different set of initializations from hind
sig (int): Significance level for uninitialized and
initialized skill. Defaults to 95.
pers_sig (int): Significance level for persistence skill confidence levels.
Defaults to sig.
iterations (int): number of resampling iterations (bootstrap
with replacement). Defaults to 500.
compute (func): function to compute skill.
Choose from
[:py:func:`climpred.prediction.compute_perfect_model`,
:py:func:`climpred.prediction.compute_hindcast`].
resample_uninit (func): function to create an uninitialized ensemble
from a control simulation or uninitialized large
ensemble. Choose from:
[:py:func:`bootstrap_uninitialized_ensemble`,
:py:func:`bootstrap_uninit_pm_ensemble_from_control`].
reference_compute (func): function to compute a reference forecast skill with.
Default: :py:func:`climpred.prediction.compute_persistence`.
** metric_kwargs (dict): additional keywords to be passed to metric
(see the arguments required for a given metric in :ref:`Metrics`).
Returns:
results: (xr.Dataset): bootstrapped results for the three different skills:
- `initialized` for the initialized hindcast `hind` and describes skill due
to initialization and external forcing
- `uninitialized` for the uninitialized/historical and approximates skill
from external forcing
- `persistence` for the persistence forecast computed by
`compute_persistence`
the different results:
- `verify skill`: skill values
- `p`: p value
- `low_ci` and `high_ci`: high and low ends of confidence intervals based
on significance threshold `sig`
Reference:
* Goddard, L., A. Kumar, A. Solomon, D. Smith, G. Boer, P.
Gonzalez, V. Kharin, et al. “A Verification Framework for
Interannual-to-Decadal Predictions Experiments.” Climate
Dynamics 40, no. 1–2 (January 1, 2013): 245–72.
https://doi.org/10/f4jjvf.
See also:
* climpred.bootstrap.bootstrap_hindcast
* climpred.bootstrap.bootstrap_perfect_model
"""
warn_if_chunking_would_increase_performance(hind, crit_size_in_MB=5)
if pers_sig is None:
pers_sig = sig
if isinstance(dim, str):
dim = [dim]
if isinstance(reference, str):
reference = [reference]
if reference is None:
reference = []
p = (100 - sig) / 100
ci_low = p / 2
ci_high = 1 - p / 2
p_pers = (100 - pers_sig) / 100
ci_low_pers = p_pers / 2
ci_high_pers = 1 - p_pers / 2
# get metric/comparison function name, not the alias
metric = METRIC_ALIASES.get(metric, metric)
comparison = COMPARISON_ALIASES.get(comparison, comparison)
# get class Metric(metric)
metric = get_metric_class(metric, ALL_METRICS)
# get comparison function
comparison = get_comparison_class(comparison, ALL_COMPARISONS)
# Perfect Model requires `same_inits` setup
isHindcast = True if comparison.name in HINDCAST_COMPARISONS else False
reference_alignment = alignment if isHindcast else "same_inits"
chunking_dims = [d for d in hind.dims if d not in CLIMPRED_DIMS]
# carry alignment for compute_reference separately
metric_kwargs_reference = metric_kwargs.copy()
metric_kwargs_reference["alignment"] = reference_alignment
# carry alignment in metric_kwargs
if isHindcast:
metric_kwargs["alignment"] = alignment
if hist is None: # PM path, use verif = control
hist = verif
# slower path for hindcast and resample_dim init
if resample_dim == "init" and isHindcast:
warnings.warn("resample_dim=`init` will be slower than resample_dim=`member`.")
(
bootstrapped_init_skill,
bootstrapped_uninit_skill,
bootstrapped_pers_skill,
) = _bootstrap_hindcast_over_init_dim(
hind,
hist,
verif,
dim,
reference,
resample_dim,
iterations,
metric,
comparison,
compute,
reference_compute,
resample_uninit,
**metric_kwargs,
)
else: # faster: first _resample_iterations_idx, then compute skill
resample_func = _get_resample_func(hind)
if not isHindcast:
if "uninitialized" in reference:
# create more members than needed in PM to make the uninitialized
# distribution more robust
members_to_sample_from = 50
repeat = members_to_sample_from // hind.member.size + 1
uninit_hind = xr.concat(
[resample_uninit(hind, hist) for i in range(repeat)],
dim="member",
**CONCAT_KWARGS,
)
uninit_hind["member"] = np.arange(1, 1 + uninit_hind.member.size)
if dask.is_dask_collection(uninit_hind):
# to minimize tasks: ensure uninit_hind gets pre-computed
# alternatively .chunk({'member': -1})
uninit_hind = uninit_hind.compute().chunk()
# resample uninit always over member and select only hind.member.size
bootstrapped_uninit = resample_func(
uninit_hind,
iterations,
"member",
replace=False,
dim_max=hind["member"].size,
)
bootstrapped_uninit["lead"] = hind["lead"]
# effectively only for _resample_iterations_idx, which doesn't use dim_max
bootstrapped_uninit = bootstrapped_uninit.isel(
member=slice(None, hind.member.size)
)
if dask.is_dask_collection(bootstrapped_uninit):
bootstrapped_uninit = bootstrapped_uninit.chunk({"member": -1})
bootstrapped_uninit = _maybe_auto_chunk(
bootstrapped_uninit, ["iteration"] + chunking_dims
)
else: # hindcast
if "uninitialized" in reference:
uninit_hind = resample_uninit(hind, hist)
if dask.is_dask_collection(uninit_hind):
# to minimize tasks: ensure uninit_hind gets pre-computed
# maybe not needed
uninit_hind = uninit_hind.compute().chunk()
bootstrapped_uninit = resample_func(
uninit_hind, iterations, resample_dim
)
bootstrapped_uninit = bootstrapped_uninit.isel(
member=slice(None, hind.member.size)
)
bootstrapped_uninit["lead"] = hind["lead"]
if dask.is_dask_collection(bootstrapped_uninit):
bootstrapped_uninit = _maybe_auto_chunk(
bootstrapped_uninit.chunk({"lead": 1}),
["iteration"] + chunking_dims,
)
if "uninitialized" in reference:
bootstrapped_uninit_skill = compute(
bootstrapped_uninit,
verif,
metric=metric,
comparison="m2o" if isHindcast else comparison,
dim=dim,
add_attrs=False,
**metric_kwargs,
)
# take mean if 'm2o' comparison forced before
if isHindcast and comparison != __m2o:
bootstrapped_uninit_skill = bootstrapped_uninit_skill.mean("member")
bootstrapped_hind = resample_func(hind, iterations, resample_dim)
if dask.is_dask_collection(bootstrapped_hind):
bootstrapped_hind = bootstrapped_hind.chunk({"member": -1})
bootstrapped_init_skill = compute(
bootstrapped_hind,
verif,
metric=metric,
comparison=comparison,
add_attrs=False,
dim=dim,
**metric_kwargs,
)
if "persistence" in reference:
if not metric.probabilistic:
pers_skill = reference_compute(
hind,
verif,
metric=metric,
dim=dim,
**metric_kwargs_reference,
)
# bootstrap pers
if resample_dim == "init":
bootstrapped_pers_skill = reference_compute(
bootstrapped_hind,
verif,
metric=metric,
**metric_kwargs_reference,
)
else: # member
_, bootstrapped_pers_skill = xr.broadcast(
bootstrapped_init_skill, pers_skill, exclude=CLIMPRED_DIMS
)
else:
bootstrapped_pers_skill = bootstrapped_init_skill.isnull()
# calc mean skill without any resampling
init_skill = compute(
hind,
verif,
metric=metric,
comparison=comparison,
dim=dim,
**metric_kwargs,
)
if "uninitialized" in reference:
# uninit skill as mean resampled uninit skill
uninit_skill = bootstrapped_uninit_skill.mean("iteration")
if "persistence" in reference:
if not metric.probabilistic:
pers_skill = reference_compute(
hind, verif, metric=metric, dim=dim, **metric_kwargs_reference
)
else:
pers_skill = init_skill.isnull()
# align to prepare for concat
if set(bootstrapped_pers_skill.coords) != set(bootstrapped_init_skill.coords):
if (
"time" in bootstrapped_pers_skill.dims
and "init" in bootstrapped_init_skill.dims
):
bootstrapped_pers_skill = bootstrapped_pers_skill.rename(
{"time": "init"}
)
# allow member to be broadcasted
bootstrapped_init_skill, bootstrapped_pers_skill = xr.broadcast(
bootstrapped_init_skill,
bootstrapped_pers_skill,
exclude=("init", "lead", "time"),
)
# get confidence intervals CI
init_ci = _distribution_to_ci(bootstrapped_init_skill, ci_low, ci_high)
if "uninitialized" in reference:
uninit_ci = _distribution_to_ci(bootstrapped_uninit_skill, ci_low, ci_high)
# probabilistic metrics won't have a persistence forecast,
# therefore only get CI if persistence was computed
if "persistence" in reference:
if "iteration" in bootstrapped_pers_skill.dims:
pers_ci = _distribution_to_ci(
bootstrapped_pers_skill, ci_low_pers, ci_high_pers
)
else:
# otherwise set all persistence outputs to false
pers_ci = init_ci == -999
# p value: whether uninit or pers is better than the init forecast
if "uninitialized" in reference:
p_uninit_over_init = _pvalue_from_distributions(
bootstrapped_uninit_skill, bootstrapped_init_skill, metric=metric
)
if "persistence" in reference:
p_pers_over_init = _pvalue_from_distributions(
bootstrapped_pers_skill, bootstrapped_init_skill, metric=metric
)
# wrap results together in one xr object
if reference == []:
results = xr.concat(
[
init_skill,
init_ci.isel(quantile=0, drop=True),
init_ci.isel(quantile=1, drop=True),
],
dim="results",
)
results["results"] = ["verify skill", "low_ci", "high_ci"]
results["skill"] = ["initialized"]
results = results.squeeze()
elif reference == ["persistence"]:
skill = xr.concat([init_skill, pers_skill], dim="skill", **CONCAT_KWARGS)
skill["skill"] = ["initialized", "persistence"]
# ci for each skill
ci = xr.concat([init_ci, pers_ci], "skill", coords="minimal").rename(
{"quantile": "results"}
)
ci["skill"] = ["initialized", "persistence"]
results = xr.concat([skill, p_pers_over_init], dim="results", **CONCAT_KWARGS)
results["results"] = ["verify skill", "p"]
if set(results.coords) != set(ci.coords):
res_drop = [c for c in results.coords if c not in ci.coords]
ci_drop = [c for c in ci.coords if c not in results.coords]
results = results.drop_vars(res_drop)
ci = ci.drop_vars(ci_drop)
results = xr.concat([results, ci], dim="results", **CONCAT_KWARGS)
results["results"] = ["verify skill", "p", "low_ci", "high_ci"]
elif reference == ["uninitialized"]:
skill = xr.concat([init_skill, uninit_skill], dim="skill", **CONCAT_KWARGS)
skill["skill"] = ["initialized", "uninitialized"]
# ci for each skill
ci = xr.concat([init_ci, uninit_ci], "skill", coords="minimal").rename(
{"quantile": "results"}
)
ci["skill"] = ["initialized", "uninitialized"]
results = xr.concat([skill, p_uninit_over_init], dim="results", **CONCAT_KWARGS)
results["results"] = ["verify skill", "p"]
if set(results.coords) != set(ci.coords):
res_drop = [c for c in results.coords if c not in ci.coords]
ci_drop = [c for c in ci.coords if c not in results.coords]
results = results.drop_vars(res_drop)
ci = ci.drop_vars(ci_drop)
results = xr.concat([results, ci], dim="results", **CONCAT_KWARGS)
results["results"] = ["verify skill", "p", "low_ci", "high_ci"]
elif set(reference) == set(["uninitialized", "persistence"]):
skill = xr.concat(
[init_skill, uninit_skill, pers_skill], dim="skill", **CONCAT_KWARGS
)
skill["skill"] = ["initialized", "uninitialized", "persistence"]
# probability that the reference (uninit / pers) beats init
p = xr.concat(
[p_uninit_over_init, p_pers_over_init], dim="skill", **CONCAT_KWARGS
)
p["skill"] = ["uninitialized", "persistence"]
# ci for each skill
ci = xr.concat([init_ci, uninit_ci, pers_ci], "skill", coords="minimal").rename(
{"quantile": "results"}
)
ci["skill"] = ["initialized", "uninitialized", "persistence"]
results = xr.concat([skill, p], dim="results", **CONCAT_KWARGS)
results["results"] = ["verify skill", "p"]
if set(results.coords) != set(ci.coords):
res_drop = [c for c in results.coords if c not in ci.coords]
ci_drop = [c for c in ci.coords if c not in results.coords]
results = results.drop_vars(res_drop)
ci = ci.drop_vars(ci_drop)
results = xr.concat([results, ci], dim="results", **CONCAT_KWARGS)
results["results"] = ["verify skill", "p", "low_ci", "high_ci"]
else:
raise ValueError("results not created")
# Attach climpred compute information to skill
metadata_dict = {
"confidence_interval_levels": f"{ci_high}-{ci_low}",
"bootstrap_iterations": iterations,
"reference": reference,
}
if reference is not None:
metadata_dict[
"p"
] = "probability that reference performs better than initialized"
metadata_dict.update(metric_kwargs)
results = assign_attrs(
results,
hind,
alignment=alignment,
metric=metric,
comparison=comparison,
dim=dim,
function_name=inspect.stack()[0][3], # take function.__name__
metadata_dict=metadata_dict,
)
# Ensure that the lead units get carried along for the calculation. The attribute
# tends to get dropped along the way due to ``xarray`` functionality.
results["lead"] = hind["lead"]
if "units" in hind["lead"].attrs and "units" not in results["lead"].attrs:
results["lead"].attrs["units"] = hind["lead"].attrs["units"]
return results
def bootstrap_hindcast(
hind,
hist,
verif,
alignment="same_verifs",
metric="pearson_r",
comparison="e2o",
dim="init",
reference=["uninitialized", "persistence"],
resample_dim="member",
sig=95,
iterations=500,
pers_sig=None,
reference_compute=compute_persistence,
**metric_kwargs,
):
"""Bootstrap compute with replacement. Wrapper of
py:func:`bootstrap_compute` for hindcasts.
Args:
hind (xr.Dataset): prediction ensemble.
verif (xr.Dataset): Verification data.
hist (xr.Dataset): historical/uninitialized simulation.
metric (str): `metric`. Defaults to 'pearson_r'.
comparison (str): `comparison`. Defaults to 'e2o'.
dim (str): dimension to apply metric over. default: 'init'.
reference (str, list of str): Type of reference forecasts with which to
verify. One or more of ['persistence', 'uninitialized'].
If None or empty, returns no p value.
resample_dim (str or list): dimension to resample from. default: 'member'.
- 'member': select a different set of members from hind
- 'init': select a different set of initializations from hind
sig (int): Significance level for uninitialized and
initialized skill. Defaults to 95.
pers_sig (int): Significance level for persistence skill confidence levels.
Defaults to sig.
iterations (int): number of resampling iterations (bootstrap
with replacement). Defaults to 500.
reference_compute (func): function to compute a reference forecast skill with.
Default: :py:func:`climpred.prediction.compute_persistence`.
** metric_kwargs (dict): additional keywords to be passed to metric
(see the arguments required for a given metric in :ref:`Metrics`).
Returns:
results: (xr.Dataset): bootstrapped results for the three different kinds of
predictions:
- `initialized` for the initialized hindcast `hind` and describes skill due
to initialization and external forcing
- `uninitialized` for the uninitialized/historical and approximates skill
from external forcing
- `persistence` for the persistence forecast computed by
`compute_persistence`
the different results:
- `verify skill`: skill values
- `p`: p value
- `low_ci` and `high_ci`: high and low ends of confidence intervals based
on significance threshold `sig`
Reference:
* Goddard, L., A. Kumar, A. Solomon, D. Smith, G. Boer, P.
Gonzalez, V. Kharin, et al. “A Verification Framework for
Interannual-to-Decadal Predictions Experiments.” Climate
Dynamics 40, no. 1–2 (January 1, 2013): 245–72.
https://doi.org/10/f4jjvf.
See also:
* climpred.bootstrap.bootstrap_compute
* climpred.prediction.compute_hindcast
Example:
>>> hind = climpred.tutorial.load_dataset('CESM-DP-SST')['SST']
>>> hist = climpred.tutorial.load_dataset('CESM-LE')['SST']
>>> obs = climpred.tutorial.load_dataset('ERSST')['SST']
>>> bootstrapped_skill = climpred.bootstrap.bootstrap_hindcast(hind, hist, obs)
>>> bootstrapped_skill.coords
Coordinates:
* lead (lead) int64 1 2 3 4 5 6 7 8 9 10
* kind (kind) object 'initialized' 'persistence' 'uninitialized'
* results (results) <U7 'verify skill' 'p' 'low_ci' 'high_ci'
"""
# Check that init is int, cftime, or datetime; convert ints or datetime to cftime.
hind = convert_time_index(hind, "init", "hind[init]")
hist = convert_time_index(hist, "time", "uninitialized[time]")
verif = convert_time_index(verif, "time", "verif[time]")
# Put this after `convert_time_index` since it assigns 'years' attribute if the
# `init` dimension is a `float` or `int`.
has_valid_lead_units(hind)
if ("same_verif" in alignment) & (resample_dim == "init"):
raise KeywordError(
"Cannot have both alignment='same_verifs' and "
"resample_dim='init'. Change `resample_dim` to 'member' to keep "
"common verification alignment or `alignment` to 'same_inits' to "
"resample over initializations."
)
# Kludge for now. Since we're computing persistence here we need to ensure that
# all products share a common (intersecting) time axis.
times = np.sort(
list(set(hind.init.data) & set(hist.time.data) & set(verif.time.data))
)
hind = hind.sel(init=times)
hist = hist.sel(time=times)
verif = verif.sel(time=times)
return bootstrap_compute(
hind,
verif,
hist=hist,
alignment=alignment,
metric=metric,
comparison=comparison,
dim=dim,
reference=reference,
resample_dim=resample_dim,
sig=sig,
iterations=iterations,
pers_sig=pers_sig,
compute=compute_hindcast,
resample_uninit=bootstrap_uninitialized_ensemble,
reference_compute=reference_compute,
**metric_kwargs,
)
def bootstrap_perfect_model(
init_pm,
control,
metric="pearson_r",
comparison="m2e",
dim=["init", "member"],
reference=["uninitialized", "persistence"],
resample_dim="member",
sig=95,
iterations=500,
pers_sig=None,
reference_compute=compute_persistence,
**metric_kwargs,
):
"""Bootstrap compute with replacement. Wrapper of
py:func:`bootstrap_compute` for perfect-model framework.
Args:
init_pm (xr.Dataset): prediction ensemble.
control (xr.Dataset): control simulation.
metric (str): `metric`. Defaults to 'pearson_r'.
comparison (str): `comparison`. Defaults to 'm2e'.
dim (str): dimension to apply metric over. default: ['init', 'member'].
reference (str, list of str): Type of reference forecasts with which to
verify. One or more of ['persistence', 'uninitialized'].
If None or empty, returns no p value.
resample_dim (str or list): dimension to resample from. default: 'member'.
- 'member': select a different set of members from hind
- 'init': select a different set of initializations from hind
sig (int): Significance level for uninitialized and
initialized skill. Defaults to 95.
pers_sig (int): Significance level for persistence skill confidence levels.
Defaults to sig.
iterations (int): number of resampling iterations (bootstrap
with replacement). Defaults to 500.
reference_compute (func): function to compute a reference forecast skill with.
Default: :py:func:`climpred.prediction.compute_persistence`.
** metric_kwargs (dict): additional keywords to be passed to metric
(see the arguments required for a given metric in :ref:`Metrics`).
Returns:
results: (xr.Dataset): bootstrapped results for the three different kinds of
predictions:
- `initialized` for the initialized hindcast `hind` and describes skill due
to initialization and external forcing
- `uninitialized` for the uninitialized/historical and approximates skill
from external forcing
- `pers` for the reference forecast computed by `reference_compute`, which
defaults to `compute_persistence`
the different results:
- `skill`: skill values
- `p`: p value
- `low_ci` and `high_ci`: high and low ends of confidence intervals based
on significance threshold `sig`
Reference:
* Goddard, L., A. Kumar, A. Solomon, D. Smith, G. Boer, P.
Gonzalez, V. Kharin, et al. “A Verification Framework for
Interannual-to-Decadal Predictions Experiments.” Climate
Dynamics 40, no. 1–2 (January 1, 2013): 245–72.
https://doi.org/10/f4jjvf.
See also:
* climpred.bootstrap.bootstrap_compute
* climpred.prediction.compute_perfect_model
Example:
>>> init = climpred.tutorial.load_dataset('MPI-PM-DP-1D')
>>> control = climpred.tutorial.load_dataset('MPI-control-1D')
>>> bootstrapped_s = climpred.bootstrap.bootstrap_perfect_model(init, control)
>>> bootstrapped_s.coords
Coordinates:
* lead (lead) int64 1 2 3 4 5 6 7 8 9 10
* kind (kind) object 'initialized' 'persistence' 'uninitialized'
* results (results) <U7 'verify skill' 'p' 'low_ci' 'high_ci'
"""
if dim is None:
dim = ["init", "member"]
# Check init & time is int, cftime, or datetime; convert ints or datetime to cftime.
init_pm = convert_time_index(
init_pm, "init", "init_pm[init]", calendar=PM_CALENDAR_STR
)
control = convert_time_index(
control, "time", "control[time]", calendar=PM_CALENDAR_STR
)
lead_units_equal_control_time_stride(init_pm, control)
return bootstrap_compute(
init_pm,
control,
hist=None,
metric=metric,
comparison=comparison,
dim=dim,
reference=reference,
resample_dim=resample_dim,
sig=sig,
iterations=iterations,
pers_sig=pers_sig,
compute=compute_perfect_model,
resample_uninit=bootstrap_uninit_pm_ensemble_from_control_cftime,
reference_compute=reference_compute,
**metric_kwargs,
)
def _bootstrap_func(
func,
ds,
resample_dim,
sig=95,
iterations=500,
*func_args,
**func_kwargs,
):
"""Sig % threshold of function based on iterations resampling with replacement.
Reference:
* Mason, S. J., and G. M. Mimmack. “The Use of Bootstrap Confidence
Intervals for the Correlation Coefficient in Climatology.” Theoretical and
Applied Climatology 45, no. 4 (December 1, 1992): 229–33.
https://doi.org/10/b6fnsv.
Args:
func (function): function to be bootstrapped.
ds (xr.object): first input argument of func. Chunk ds on a dim other
than `resample_dim` for a potential performance increase when multiple
CPUs are available.
resample_dim (str): dimension to resample from.
sig (int,float,list): significance levels to return. Defaults to 95.
iterations (int): number of resample iterations. Defaults to 500.
*func_args (type): `*func_args`.
**func_kwargs (type): `**func_kwargs`.
Returns:
sig_level: bootstrapped significance levels with
dimensions of init_pm and len(sig) if sig is list
"""
if not callable(func):
raise ValueError(f"Please provide func as a function, found {type(func)}")
warn_if_chunking_would_increase_performance(ds)
if isinstance(sig, list):
psig = [i / 100 for i in sig]
else:
psig = sig / 100
resample_func = _get_resample_func(ds)
bootstrapped_ds = resample_func(ds, iterations, dim=resample_dim, replace=False)
bootstrapped_results = func(bootstrapped_ds, *func_args, **func_kwargs)
bootstrapped_results = rechunk_to_single_chunk_if_more_than_one_chunk_along_dim(
bootstrapped_results, dim="iteration"
)
sig_level = bootstrapped_results.quantile(dim="iteration", q=psig, skipna=False)
return sig_level
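# Hedged usage sketch (not from the original source): `_bootstrap_func` wraps any
# reducing function, mirroring `dpp_threshold` below. Here a hypothetical
# variance threshold is bootstrapped from a control run along `dim`.
def _example_variance_threshold(control, sig=95, iterations=500, dim="time"):
    """Illustrative only: bootstrapped significance level of the variance over `dim`."""
    return _bootstrap_func(
        lambda ds: ds.var(dim), control, dim, sig=sig, iterations=iterations
    )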
def dpp_threshold(control, sig=95, iterations=500, dim="time", **dpp_kwargs):
"""Calc DPP significance levels from re-sampled dataset.
Reference:
* Feng, X., T. DelSole, and P. Houser. “Bootstrap Estimated Seasonal
Potential Predictability of Global Temperature and Precipitation.”
Geophysical Research Letters 38, no. 7 (2011).
https://doi.org/10/ft272w.
See also:
* climpred.bootstrap._bootstrap_func
* climpred.stats.dpp
"""
return _bootstrap_func(
dpp, control, dim, sig=sig, iterations=iterations, **dpp_kwargs
)
def varweighted_mean_period_threshold(control, sig=95, iterations=500, time_dim="time"):
"""Calc the variance-weighted mean period significance levels from re-sampled
dataset.
See also:
* climpred.bootstrap._bootstrap_func
* climpred.stats.varweighted_mean_period
"""
return _bootstrap_func(
varweighted_mean_period,
control,
time_dim,
sig=sig,
iterations=iterations,
)
|
py | b4028fc2fa65efbbd8130590595e439ad6d40675 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 16 10:37:45 2019
@author: Luke
"""
import cdsapi
c = cdsapi.Client()
c.retrieve(
'reanalysis-era5-single-levels',
{
'product_type':'reanalysis',
'variable':'lake_mix_layer_temperature',
'year':[
'1979','1980','1981',
'1982','1983','1984',
'1985','1986','1987',
'1988','1989','1990',
'1991','1992','1993',
'1994','1995','1996',
'1997','1998','1999',
'2000','2001','2002',
'2003','2004','2005',
'2006','2007','2008',
'2009','2010','2011',
'2012','2013','2014',
'2015','2016','2017',
'2018','2019'
],
'month':[
'01','02','03',
'04','05','06',
'07','08','09',
'10','11','12'
],
'day':[
'01','02','03',
'04','05','06',
'07','08','09',
'10','11','12',
'13','14','15',
'16','17','18',
'19','20','21',
'22','23','24',
'25','26','27',
'28','29','30',
'31'
],
'time':[
'00:00','06:00','12:00',
'18:00'
],
'format':'netcdf'
},
'era5_lakes_mixlayertemp_6hourly_1979_2019.nc') |
py | b4029098b37c0784c3f044832381f139b15c08dd | __name__='MFParse'
import numpy as np
from os import listdir
from os.path import join, isdir
from scipy.signal import hilbert
import h5py
def parse_egg(pathtoegg, Vrange=5.5e-8, nbit=8):
f=h5py.File(pathtoegg,'r')
dset=f['streams']['stream0']['acquisitions']['0']
channels=list(f['channels'].keys())
Nsamp=dset.shape[1]//(2*len(channels))
ind=[]
for ch in channels:
ind.append(int(ch.split('l')[1]))
#print(ch.split('l'))
ind=np.array(ind)
data=dset[0,:].reshape(ind.size,2*Nsamp)
Idata=np.float64(data[:,np.arange(0,2*Nsamp,2)])
Qdata=np.float64(data[:,np.arange(1,2*Nsamp,2)])
for i in range(len(channels)):
Idata[i,:]-=np.mean(Idata[i,:])
Qdata[i,:]-=np.mean(Qdata[i,:])
for i in range(len(channels)):
Idata[i,:]*=Vrange/(2**nbit)
Qdata[i,:]*=Vrange/(2**nbit)
complexdata=Idata+1j*Qdata
f.close()
return complexdata
def slice_egg(egg_data,slicenum,slicesize):
start=slicenum*slicesize
end=(slicenum+1)*slicesize
return egg_data[:,start:end]
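# Hedged usage sketch (not part of the original module): how the two helpers
# above might be combined. The path, slice number and slice size are
# hypothetical values for illustration only.
def _example_load_slice(pathtoegg="example.egg", slicenum=0, slicesize=4096):
    """Illustrative only: parse an egg file and return one complex-valued slice
    of shape (n_channels, slicesize)."""
    complexdata = parse_egg(pathtoegg)
    return slice_egg(complexdata, slicenum, slicesize)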
def parse_testbed(path_to_data,antennakey,V_pp=0.5,adc_bits=14):
parse_data={}
position_paths=[]
for i in [0,1,2]:
if isdir(join(path_to_data,str(i))):
parse_data.update({int(i):{}})
position_paths.append(join(path_to_data,str(i)))
# for each position get the acqusitions
for i,p in enumerate(position_paths):
for j in [0,1,2]:
if isdir(join(p,str(j))):
parse_data[i].update({int(j):{}})
# Read and parse the csv antenna files
for i,k in enumerate(parse_data): # positions
for j,l in enumerate(parse_data[k]): # acquisitions
for antenna in antennakey[l]: # antennas
if antenna not in parse_data[k][l].keys():
parse_data[k][l].update({90-antenna:[]})
for i,pos_path in enumerate(position_paths):
#print(pos_path)
for j,acq in enumerate(parse_data[i]):
for u in range(len(listdir(join(pos_path,str(acq))))): # This loop iterates through the antenna data
x=[]
with open(join(join(pos_path,str(acq)),'wave'+str(u)+'.txt'),newline='\n') as infile:
for n,m in enumerate(infile):
if n>=7:
x.append(float(m))
parse_data[i][acq][90-antennakey[acq][u]]=np.asarray(x)
# Remove DC offset and convert from adc bits to voltages
for i,pos in enumerate(parse_data): # positions
for j,acq in enumerate(parse_data[pos]): # iterates through acquisitions
for n,ant in enumerate(parse_data[pos][acq]): # iterates through the acquisitions that antenna was in
parse_data[pos][acq][ant]-=np.mean(parse_data[pos][acq][ant])
parse_data[pos][acq][ant]*=(V_pp/2**adc_bits)
#for i,pos in enumerate(parse_data):
# for j,acq in enumerate(parse_data[pos]):
# for n,ant in enumerate(parse_data[pos][acq]):
# print(np.sqrt(np.mean(np.array(parse_data[pos][acq][ant])**2)),pos)
return parse_data
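# Hedged usage sketch (not from the original module): `antennakey` maps each
# acquisition index to the list of antenna angles recorded in it, in the same
# order as the wave<N>.txt files on disk. The key values and folder name below
# are hypothetical.
def _example_parse_testbed(path_to_data="testbed_run"):
    """Illustrative only: parse a testbed run with three acquisitions per position."""
    example_antennakey = {0: [90, 60, 30], 1: [90, 0, -30], 2: [90, -60, -90]}
    return parse_testbed(path_to_data, example_antennakey)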
def combine_and_calc_phis(parse_data,Nsamples=8200,Fsample=500e6):
phis={}
for i,pos in enumerate(parse_data):
phis.update({pos:{}})
for j,acq in enumerate(parse_data[pos]):
phis[pos].update({acq:{}})
for n,ant in enumerate(parse_data[pos][acq]):
alpha1=np.real((np.fft.fft(parse_data[pos][acq][ant])[:Nsamples//2])
[np.argmax(abs(np.fft.fft(parse_data[pos][acq][ant])[:Nsamples//2]))])
alpha2=np.imag((np.fft.fft(parse_data[pos][acq][ant])[:Nsamples//2])
[np.argmax(abs(np.fft.fft(parse_data[pos][acq][ant])[:Nsamples//2]))])
phis[pos][acq].update({ant:np.arctan2(-alpha2,alpha1)})
corrected_data={}
for i,pos in enumerate(parse_data):
corrected_data.update({pos:{}})
for j,acq in enumerate(parse_data[pos]):
corrected_data[pos].update({acq:{}})
for n,ant in enumerate(parse_data[pos][acq]):
corrected_data[pos][acq].update({ant:hilbert(parse_data[pos][acq][ant])*
np.exp(-1j*(phis[pos][0][90]-phis[pos][acq][90]))})
antennas={}
# create dictionary {position:{antenna:acquisition}}
for i,pos in enumerate(corrected_data): #positions
antennas.update({pos:{}})
for j,acq in enumerate(corrected_data[pos]): #acquisitions
for ant in corrected_data[pos][acq].keys(): # antennas
if ant not in antennas[pos].keys():
antennas[pos].update({ant:acq})
combined_data={}
for i,pos in enumerate(antennas):
combined_data.update({pos:{}})
for n,ant in enumerate(antennas[pos]):
if ant not in combined_data[pos].keys():
combined_data[pos].update({ant:corrected_data[pos][antennas[pos][ant]][ant]})
#fold all antennas into the upper RH quadrant, mirror symmetry
mirror_combined_data={}
for i,pos in enumerate(combined_data):
mirror_combined_data.update({pos:{}})
for n,ant in enumerate(combined_data[pos]):
mirror_combined_data[pos].update({abs(ant):combined_data[pos][ant]})
for i,pos in enumerate(mirror_combined_data): #positions
if pos == 2:
for n,ant in enumerate(mirror_combined_data[pos]): # antennas
mirror_combined_data[pos][ant]*=np.exp(-1j*(phis[1][0][90]-phis[2][0][90]))
#for i,pos in enumerate(mirror_combined_data):
# for n,ant in enumerate(mirror_combined_data[pos]):
# print(np.sqrt(np.mean(np.real(mirror_combined_data[pos][ant])**2)),ant,pos)
#for i,pos in enumerate(mirror_combined_data):
# for n, ant in enumerate(mirror_combined_data[pos]):
# mirror_combined_data[pos][ant]=np.real(mirror_combined_data[pos][ant])
return mirror_combined_data,phis
def generate_array_data(combined_data,Nsamples=8200):
array_data={}
for i,pos in enumerate(combined_data):
if pos ==0 or pos==1:
array_data.update({pos:{}})
for n,ant in enumerate(combined_data[pos]):
if pos ==2:
continue
elif pos==0:
if ant != 90 and ant !=0:
array_data[pos].update({ant:combined_data[pos][ant]})
array_data[pos].update({-ant:combined_data[pos][ant]})
array_data[pos].update({180-ant:combined_data[pos][ant]})
array_data[pos].update({-180+ant:combined_data[pos][ant]})
elif ant ==90 or ant==0:
array_data[pos].update({ant:combined_data[pos][ant]})
array_data[pos].update({ant-180:combined_data[pos][ant]})
elif pos ==1:
if ant != 90 and ant !=0:
array_data[pos].update({ant:combined_data[pos][ant]})
array_data[pos].update({-ant:combined_data[pos][ant]})
array_data[pos].update({180-ant:combined_data[pos+1][ant]})
array_data[pos].update({-180+ant:combined_data[pos+1][ant]})
elif ant ==90: #don't double count 90 degrees
array_data[pos].update({ant:combined_data[pos][ant]})
array_data[pos].update({ant-180:combined_data[pos][ant]})
elif ant ==0: # don't double count 0 degrees
array_data[pos].update({ant:combined_data[pos][ant]})
array_data[pos].update({ant-180:combined_data[pos+1][ant]})
return array_data
def check_egg_slice(eggSlice):
data_grad=np.sqrt(np.gradient(np.real(eggSlice[0,:]))**2)
mean_data_grad=np.sqrt(np.mean(np.gradient(np.real(eggSlice[0,:]))**2))
zero_grad=np.where(data_grad==0)[0]
N_zero_grad=np.where(np.diff(zero_grad)==1)[0].size
if N_zero_grad>42:
return False
else:
return True
|
py | b40290ffe7e3f6d1fc1b1c7baa65203d45912040 | #-*- coding: utf-8 -*-
"""
Copyright (c) 2012 University of Oxford
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, --INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import logging
from rdfdatabank.lib.base import BaseController, render
class AboutController(BaseController):
def index(self):
return render('/about.html')
|
py | b40291a36efa94c6875bcc88f02f7c304674f0ca | # ____ _ ____ __ __
# / __ )_________ _(_)___ / __ \____ _ ______ / /___ ____ _____/ /
# / __ / ___/ __ `/ / __ \ / / / / __ \ | /| / / __ \/ / __ \/ __ `/ __ /
# / /_/ / / / /_/ / / / / / / /_/ / /_/ / |/ |/ / / / / / /_/ / /_/ / /_/ /
# /_____/_/ \__,_/_/_/ /_/ /_____/\____/|__/|__/_/ /_/_/\____/\__,_/\__,_/
#
# Version : 0.9.0.201222
# Code by : Anuradha Gunawardhana(LKBrilliant)
# Date : 2020.12.30
# Description : Record and save raw EEG data extracted from 5 channel Emotiv Insight headset while
# displaying images or playing audio clips of multiple object classes.
# Saved records contain...
# Experiments > Random image presentation
# > If needed, change the data composition by changing the image count
# > Random audio playing
# > Left-Right arrow images
# > This type of image set should contain multiple classes of arrows and a single center image named 'Center_001.JPG'
# > The GUI recognizes this type of project by the phrase 'Arrow' in the folder name
# > The center image will be shown before every image
# > Visual Q&A
# > This kind of image set should contain images from multiple classes, e.g. 10 classes
# > The GUI recognizes this type of image set by the phrase 'Q&A' in the folder name
# > The GUI adds a '?' to every class name and generates a question list, e.g. for 10 classes -> 10 questions
# > When showing an image, a question is asked above the image to get a 'YES'/'NO' response from the subject
# > For 10 classes the program can ask 1 'YES'-response question against 9 'NO'-response questions
# > But the probability of asking the correct question vs. an incorrect one is set to 50%
#
# Limitations : > Projects mixing audio and images are not supported
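# Hedged sketch (not part of the original GUI): data files in a project folder
# are expected to be named "<ClassName>_<index>.<ext>"; the class label used for
# markers below is everything before the first underscore. The file name in the
# docstring is illustrative only.
def _example_class_from_filename(filename: str) -> str:
    """Illustrative only: 'Left_003.JPG' -> 'Left'."""
    return filename.split('_')[0]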
from PyQt5 import QtCore, QtGui, QtWidgets, QtTest
from playsound import playsound
from eegExport_demo import RecordThread
from os import listdir
from os.path import isfile, isdir, join
import sys
import random
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
self.interval = 0
self.files = []
self.projectName = ""
self.recordingState = False
self.lastName = ""
self.extention = ""
self.audioProject = False
self.count = 0
self.comboTest = False
self.directionalProj = False
self.dirTempBool = True
self.VQAProj = False
self.centerImageName = 'Center_001.JPG'
self.directionalProjName = "Arrow"
self.VQAProjName = "Q&A"
self.questions = []
MainWindow.setObjectName("MainWindow")
MainWindow.resize(920, 910)
MainWindow.setStyleSheet("background-color: #212121;")
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout_2 = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout_2.setObjectName("gridLayout_2")
self.gridLayout_3 = QtWidgets.QGridLayout()
self.gridLayout_3.setObjectName("gridLayout_3")
self.lbl_class = QtWidgets.QLabel(self.centralwidget)
self.lbl_class.setMinimumSize(QtCore.QSize(0, 0))
self.lbl_class.setMaximumSize(QtCore.QSize(2000, 72))
font = QtGui.QFont()
# font.setFamily("Times New Roman")
font.setFamily("Nirmala UI")
font.setPointSize(28)
font.setBold(True)
font.setItalic(False)
font.setWeight(75)
self.lbl_class.setFont(font)
self.lbl_class.setStyleSheet("color: #ffffff;")
self.lbl_class.setAlignment(QtCore.Qt.AlignCenter)
self.lbl_class.setObjectName("lbl_class")
self.gridLayout_3.addWidget(self.lbl_class, 0, 0, 1, 6)
self.image = QtWidgets.QLabel(self.centralwidget)
self.image.setMaximumSize(QtCore.QSize(2000, 2000))
self.image.setText("")
self.image.setPixmap(QtGui.QPixmap("UI_graphics/brain_download.png"))
self.image.setScaledContents(False)
self.image.setAlignment(QtCore.Qt.AlignCenter)
self.image.setObjectName("image")
self.gridLayout_3.addWidget(self.image, 1, 0, 1, 6)
self.btn_main = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btn_main.sizePolicy().hasHeightForWidth())
self.btn_main.setSizePolicy(sizePolicy)
self.btn_main.setMaximumSize(QtCore.QSize(200, 100))
self.btn_main.setMinimumSize(QtCore.QSize(170, 50))
self.btn_main.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.btn_main.setStyleSheet(open("style_sheets/button_start.qss","r").read())
self.btn_main.setObjectName("btn_main")
self.btn_main.clicked.connect(self.buttonPress)
self.gridLayout_3.addWidget(self.btn_main, 2, 5, 2, 1)
self.lbl_status = QtWidgets.QLabel(self.centralwidget)
self.lbl_status.setMaximumSize(QtCore.QSize(2000, 25))
self.lbl_status.setStyleSheet("color: #505050;")
self.lbl_status.setObjectName("lbl_status")
self.gridLayout_3.addWidget(self.lbl_status, 3, 0, 1, 1)
self.lbl_contactQuality = QtWidgets.QLabel(self.centralwidget)
self.lbl_contactQuality.setMaximumSize(QtCore.QSize(2000, 25))
self.lbl_contactQuality.setStyleSheet("color: #505050;")
self.lbl_contactQuality.setObjectName("lbl_contactQuality")
self.gridLayout_3.addWidget(self.lbl_contactQuality, 2, 0, 1, 1)
self.txt_subjectName = QtWidgets.QLineEdit(self.centralwidget)
self.txt_subjectName.setMinimumSize(QtCore.QSize(100, 0))
self.txt_subjectName.setMaximumSize(QtCore.QSize(150, 25))
self.txt_subjectName.setLayoutDirection(QtCore.Qt.LeftToRight)
self.txt_subjectName.setStyleSheet(open("style_sheets/lineEdit_default.qss","r").read())
self.txt_subjectName.setText("")
self.txt_subjectName.setMaxLength(15)
self.txt_subjectName.setAlignment(QtCore.Qt.AlignCenter)
self.txt_subjectName.setDragEnabled(False)
self.txt_subjectName.setObjectName("txt_subjectName")
self.gridLayout_3.addWidget(self.txt_subjectName, 2, 4, 1, 1)
self.comboBox = QtWidgets.QComboBox(self.centralwidget)
self.comboBox.setMaximumSize(QtCore.QSize(150, 25))
self.comboBox.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.comboBox.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
self.comboBox.setLayoutDirection(QtCore.Qt.LeftToRight)
self.comboBox.setStyleSheet(open("style_sheets/comboBox_default.qss","r").read())
self.comboBox.setFrame(True)
self.comboBox.setObjectName("comboBox")
self.comboBox.currentIndexChanged.connect(self.comboChanged)
self.gridLayout_3.addWidget(self.comboBox, 3, 4, 1, 1)
self.lbl_projectName = QtWidgets.QLabel(self.centralwidget)
self.lbl_projectName.setMaximumSize(QtCore.QSize(120, 25))
self.lbl_projectName.setStyleSheet("color: rgb(80,80, 80);")
self.lbl_projectName.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.lbl_projectName.setObjectName("lbl_projectName")
self.gridLayout_3.addWidget(self.lbl_projectName, 3, 3, 1, 1)
self.lbl_subjectName = QtWidgets.QLabel(self.centralwidget)
self.lbl_subjectName.setMaximumSize(QtCore.QSize(100, 25))
self.lbl_subjectName.setStyleSheet("color: rgb(80,80, 80);")
self.lbl_subjectName.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.lbl_subjectName.setObjectName("lbl_subjectName")
self.gridLayout_3.addWidget(self.lbl_subjectName, 2, 3, 1, 1)
self.txt_interval = QtWidgets.QLineEdit(self.centralwidget)
self.txt_interval.setMinimumSize(QtCore.QSize(0, 0))
self.txt_interval.setMaximumSize(QtCore.QSize(60, 25))
self.txt_interval.setLayoutDirection(QtCore.Qt.LeftToRight)
self.txt_interval.setStyleSheet(open("style_sheets/lineEdit_default.qss","r").read())
self.txt_interval.setMaxLength(3)
self.txt_interval.setFrame(True)
self.txt_interval.setAlignment(QtCore.Qt.AlignCenter)
self.txt_interval.setDragEnabled(False)
self.txt_interval.setObjectName("txt_interval")
self.txt_interval.setToolTip('Period between two files')
self.gridLayout_3.addWidget(self.txt_interval, 2, 2, 1, 1)
self.lbl_interval = QtWidgets.QLabel(self.centralwidget)
self.lbl_interval.setMinimumSize(QtCore.QSize(120, 0))
self.lbl_interval.setStyleSheet("color: rgb(80,80, 80);")
self.lbl_interval.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.lbl_interval.setObjectName("lbl_interval")
self.gridLayout_3.addWidget(self.lbl_interval, 2, 1, 1, 1)
self.txt_count= QtWidgets.QLineEdit(self.centralwidget)
self.txt_count.setMinimumSize(QtCore.QSize(0, 0))
self.txt_count.setMaximumSize(QtCore.QSize(60, 25))
self.txt_count.setLayoutDirection(QtCore.Qt.LeftToRight)
self.txt_count.setStyleSheet(open("style_sheets/lineEdit_default.qss","r").read())
self.txt_count.setMaxLength(3)
self.txt_count.setAlignment(QtCore.Qt.AlignCenter)
self.txt_count.setDragEnabled(False)
self.txt_count.setObjectName("txt_count")
self.txt_count.setToolTip('Number of files for a recording')
self.gridLayout_3.addWidget(self.txt_count, 3, 2, 1, 1)
self.lbl_count= QtWidgets.QLabel(self.centralwidget)
self.lbl_count.setMinimumSize(QtCore.QSize(120, 0))
self.lbl_count.setStyleSheet("color: rgb(80,80, 80);")
self.lbl_count.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.lbl_count.setObjectName("lbl_count")
self.gridLayout_3.addWidget(self.lbl_count, 3, 1, 1, 1)
self.gridLayout_2.addLayout(self.gridLayout_3, 0, 0, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
self.record = RecordThread() # Initialize the Recording thread
self.record.status.connect(self.threadMsgHandler)
self.record.contactQuality.connect(self.CQHandler)
self.menuVisibility('all',False)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Brain Download"))
MainWindow.setWindowIcon(QtGui.QIcon('UI_graphics/logo.png'))
self.lbl_class.setText(_translate("MainWindow", ""))
self.btn_main.setText(_translate("MainWindow", "Start"))
self.lbl_status.setText(_translate("MainWindow", "Status:"))
self.lbl_contactQuality.setText(_translate("MainWindow", ""))
self.lbl_subjectName.setText(_translate("MainWindow", "Subject Name: "))
self.lbl_projectName.setText(_translate("MainWindow", "Project Name: "))
self.txt_interval.setText(_translate("MainWindow", "2"))
self.lbl_interval.setText(_translate("MainWindow", "Interval(s):"))
self.txt_count.setText(_translate("MainWindow", "20"))
self.lbl_count.setText(_translate("MainWindow", "Count:"))
def threadMsgHandler(self,result):
self.statusUpdate(result)
if result == "Initializing":
self.btn_main.setText("Initializing")
self.btn_main.setStyleSheet(open("style_sheets/button_initializing.qss","r").read())
elif result == "Initialization: Successful":
self.btn_main.setText("Start Recording")
self.btn_main.setStyleSheet(open("style_sheets/button_startRecording.qss","r").read())
self.menuVisibility('all',True)
self.comboBoxUpdated = self.updateComboBox()
elif result == "Initialization: Failed":
self.btn_main.setText("Try again")
self.btn_main.setStyleSheet(open("style_sheets/button_start.qss","r").read())
elif result == "Recording":
self.btn_main.setText("Cancel")
self.btn_main.setStyleSheet(open("style_sheets/button_stop.qss","r").read())
self.recordingState = True
self.sequencing()
def CQHandler(self,result):
self.lbl_contactQuality.setText("Contact Quality: {}%".format(result))
def buttonPress(self):
btn_text = self.btn_main.text()
if btn_text == "Start":
self.record.start() # start the thread
elif btn_text == "Cancel":
self.record.saving = False # Cancel and don't save the recording
self.stopRecording()
elif btn_text == "Start Recording":
self.record.contactTest = False
if self.prerequisiteTest():
self.interval = float(self.txt_interval.text())
self.record.interval = self.interval
self.count = int(self.txt_count.text())
self.record.count = self.count
self.record.audioProject = self.audioProject
self.getFileList()
self.record.startRecording = True # start headset data collection
self.record.start() # start the thread
self.menuVisibility('all',False)
elif btn_text == "Try again":
self.btn_main.setStyleSheet(open("style_sheets/button_initializing.qss","r").read())
self.btn_main.setText("Initializing")
self.record.start() # start the thread
def stopRecording(self):
self.btn_main.setStyleSheet(open("style_sheets/button_startRecording.qss","r").read())
self.btn_main.setText("Start Recording")
self.recordingState = False # Stop sequencing of images
self.record.startRecording = False # stop headset data collection
self.menuVisibility('all',True)
self.record.marker = ""
self.image.setPixmap(QtGui.QPixmap("UI_graphics/brain_download.png"))
self.lbl_class.setText("")
self.record.contactTest = True
QtTest.QTest.qWait(2000) # delay some time before starting the contact quality test again
self.record.start()
def sequencing(self):
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.nextFile)
self.timer.start(self.interval*1000)
def nextFile(self):
if self.recordingState:
fileName = random.choice(self.files)
while fileName == self.lastName: # avoid consecutive same file selection
fileName = random.choice(self.files)
self.lastName = fileName
filePath = "Data/{}/{}".format(self.projectName,fileName)
self.record.tick = not self.record.tick
className = fileName.split('_')[0]
if not self.VQAProj and not self.directionalProj: self.record.marker = className # Insert markers only if the projects are not 'VQ&A' or 'Arrow'
if self.count != 0:
if self.audioProject: playsound(filePath) # When Audio project selected
elif self.directionalProj: # If the directional(Arrow) project was selected
if self.dirTempBool:
self.record.marker = 'Center'
centerImgPath = "Data/{}/{}".format(self.projectName,self.centerImageName)
self.dirTempBool = False
self.image.setPixmap(QtGui.QPixmap(centerImgPath))
else:
self.record.marker = className
self.image.setPixmap(QtGui.QPixmap(filePath))
self.dirTempBool = True
elif self.VQAProj:
wrong = [x for x in self.questions if className not in x] # Get the questions that can be asked as wrong questions
q = random.choice(wrong) if random.choice([1,0]) else className+'?' # 50% probability of asking the correct question vs. a wrong one
self.lbl_class.setText(q) # Ask the chosen question
self.image.setPixmap(QtGui.QPixmap(filePath))
self.record.marker = 'YES' if className in q else 'NO'
else: self.image.setPixmap(QtGui.QPixmap(filePath))
else:
self.record.saving = True # Stop and save the recording
self.stopRecording()
self.count -= 1
def statusUpdate(self,message):
self.lbl_status.setText("Status: {}".format(message))
def isFloat(self,num):
try :
float(num)
return True
except :
return False
def prerequisiteTest(self):
if self.txt_subjectName.text() != "" and self.txt_interval.text() != "" and self.txt_count.text() != "" and self.comboBoxUpdated:
self.txt_subjectName.setStyleSheet(open("style_sheets/lineEdit_default.qss","r").read())
self.txt_interval.setStyleSheet(open("style_sheets/lineEdit_default.qss","r").read())
self.txt_count.setStyleSheet(open("style_sheets/lineEdit_default.qss","r").read())
self.comboBox.setStyleSheet(open("style_sheets/comboBox_default.qss","r").read())
self.record.subjectName = self.txt_subjectName.text()
check = True
if self.txt_subjectName.text() == "":
self.statusUpdate("Error:Fill the required fields")
self.txt_subjectName.setStyleSheet(open("style_sheets/lineEdit_warning.qss","r").read())
check = False
if not self.isFloat(self.txt_interval.text()):
self.statusUpdate("Error:Fill the required fields")
self.txt_interval.setStyleSheet(open("style_sheets/lineEdit_warning.qss","r").read())
check = False
if not(self.txt_count.text().isdigit()):
self.statusUpdate("Error:Fill the required fields")
self.txt_count.setStyleSheet(open("style_sheets/lineEdit_warning.qss","r").read())
check = False
if not self.comboBoxUpdated:
self.statusUpdate("Error:Add project folders to the 'Data' directory")
self.comboBox.setStyleSheet(open("style_sheets/comboBox_warning.qss","r").read())
check = False
if not self.comboTest:
self.statusUpdate("Error:Support extentions ['jpg','png','wav','mp3']")
self.comboBox.setStyleSheet(open("style_sheets/comboBox_warning.qss","r").read())
check = False
return check
def updateComboBox(self):
path = "Data"
dirs = listdir(path)
if len(dirs) == 0:
self.statusUpdate("Error:No Projects Found")
self.comboBox.setStyleSheet(open("style_sheets/comboBox_warning.qss","r").read())
return False
else:
self.comboBox.addItems(dirs)
return True
def getFileList(self):
path = "Data/{}".format(self.projectName)
self.files = [f for f in listdir(path) if isfile(join(path, f))]
justText = [i.split('_')[0] for i in self.files] # Get just the class text from all file names
uniqueText = list(set(justText)) # Get all unique names from the files
uniqueText.sort() # Sort to make the order always the same
if self.VQAProj:
self.record.classes = ['YES','NO']
self.questions = ["{}{}".format(i,'?') for i in uniqueText] # Add a question mark '?' at the end of each class name
else: self.record.classes = uniqueText
def menuVisibility(self,section,state):
if section == 'all':
self.lbl_subjectName.setVisible(state)
self.lbl_interval.setVisible(state)
self.lbl_count.setVisible(state)
self.lbl_projectName.setVisible(state)
self.txt_subjectName.setVisible(state)
self.txt_interval.setVisible(state)
self.txt_count.setVisible(state)
self.comboBox.setVisible(state)
if section == 'part':
self.lbl_interval.setVisible(state)
self.txt_interval.setVisible(state)
self.lbl_count.setVisible(state)
self.txt_count.setVisible(state)
def comboChanged(self):
self.directionalProj = False
self.VQAProj = False
self.projectName = self.comboBox.currentText()
self.record.projectName = self.comboBox.currentText()
path = "Data/{}".format(self.projectName)
self.files = [f for f in listdir(path) if isfile(join(path, f))]
extensions = [i.split('.')[1] for i in self.files] # Get just the extensions from all file names
uniqueExt = [x.lower() for x in list(set(extensions))] # list of all unique extensions in lower case
self.statusUpdate("Initialization: Successful")
self.comboBox.setStyleSheet(open("style_sheets/comboBox_default.qss","r").read())
self.menuVisibility('all',True)
if len(uniqueExt)==0:
self.statusUpdate("Error:Empty Folder")
self.comboBox.setStyleSheet(open("style_sheets/comboBox_warning.qss","r").read())
self.menuVisibility('part',False)
self.comboTest = False
elif set(uniqueExt) <= set(['mp3','wav']): # check if uniqueExt is a subset of ['mp3', 'wav']
self.audioProject = True
self.comboTest = True
elif set(uniqueExt) <= set(['jpg','png','txt']):
self.audioProject = False
self.comboTest = True
if self.directionalProjName in self.projectName:
self.directionalProj = True
self.files.remove(self.centerImageName) # Remove the Center dot image from the arrow list
if self.VQAProjName in self.projectName:
self.VQAProj = True
else:
self.statusUpdate("Error:No Audio/Image Files")
self.comboBox.setStyleSheet(open("style_sheets/comboBox_warning.qss","r").read())
self.menuVisibility('part',False)
self.comboTest = False
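    # Sketch of how comboChanged() classifies a project folder (folder and file
    # names here are hypothetical):
    #   Data/Sounds/  -> a.wav, b.mp3          -> audio project (comboTest True)
    #   Data/Animals/ -> cat_01.jpg, notes.txt -> image project (comboTest True)
    #   Data/Videos/  -> clip.mp4              -> rejected: "No Audio/Image Files"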
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_()) |
py | b40291dcb27ec44fd10086da94992d6079c4fb64 | import dragonfly as df
import constants
class Item:
def __init__(self, name: str, commands=None):
self.name = name
if commands is None:
self.commands = self.commands_from_name(name)
else:
self.commands = commands
def commands_from_name(self, name: str):
# 'Wood Fence' -> ['wood fence']
return [name.lower()]
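# Spoken forms default to the lowercased item name; an explicit list overrides
# that when the literal name is awkward to dictate, e.g. below
# Item("Deluxe Speed-Gro", ["deluxe speed grow"]) replaces the default
# "deluxe speed-gro" with a pronounceable command.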
craftable_items = (
Item("Ancient Seeds"),
Item("Bait"),
Item("Barbed Hook"),
Item("Barrel Brazier"),
Item("Basic Retaining Soil"),
Item("Basic Fertilizer"),
Item("Bee House"),
Item("Bomb"),
Item("Bone Mill"),
Item("Brick Floor"),
Item("Bug Steak"),
Item("Campfire"),
Item("Carved Brazier"),
Item("Cask"),
Item("Charcoal Kiln"),
Item("Cheese Press"),
Item("Cherry Bomb"),
Item("Chest"),
Item("Cobblestone Path"),
Item("Cookout Kit"),
Item("Cork Bobber"),
Item("Crab Pot"),
Item("Crystalarium"),
Item("Crystal Floor"),
Item("Crystal Path"),
Item("Dark Sign"),
Item("Deluxe Fertilizer"),
Item("Deluxe Retaining Soil"),
Item("Deluxe Scarecrow"),
Item("Deluxe Speed-Gro", ["deluxe speed grow"]),
Item("Dressed Spinner"),
Item("Drum Block"),
Item("Explosive Ammo"),
Item("Fairy Dust"),
Item("Farm Computer"),
Item("Fiber Seeds"),
Item("Field Snack"),
Item("Flute Block"),
Item("Garden Pot"),
Item("Geode Crusher"),
Item("Glowstone Ring"),
Item("Gold Bar"),
Item("Gold Brazier"),
Item("Grass Starter"),
Item("Gravel Path"),
Item("Hardwood Fence"),
Item("Hopper"),
Item("Heavy Tapper"),
Item("Hyper Speed-Gro", ["hyper speed grow"]),
Item("Iridium Band"),
Item("Iridium Sprinkler"),
Item("Iron Bar"),
Item("Iron Fence"),
Item("Iron Lamp-post"),
Item("Jack-O-Lantern", ["jack-o-lantern"]),
Item("Keg"),
Item("Life Elixir"),
Item("Lightning Rod"),
Item("Loom"),
Item("Magic Bait"),
Item("Magnet"),
Item("Marble Brazier"),
Item("Mayonnaise Machine"),
Item("Mega Bomb"),
Item("Mini-Jukebox", ["mini jukebox"]),
Item("Mini-Obelisk", ["mini obelisk"]),
Item("Monster Musk"),
Item("Oil Maker"),
Item("Oil Of Garlic"),
Item("Ostrich Incubator"),
Item("Quality Bobber"),
Item("Quality Fertilizer"),
Item("Quality Retaining Soil"),
Item("Quality Sprinkler"),
Item("Preserves Jar"),
Item("Rain Totem"),
Item("Recycling Machine"),
Item("Ring of Yoba"),
Item("Rustic Plank Floor"),
Item("Scarecrow"),
Item("Seed Maker"),
Item("Skull Brazier"),
Item("Slime Egg-Press", ["slime egg press"]),
Item("Slime Incubator"),
Item("Solar Panel"),
Item("Speed-Gro", ["speed grow"]),
Item("Spinner"),
Item("Sprinkler"),
Item("Staircase"),
Item("Stepping Stone Path"),
Item("Stone Brazier"),
Item("Stone Chest"),
Item("Stone Fence"),
Item("Stone Floor"),
Item("Stone Walkway Floor"),
Item("Stone Sign"),
Item("Straw Floor"),
Item("Stump Brazier"),
Item("Sturdy Ring"),
Item("Tapper"),
Item("Tea Sapling"),
Item("Thorns Ring"),
Item("Torch"),
Item("Trap Bobber"),
Item("Treasure Hunter"),
Item("Tree Fertilizer"),
Item("Tub o' Flowers", ["tub [of | o] flowers"]),
Item("Warp Totem Beach", ["warp totem beach", "beach warp totem"]),
Item("Warp Totem Farm", ["warp totem farm", "farm warp totem"]),
Item("Warp Totem Island", ["warp totem island", "island warp totem"]),
Item("Warp Totem Mountains", ["warp totem mountains", "mountains warp totem"]),
Item("Warrior Ring"),
Item("Weathered Floor"),
Item("Wicked Statue"),
Item("Wild Bait"),
Item("Wild Seeds (Fa)", ["fall wild seeds", "wild fall seeds", "wild seeds fall"]),
Item("Wild Seeds (Sp)", ["spring wild seeds", "wild spring seeds", "wild seeds spring"]),
Item("Wild Seeds (Su)", ["summer wild seeds", "wild summer seeds", "wild seeds summer"]),
Item("Wild Seeds (Wi)", ["winter wild seeds", "wild winter seeds", "wild seeds winter"]),
Item("Worm Bin"),
Item("Wood Fence"),
Item("Wood Floor"),
Item("Wood Lamp-post"),
Item("Wood Path"),
Item("Wood Sign"),
Item("Wooden Brazier"),
)
tools = (
Item(constants.AXE),
Item(constants.FISHING_ROD, ["fishing (rod | pole)"]),
Item(constants.HOE),
Item(constants.PICKAXE),
Item(constants.SCYTHE),
Item(constants.WATERING_CAN),
Item(constants.SHEARS),
Item(constants.MILK_PAIL),
Item(constants.PAN),
)
other_items = (
Item("Small Glow Ring"),
Item("Glow Ring"),
Item("Small Magnet Ring"),
Item("Magnet Ring"),
Item("Slime Charmer Ring"),
Item("Vampire Ring"),
Item("Savage Ring"),
Item("Yoba Ring"),
Item("Sturdy Ring"),
Item("Burglars Ring"),
Item("Iridium Band"),
Item("Jukebox Ring"),
Item("Amethyst Ring"),
Item("Topaz Ring"),
Item("Aquamarine Ring"),
Item("Jade Ring"),
Item("Emerald Ring"),
Item("Ruby Ring"),
Item("Cowboy Hat"),
Item("Bowler Hat"),
Item("Top Hat"),
Item("Sombrero"),
Item("Straw Hat"),
Item("Official Cap"),
Item("Blue Bonnet"),
Item("Plum Chapeau"),
Item("Skeleton Mask"),
Item("Goblin Mask"),
Item("Chicken Mask"),
Item("Earmuffs"),
Item("Delicate Bow"),
Item("Tropiclip"),
Item("Butterfly Bow"),
Item("Hunters Cap"),
Item("Trucker Hast"),
Item("Sailors Cap"),
Item("Good Ol Cap"),
Item("Fedora"),
Item("Cool Cap"),
Item("Lucky Bow"),
Item("Polka Bow"),
Item("Gnomes Cap"),
Item("Eye Patch"),
Item("Santa Hat"),
Item("Tiara"),
Item("Hard Hat"),
Item("Souwester"),
Item("Daisy"),
Item("Watermelon Band"),
Item("Mouse Ears"),
Item("Cat Ears"),
Item("Cowgal Hat"),
Item("Cowpoke Hat"),
Item("Archers Cap"),
Item("Panda Hat"),
Item("Blue Cowboy Hat"),
Item("Red Cowboy Hat"),
Item("Cone Hat"),
Item("Sneakers"),
Item("Rubber Boots"),
Item("Leather Boots"),
Item("Work Boots"),
Item("Combat Boots"),
Item("Tundra Boots"),
Item("Thermal Boots"),
Item("Dark Boots"),
Item("Firewalker Boots"),
Item("Genie Shoes"),
Item("Space Boots"),
Item("Cowboy Boots"),
Item("House Plant"),
Item("Keg"),
Item("Furnace"),
Item("Table Piece Left"),
Item("Table Piece Right"),
Item("Mayonnaise Machine"),
Item("Seed Maker"),
Item("Wood Chair"),
Item("Skeleton Model"),
Item("Obelisk"),
Item("Chicken Statue"),
Item("Stone Cairn"),
Item("Suit Of Armor"),
Item("Sign Of The Vessel"),
Item("Basic Log"),
Item("Lawn Flamingo"),
Item("Big Green Cane"),
Item("Green Canes"),
Item("Mixed Cane"),
Item("Red Canes"),
Item("Big Red Cane"),
Item("Ornamental Hay Bale"),
Item("Log Section"),
Item("Grave Stone"),
Item("Seasonal Decor"),
Item("Stone Frog"),
Item("Stone Parrot"),
Item("Stone Owl"),
Item("Stone Junimo"),
Item("Slime Ball"),
Item("Garden Pot"),
Item("Bookcase"),
Item("Fancy Table"),
Item("Ancient Table"),
Item("Ancient Stool"),
Item("Grandfather Clock"),
Item("Teddy Timer"),
Item("Dead Tree"),
Item("Staircase"),
Item("Tall Torch"),
Item("Ritual Mask"),
Item("Bonfire"),
Item("Bongo"),
Item("Decorative Spears"),
Item("Boulder"),
Item("Door"),
Item("Locked Door"),
Item("Wicked Statue"),
Item("Sloth Skeleton Left"),
Item("Sloth Skeleton Middle"),
Item("Sloth Skeleton Right"),
Item("Standing Geode"),
Item("Obsidian Vase"),
Item("Crystal Chair"),
Item("Singing Stone"),
Item("Strange Capsule"),
Item("Empty Capsule"),
Item("Feed Hopper"),
Item("Incubator"),
Item("Heater"),
Item("Tapper"),
Item("Camera"),
Item("Plush Bunny"),
Item("Rarecrow"),
Item("Decorative Pitcher"),
Item("Dried Sunflower Seeds"),
Item("Stardew Hero Trophy"),
Item("Soda Machine"),
Item("Barrel"),
Item("Crate"),
Item("Statue Of Endless Fortune"),
Item("Mushroom Box"),
Item("Praire King Arcade System"),
Item("Campfire"),
Item("Slime Incubator"),
Item("Slime Egg Press"),
Item("Junimo Kart Arcade System"),
Item("Statue Of Perfection"),
Item("Pinky Lemon"),
Item("Foroguemon"),
Item("Cask"),
Item("Solid Gold Lewis"),
Item("Auto Grabber"),
Item("Seasonal Plant"),
Item("Weeds"),
Item("Stone"),
Item("Wild Horseradish"),
Item("Daffodil"),
Item("Leek"),
Item("Dandelion"),
Item("Parsnip"),
Item("Lumber"),
Item("Emerald"),
Item("Aquamarine"),
Item("Ruby"),
Item("Amethyst"),
Item("Topaz"),
Item("Jade"),
Item("Diamond"),
Item("Prismatic Shard"),
Item("Cave Carrot"),
Item("Secret Note"),
Item("Quartz"),
Item("Fire Quartz"),
Item("Frozen Tear"),
Item("Earth Crystal"),
Item("Coconut"),
Item("Cactus Fruit"),
Item("Sap"),
Item("Torch"),
Item("Spirit Torch"),
Item("Dwarf Scroll I", ["dwarf scroll one"]),
Item("Dwarf Scroll II", ["dwarf scroll two"]),
Item("Dwarf Scroll III", ["dwarf scroll three"]),
Item("Dwarf Scroll IV", ["dwarf scroll four"]),
Item("Chipped Amphora"),
Item("Arrowhead"),
Item("Lost Book"),
Item("Ancient Doll"),
Item("Elvish Jewelry"),
Item("Chewing Stick"),
Item("Ornamental Fan"),
Item("Dinosaur Egg"),
Item("Rare Disc"),
Item("Ancient Sword"),
Item("Rusty Spoon"),
Item("Rusty Spur"),
Item("Rusty Cog"),
Item("Ancient Seed"),
Item("Prehistoric Tool"),
Item("Dried Starfish"),
Item("Anchor"),
Item("Glass Shards"),
Item("Bone Flute"),
Item("Prehistoric Handaxe"),
Item("Dwarvish Helm"),
Item("Dwarf Gadget"),
Item("Ancient Drum"),
Item("Golden Mask"),
Item("Golden Relic"),
Item("Strange Doll"),
Item("Pufferfish"),
Item("Anchovy"),
Item("Tuna"),
Item("Sardine"),
Item("Bream"),
Item("Largemouth Bass"),
Item("Smallmouth Bass"),
Item("Rainbow Trout"),
Item("Salmon"),
Item("Walleye"),
Item("Perch"),
Item("Carp"),
Item("Catfish"),
Item("Pike"),
Item("Sunfish"),
Item("Red Mullet"),
Item("Herring"),
Item("Eel"),
Item("Octopus"),
Item("Red Snapper"),
Item("Squid"),
Item("Seaweed"),
Item("Green Algae"),
Item("Sea Cucumber"),
Item("Super Cucumber"),
Item("Ghostfish"),
Item("White Algae"),
Item("Stonefish"),
Item("Crimsonfish"),
Item("Angler"),
Item("Ice Pip"),
Item("Lava Eel"),
Item("Legend"),
Item("Sandfish"),
Item("Scorpion Carp"),
Item("Treasure Chest"),
Item("Joja Cola"),
Item("Trash"),
Item("Driftwood"),
Item("Broken Glasses"),
Item("Broken CD"),
Item("Soggy Newspaper"),
Item("Large Egg"),
Item("Egg"),
Item("Hay"),
Item("Milk"),
Item("Large Milk"),
Item("Goat Milk"),
Item("L. Goat Milk", ["large goat milk"]),
Item("Green Bean"),
Item("Cauliflower"),
Item("Potato"),
Item("Fried Egg"),
Item("Omelet"),
Item("Salad"),
Item("Cheese Cauliflower"),
Item("Baked Fish"),
Item("Parsnip Soup"),
Item("Vegetable Medley"),
Item("Complete Breakfast"),
Item("Fried Calamari"),
Item("Strange Bun"),
Item("Lucky Lunch"),
Item("Fried Mushroom"),
Item("Pizza"),
Item("Bean Hotpot"),
Item("Glazed Yams"),
Item("Carp Surprise"),
Item("Hashbrowns"),
Item("Pancakes"),
Item("Salmon Dinner"),
Item("Fish Taco"),
Item("Crispy Bass"),
Item("Pepper Poppers"),
Item("Bread"),
Item("Tom Kha Soup"),
Item("Trout Soup"),
Item("Chocolate Cake"),
Item("Pink Cake"),
Item("Rhubarb Pie"),
Item("Cookie"),
Item("Spaghetti"),
Item("Fried Eel"),
Item("Spicy Eel"),
Item("Sashimi"),
Item("Maki Roll"),
Item("Tortilla"),
Item("Red Plate"),
Item("Eggplant Parmesan"),
Item("Rice Pudding"),
Item("Ice Cream"),
Item("Blueberry Tart"),
Item("Autumns Bounty"),
Item("Pumpkin Soup"),
Item("Super Meal"),
Item("Cranberry Sauce"),
Item("Stuffing"),
Item("Farmers Lunch"),
Item("Survival Burger"),
Item("Dish OThe Sea"),
Item("Miners Treat"),
Item("Roots Platter"),
Item("Sugar"),
Item("Wheat Flour"),
Item("Oil"),
Item("Garlic"),
Item("Kale"),
Item("Rhubarb"),
Item("Melon"),
Item("Tomato"),
Item("Morel"),
Item("Blueberry"),
Item("Fiddlehead Fern"),
Item("Hot Pepper"),
Item("Wheat"),
Item("Radish"),
Item("Red Cabbage"),
Item("Starfruit"),
Item("Corn"),
Item("Eggplant"),
Item("Artichoke"),
Item("Pumpkin"),
Item("Bok Choy"),
Item("Yam"),
Item("Chanterelle"),
Item("Cranberries"),
Item("Holly"),
Item("Beet"),
Item("Cherry Bomb"),
Item("Bomb"),
Item("Mega Bomb"),
Item("Twig"),
Item("Salmonberry"),
Item("Grass Starter"),
Item("Amaranth Seeds"),
Item("Amaranth"),
Item("Grape Starter"),
Item("Hops Starter"),
Item("Pale Ale"),
Item("Hops"),
Item("Void Egg"),
Item("Mayonnaise"),
Item("Duck Mayonnaise"),
Item("Void Mayonnaise"),
Item("Acorn"),
Item("Maple Seed"),
Item("Pine Cone"),
Item("Dwarvish Translation Guide"),
Item("Clay"),
Item("Copper Bar"),
Item("Iron Bar"),
Item("Gold Bar"),
Item("Iridium Bar"),
Item("Refined Quartz"),
Item("Honey"),
Item("Tea Set"),
Item("Pickles"),
Item("Jelly"),
Item("Beer"),
Item("Rare Seed"),
Item("Wine"),
Item("Energy Tonic"),
Item("Juice"),
Item("Muscle Remedy"),
Item("Clam"),
Item("Golden Pumpkin"),
Item("Poppy"),
Item("Copper Ore"),
Item("Iron Ore"),
Item("Coal"),
Item("Gold Ore"),
Item("Iridium Ore"),
Item("Wood"),
Item("Nautilus Shell"),
Item("Coral"),
Item("Rainbow Shell"),
Item("Coffee"),
Item("Spice Berry"),
Item("Sea Urchin"),
Item("Grape"),
Item("Spring Onion"),
Item("Strawberry"),
Item("Straw Floor"),
Item("Sweet Pea"),
Item("Field Snack"),
Item("Common Mushroom"),
Item("Wood Path"),
Item("Wild Plum"),
Item("Gravel Path"),
Item("Hazelnut"),
Item("Crystal Path"),
Item("Blackberry"),
Item("Cobblestone Path"),
Item("Winter Root"),
Item("Blue Slime Egg"),
Item("Crystal Fruit"),
Item("Stepping Stone Path"),
Item("Snow Yam"),
Item("Sweet Gem Berry"),
Item("Crocus"),
Item("Vinegar"),
Item("Red Mushroom"),
Item("Sunflower"),
Item("Purple Mushroom"),
Item("Rice"),
Item("Cheese"),
Item("Fairy Seeds"),
Item("Goat Cheese"),
Item("Tulip Bulb"),
Item("Cloth"),
Item("Jazz Seeds"),
Item("Truffle"),
Item("Sunflower Seeds"),
Item("Truffle Oil"),
Item("Coffee Bean"),
Item("Stardrop"),
Item("Red Slime Egg"),
Item("Purple Slime Egg"),
Item("Wool"),
Item("Explosive Ammo"),
Item("Duck Egg"),
Item("Duck Feather"),
Item("Rabbits Foot"),
Item("Stone Base"),
Item("Poppy Seeds"),
Item("Ancient Fruit"),
Item("Spangle Seeds"),
Item("Algae Soup"),
Item("Pale Broth"),
Item("Bouquet"),
Item("Mead"),
Item("Mermaids Pendant"),
Item("Decorative Pot"),
Item("Drum Block"),
Item("Flute Block"),
Item("Speed Gro"),
Item("Deluxe Speed Gro"),
Item("Parsnip Seeds"),
Item("Bean Starter"),
Item("Cauliflower Seeds"),
Item("Potato Seeds"),
Item("Garlic Seeds"),
Item("Kale Seeds"),
Item("Rhubarb Seeds"),
Item("Melon Seeds"),
Item("Tomato Seeds"),
Item("Blueberry Seeds"),
Item("Pepper Seeds"),
Item("Wheat Seeds"),
Item("Radish Seeds"),
Item("Red Cabbage Seeds"),
Item("Starfruit Seeds"),
Item("Corn Seeds"),
Item("Eggplant Seeds"),
Item("Artichoke Seeds"),
Item("Pumpkin Seeds"),
Item("Bok Choy Seeds"),
Item("Yam Seeds"),
Item("Cranberry Seeds"),
Item("Beet Seeds"),
Item("Spring Seeds"),
Item("Summer Seeds"),
Item("Fall Seeds"),
Item("Winter Seeds"),
Item("Ancient Seeds"),
Item("Geode"),
Item("Frozen Geode"),
Item("Magma Geode"),
Item("Alamite"),
Item("Bixite"),
Item("Baryte"),
Item("Aerinite"),
Item("Calcite"),
Item("Dolomite"),
Item("Esperite"),
Item("Fluorapatite"),
Item("Geminite"),
Item("Helvite"),
Item("Jamborite"),
Item("Jagoite"),
Item("Kyanite"),
Item("Lunarite"),
Item("Malachite"),
Item("Neptunite"),
Item("Lemon Stone"),
Item("Nekoite"),
Item("Orpiment"),
Item("Petrified Slime"),
Item("Thunder Egg"),
Item("Pyrite"),
Item("Ocean Stone"),
Item("Ghost Crystal"),
Item("Tigerseye"),
Item("Jasper"),
Item("Opal"),
Item("Fire Opal"),
Item("Celestine"),
Item("Marble"),
Item("Sandstone"),
Item("Granite"),
Item("Basalt"),
Item("Limestone"),
Item("Soapstone"),
Item("Hematite"),
Item("Mudstone"),
Item("Obsidian"),
Item("Slate"),
Item("Fairy Stone"),
Item("Star Shards"),
Item("Prehistoric Scapula"),
Item("Prehistoric Tibia"),
Item("Prehistoric Skull"),
Item("Skeletal Hand"),
Item("Prehistoric Rib"),
Item("Prehistoric Vertebra"),
Item("Skeletal Tail"),
Item("Nautilus Fossil"),
Item("Amphibian Fossil"),
Item("Palm Fossil"),
Item("Trilobite"),
Item("Artifact Spot"),
Item("Tulip"),
Item("Summer Spangle"),
Item("Fairy Rose"),
Item("Blue Jazz"),
Item("Plum Pudding"),
Item("Artichoke Dip"),
Item("Stir Fry"),
Item("Roasted Hazelnuts"),
Item("Pumpkin Pie"),
Item("Radish Salad"),
Item("Fruit Salad"),
Item("Blackberry Cobbler"),
Item("Cranberry Candy"),
Item("Apple"),
Item("Bruschetta"),
Item("Cherry Sapling"),
Item("Apricot Sapling"),
Item("Orange Sapling"),
Item("Peach Sapling"),
Item("Pomegranate Sapling"),
Item("Apple Sapling"),
Item("Apricot"),
Item("Orange"),
Item("Peach"),
Item("Pomegranate"),
Item("Cherry"),
Item("Coleslaw"),
Item("Fiddlehead Risotto"),
Item("Poppyseed Muffin"),
Item("Green Slime Egg"),
Item("Mutant Carp"),
Item("Bug Meat"),
Item("Bait"),
Item("Sturgeon"),
Item("Tiger Trout"),
Item("Bullhead"),
Item("Tilapia"),
Item("Chub"),
Item("Magnet"),
Item("Dorado"),
Item("Albacore"),
Item("Shad"),
Item("Lingcod"),
Item("Halibut"),
Item("Hardwood"),
Item("Lobster"),
Item("Crayfish"),
Item("Crab"),
Item("Cockle"),
Item("Mussel"),
Item("Shrimp"),
Item("Snail"),
Item("Periwinkle"),
Item("Oyster"),
Item("Maple Syrup"),
Item("Oak Resin"),
Item("Pine Tar"),
Item("Chowder"),
Item("Fish Stew"),
Item("Escargot"),
Item("Lobster Bisque"),
Item("Maple Bar"),
Item("Crab Cakes"),
Item("Woodskip"),
Item("Strawberry Seeds"),
Item("Rotten Plant"),
Item("Omni Geode"),
Item("Slime"),
Item("Bat Wing"),
Item("Solar Essence"),
Item("Void Essence"),
Item("Mixed Seeds"),
Item("Fiber"),
Item("Life Elixir"),
Item("Wild Bait"),
Item("Glacierfish"),
Item("Battery Pack"),
Item("Lost Axe"),
Item("Lucky Purple Shorts"),
Item("Berry Basket"),
)
def item_commands(items):
commands = {}
for item in items:
for cmd in item.commands:
if cmd in commands:
raise RuntimeError(f"Duplicate item {cmd}")
commands[cmd] = item
return commands
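# item_commands() flattens an item tuple into a single spoken-form -> Item map,
# e.g. {"wood fence": Item("Wood Fence"), "deluxe speed grow": Item("Deluxe Speed-Gro"), ...};
# the RuntimeError guards against two items claiming the same spoken form.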
craftable_commands = item_commands(craftable_items)
other_item_commands = item_commands(other_items)
tool_commands = item_commands(tools)
craftable_items_choice = df.Choice("craftable_items", craftable_commands)
items_choice = df.Choice("items", {**craftable_commands, **other_item_commands, **tool_commands})
|
py | b40291ea5ce5c7c2b3c5b71c360c644993d8732a | # $Id$
#
# Copyright (C) 2003-2006 Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
from rdkit import six
from rdkit import RDConfig
if hasattr(RDConfig, "usePgSQL") and RDConfig.usePgSQL:
from pyPgSQL import PgSQL
# as of this writing (March 2004), this results in a speedup in
# getting results back from the wrapper:
PgSQL.fetchReturnsList = 1
from pyPgSQL.PgSQL import *
sqlTextTypes = [PG_CHAR, PG_BPCHAR, PG_TEXT, PG_VARCHAR, PG_NAME]
sqlIntTypes = [PG_INT8, PG_INT2, PG_INT4]
sqlFloatTypes = [PG_FLOAT4, PG_FLOAT8]
sqlBinTypes = [PG_OID, PG_BLOB, PG_BYTEA]
getTablesSql = """select tablename from pg_tables where schemaname='public'"""
getTablesAndViewsSql = """SELECT c.relname as "Name"
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_user u ON u.usesysid = c.relowner
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('r','v','S','')
AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
AND pg_catalog.pg_table_is_visible(c.oid)
"""
getDbSql = """ select datname from pg_database where datallowconn """
fileWildcard = None
placeHolder = '%s'
binaryTypeName = "bytea"
binaryHolder = PgBytea
RDTestDatabase = "::RDTests"
elif hasattr(RDConfig, "useSqlLite") and RDConfig.useSqlLite:
try:
import sqlite3 as sqlite
#from sqlite3 import *
except ImportError:
from pysqlite2 import dbapi2 as sqlite
#from pysqlite2 import *
sqlTextTypes = []
sqlIntTypes = []
sqlFloatTypes = []
sqlBinTypes = []
getTablesSql = """select name from SQLite_Master where type='table'"""
getTablesAndViewsSql = """select name from SQLite_Master where type in ('table','view')"""
getDbSql = None
dbFileWildcard = '*.sqlt'
placeHolder = '?'
binaryTypeName = "blob"
binaryHolder = memoryview if six.PY3 else buffer
connect = lambda x, *args: sqlite.connect(x)
else:
raise ImportError("Neither sqlite nor PgSQL support found.")
|
py | b4029228176712b49c1b29609dad3e6ae8a1f5f8 | # Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018-2019 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import logging
from collections import deque
from typing import (
TYPE_CHECKING,
Any,
Awaitable,
Callable,
Collection,
Deque,
Dict,
Generic,
Iterable,
List,
Optional,
Set,
Tuple,
TypeVar,
)
import attr
from prometheus_client import Counter, Histogram
from twisted.internet import defer
from synapse.api.constants import EventTypes, Membership
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.logging import opentracing
from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.databases import Databases
from synapse.storage.databases.main.events import DeltaState
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.types import (
PersistedEventPosition,
RoomStreamToken,
StateMap,
get_domain_from_id,
)
from synapse.util.async_helpers import ObservableDeferred, yieldable_gather_results
from synapse.util.metrics import Measure
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
# The number of times we are recalculating the current state
state_delta_counter = Counter("synapse_storage_events_state_delta", "")
# The number of times we are recalculating state when there is only a
# single forward extremity
state_delta_single_event_counter = Counter(
"synapse_storage_events_state_delta_single_event", ""
)
# The number of times we are recalculating state when we could have reasonably
# calculated the delta when we calculated the state for an event we were
# persisting.
state_delta_reuse_delta_counter = Counter(
"synapse_storage_events_state_delta_reuse_delta", ""
)
# The number of forward extremities for each new event.
forward_extremities_counter = Histogram(
"synapse_storage_events_forward_extremities_persisted",
"Number of forward extremities for each new event",
buckets=(1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500, "+Inf"),
)
# The number of stale forward extremities for each new event. Stale extremities
# are those that were in the previous set of extremities as well as the new.
stale_forward_extremities_counter = Histogram(
"synapse_storage_events_stale_forward_extremities_persisted",
"Number of unchanged forward extremities for each new event",
buckets=(0, 1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500, "+Inf"),
)
state_resolutions_during_persistence = Counter(
"synapse_storage_events_state_resolutions_during_persistence",
"Number of times we had to do state res to calculate new current state",
)
potential_times_prune_extremities = Counter(
"synapse_storage_events_potential_times_prune_extremities",
"Number of times we might be able to prune extremities",
)
times_pruned_extremities = Counter(
"synapse_storage_events_times_pruned_extremities",
"Number of times we were actually be able to prune extremities",
)
@attr.s(auto_attribs=True, slots=True)
class _EventPersistQueueItem:
events_and_contexts: List[Tuple[EventBase, EventContext]]
backfilled: bool
deferred: ObservableDeferred
parent_opentracing_span_contexts: List = attr.ib(factory=list)
"""A list of opentracing spans waiting for this batch"""
opentracing_span_context: Any = None
"""The opentracing span under which the persistence actually happened"""
_PersistResult = TypeVar("_PersistResult")
class _EventPeristenceQueue(Generic[_PersistResult]):
"""Queues up events so that they can be persisted in bulk with only one
concurrent transaction per room.
"""
def __init__(
self,
per_item_callback: Callable[
[List[Tuple[EventBase, EventContext]], bool],
Awaitable[_PersistResult],
],
):
"""Create a new event persistence queue
The per_item_callback will be called for each item added via add_to_queue,
and its result will be returned via the Deferreds returned from add_to_queue.
"""
self._event_persist_queues: Dict[str, Deque[_EventPersistQueueItem]] = {}
self._currently_persisting_rooms: Set[str] = set()
self._per_item_callback = per_item_callback
async def add_to_queue(
self,
room_id: str,
events_and_contexts: Iterable[Tuple[EventBase, EventContext]],
backfilled: bool,
) -> _PersistResult:
"""Add events to the queue, with the given persist_event options.
If we are not already processing events in this room, starts off a background
        process to do so, calling the per_item_callback for each item.
Args:
room_id (str):
events_and_contexts (list[(EventBase, EventContext)]):
backfilled (bool):
Returns:
the result returned by the `_per_item_callback` passed to
`__init__`.
"""
queue = self._event_persist_queues.setdefault(room_id, deque())
# if the last item in the queue has the same `backfilled` setting,
# we can just add these new events to that item.
if queue and queue[-1].backfilled == backfilled:
end_item = queue[-1]
else:
# need to make a new queue item
deferred: ObservableDeferred[_PersistResult] = ObservableDeferred(
defer.Deferred(), consumeErrors=True
)
end_item = _EventPersistQueueItem(
events_and_contexts=[],
backfilled=backfilled,
deferred=deferred,
)
queue.append(end_item)
# add our events to the queue item
end_item.events_and_contexts.extend(events_and_contexts)
# also add our active opentracing span to the item so that we get a link back
span = opentracing.active_span()
if span:
end_item.parent_opentracing_span_contexts.append(span.context)
# start a processor for the queue, if there isn't one already
self._handle_queue(room_id)
# wait for the queue item to complete
res = await make_deferred_yieldable(end_item.deferred.observe())
# add another opentracing span which links to the persist trace.
with opentracing.start_active_span_follows_from(
"persist_event_batch_complete", (end_item.opentracing_span_context,)
):
pass
return res
def _handle_queue(self, room_id):
"""Attempts to handle the queue for a room if not already being handled.
        The queue's callback will be invoked for each item in the queue,
of type _EventPersistQueueItem. The per_item_callback will continuously
be called with new items, unless the queue becomes empty. The return
value of the function will be given to the deferreds waiting on the item,
exceptions will be passed to the deferreds as well.
This function should therefore be called whenever anything is added
to the queue.
If another callback is currently handling the queue then it will not be
invoked.
"""
if room_id in self._currently_persisting_rooms:
return
self._currently_persisting_rooms.add(room_id)
async def handle_queue_loop():
try:
queue = self._get_drainining_queue(room_id)
for item in queue:
try:
with opentracing.start_active_span_follows_from(
"persist_event_batch",
item.parent_opentracing_span_contexts,
inherit_force_tracing=True,
) as scope:
if scope:
item.opentracing_span_context = scope.span.context
ret = await self._per_item_callback(
item.events_and_contexts, item.backfilled
)
except Exception:
with PreserveLoggingContext():
item.deferred.errback()
else:
with PreserveLoggingContext():
item.deferred.callback(ret)
finally:
queue = self._event_persist_queues.pop(room_id, None)
if queue:
self._event_persist_queues[room_id] = queue
self._currently_persisting_rooms.discard(room_id)
# set handle_queue_loop off in the background
run_as_background_process("persist_events", handle_queue_loop)
def _get_drainining_queue(self, room_id):
queue = self._event_persist_queues.setdefault(room_id, deque())
try:
while True:
yield queue.popleft()
except IndexError:
# Queue has been drained.
pass
class EventsPersistenceStorage:
"""High level interface for handling persisting newly received events.
Takes care of batching up events by room, and calculating the necessary
current state and forward extremity changes.
"""
def __init__(self, hs: "HomeServer", stores: Databases):
# We ultimately want to split out the state store from the main store,
# so we use separate variables here even though they point to the same
# store for now.
self.main_store = stores.main
self.state_store = stores.state
assert stores.persist_events
self.persist_events_store = stores.persist_events
self._clock = hs.get_clock()
self._instance_name = hs.get_instance_name()
self.is_mine_id = hs.is_mine_id
self._event_persist_queue = _EventPeristenceQueue(self._persist_event_batch)
self._state_resolution_handler = hs.get_state_resolution_handler()
@opentracing.trace
async def persist_events(
self,
events_and_contexts: Iterable[Tuple[EventBase, EventContext]],
backfilled: bool = False,
) -> Tuple[List[EventBase], RoomStreamToken]:
"""
Write events to the database
Args:
events_and_contexts: list of tuples of (event, context)
backfilled: Whether the results are retrieved from federation
via backfill or not. Used to determine if they're "new" events
which might update the current state etc.
Returns:
List of events persisted, the current position room stream position.
The list of events persisted may not be the same as those passed in
if they were deduplicated due to an event already existing that
            matched the transaction ID; the existing event is returned in such
a case.
"""
partitioned: Dict[str, List[Tuple[EventBase, EventContext]]] = {}
for event, ctx in events_and_contexts:
partitioned.setdefault(event.room_id, []).append((event, ctx))
async def enqueue(item):
room_id, evs_ctxs = item
return await self._event_persist_queue.add_to_queue(
room_id, evs_ctxs, backfilled=backfilled
)
ret_vals = await yieldable_gather_results(enqueue, partitioned.items())
# Each call to add_to_queue returns a map from event ID to existing event ID if
# the event was deduplicated. (The dict may also include other entries if
# the event was persisted in a batch with other events).
#
# Since we use `yieldable_gather_results` we need to merge the returned list
# of dicts into one.
replaced_events: Dict[str, str] = {}
for d in ret_vals:
replaced_events.update(d)
events = []
for event, _ in events_and_contexts:
existing_event_id = replaced_events.get(event.event_id)
if existing_event_id:
events.append(await self.main_store.get_event(existing_event_id))
else:
events.append(event)
return (
events,
self.main_store.get_room_max_token(),
)
@opentracing.trace
async def persist_event(
self, event: EventBase, context: EventContext, backfilled: bool = False
) -> Tuple[EventBase, PersistedEventPosition, RoomStreamToken]:
"""
Returns:
The event, stream ordering of `event`, and the stream ordering of the
latest persisted event. The returned event may not match the given
event if it was deduplicated due to an existing event matching the
transaction ID.
"""
# add_to_queue returns a map from event ID to existing event ID if the
# event was deduplicated. (The dict may also include other entries if
# the event was persisted in a batch with other events.)
replaced_events = await self._event_persist_queue.add_to_queue(
event.room_id, [(event, context)], backfilled=backfilled
)
replaced_event = replaced_events.get(event.event_id)
if replaced_event:
event = await self.main_store.get_event(replaced_event)
event_stream_id = event.internal_metadata.stream_ordering
# stream ordering should have been assigned by now
assert event_stream_id
pos = PersistedEventPosition(self._instance_name, event_stream_id)
return event, pos, self.main_store.get_room_max_token()
async def _persist_event_batch(
self,
events_and_contexts: List[Tuple[EventBase, EventContext]],
backfilled: bool = False,
) -> Dict[str, str]:
"""Callback for the _event_persist_queue
Calculates the change to current state and forward extremities, and
persists the given events and with those updates.
Returns:
A dictionary of event ID to event ID we didn't persist as we already
had another event persisted with the same TXN ID.
"""
replaced_events: Dict[str, str] = {}
if not events_and_contexts:
return replaced_events
# Check if any of the events have a transaction ID that has already been
# persisted, and if so we don't persist it again.
#
# We should have checked this a long time before we get here, but it's
# possible that different send event requests race in such a way that
# they both pass the earlier checks. Checking here isn't racey as we can
# have only one `_persist_events` per room being called at a time.
replaced_events = await self.main_store.get_already_persisted_events(
(event for event, _ in events_and_contexts)
)
if replaced_events:
events_and_contexts = [
(e, ctx)
for e, ctx in events_and_contexts
if e.event_id not in replaced_events
]
if not events_and_contexts:
return replaced_events
chunks = [
events_and_contexts[x : x + 100]
for x in range(0, len(events_and_contexts), 100)
]
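        # e.g. 250 events are split into chunks of 100, 100 and 50.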
for chunk in chunks:
# We can't easily parallelize these since different chunks
# might contain the same event. :(
# NB: Assumes that we are only persisting events for one room
# at a time.
# map room_id->set[event_ids] giving the new forward
# extremities in each room
new_forward_extremities: Dict[str, Set[str]] = {}
# map room_id->(type,state_key)->event_id tracking the full
# state in each room after adding these events.
# This is simply used to prefill the get_current_state_ids
# cache
current_state_for_room: Dict[str, StateMap[str]] = {}
# map room_id->(to_delete, to_insert) where to_delete is a list
# of type/state keys to remove from current state, and to_insert
# is a map (type,key)->event_id giving the state delta in each
# room
state_delta_for_room: Dict[str, DeltaState] = {}
# Set of remote users which were in rooms the server has left. We
# should check if we still share any rooms and if not we mark their
# device lists as stale.
potentially_left_users: Set[str] = set()
if not backfilled:
with Measure(self._clock, "_calculate_state_and_extrem"):
# Work out the new "current state" for each room.
# We do this by working out what the new extremities are and then
# calculating the state from that.
events_by_room: Dict[str, List[Tuple[EventBase, EventContext]]] = {}
for event, context in chunk:
events_by_room.setdefault(event.room_id, []).append(
(event, context)
)
for room_id, ev_ctx_rm in events_by_room.items():
latest_event_ids = set(
await self.main_store.get_latest_event_ids_in_room(room_id)
)
new_latest_event_ids = await self._calculate_new_extremities(
room_id, ev_ctx_rm, latest_event_ids
)
if new_latest_event_ids == latest_event_ids:
# No change in extremities, so no change in state
continue
# there should always be at least one forward extremity.
# (except during the initial persistence of the send_join
# results, in which case there will be no existing
# extremities, so we'll `continue` above and skip this bit.)
assert new_latest_event_ids, "No forward extremities left!"
new_forward_extremities[room_id] = new_latest_event_ids
len_1 = (
len(latest_event_ids) == 1
and len(new_latest_event_ids) == 1
)
if len_1:
all_single_prev_not_state = all(
len(event.prev_event_ids()) == 1
and not event.is_state()
for event, ctx in ev_ctx_rm
)
# Don't bother calculating state if they're just
# a long chain of single ancestor non-state events.
if all_single_prev_not_state:
continue
state_delta_counter.inc()
if len(new_latest_event_ids) == 1:
state_delta_single_event_counter.inc()
# This is a fairly handwavey check to see if we could
# have guessed what the delta would have been when
# processing one of these events.
# What we're interested in is if the latest extremities
# were the same when we created the event as they are
# now. When this server creates a new event (as opposed
# to receiving it over federation) it will use the
# forward extremities as the prev_events, so we can
# guess this by looking at the prev_events and checking
# if they match the current forward extremities.
for ev, _ in ev_ctx_rm:
prev_event_ids = set(ev.prev_event_ids())
if latest_event_ids == prev_event_ids:
state_delta_reuse_delta_counter.inc()
break
logger.debug("Calculating state delta for room %s", room_id)
with Measure(
self._clock, "persist_events.get_new_state_after_events"
):
res = await self._get_new_state_after_events(
room_id,
ev_ctx_rm,
latest_event_ids,
new_latest_event_ids,
)
current_state, delta_ids, new_latest_event_ids = res
# there should always be at least one forward extremity.
# (except during the initial persistence of the send_join
# results, in which case there will be no existing
# extremities, so we'll `continue` above and skip this bit.)
assert new_latest_event_ids, "No forward extremities left!"
new_forward_extremities[room_id] = new_latest_event_ids
# If either are not None then there has been a change,
# and we need to work out the delta (or use that
# given)
delta = None
if delta_ids is not None:
# If there is a delta we know that we've
# only added or replaced state, never
# removed keys entirely.
delta = DeltaState([], delta_ids)
elif current_state is not None:
with Measure(
self._clock, "persist_events.calculate_state_delta"
):
delta = await self._calculate_state_delta(
room_id, current_state
)
if delta:
# If we have a change of state then lets check
# whether we're actually still a member of the room,
# or if our last user left. If we're no longer in
# the room then we delete the current state and
# extremities.
is_still_joined = await self._is_server_still_joined(
room_id,
ev_ctx_rm,
delta,
current_state,
potentially_left_users,
)
if not is_still_joined:
logger.info("Server no longer in room %s", room_id)
latest_event_ids = set()
current_state = {}
delta.no_longer_in_room = True
state_delta_for_room[room_id] = delta
# If we have the current_state then lets prefill
# the cache with it.
if current_state is not None:
current_state_for_room[room_id] = current_state
await self.persist_events_store._persist_events_and_state_updates(
chunk,
current_state_for_room=current_state_for_room,
state_delta_for_room=state_delta_for_room,
new_forward_extremities=new_forward_extremities,
use_negative_stream_ordering=backfilled,
inhibit_local_membership_updates=backfilled,
)
await self._handle_potentially_left_users(potentially_left_users)
return replaced_events
async def _calculate_new_extremities(
self,
room_id: str,
event_contexts: List[Tuple[EventBase, EventContext]],
latest_event_ids: Collection[str],
) -> Set[str]:
"""Calculates the new forward extremities for a room given events to
persist.
Assumes that we are only persisting events for one room at a time.
"""
# we're only interested in new events which aren't outliers and which aren't
# being rejected.
new_events = [
event
for event, ctx in event_contexts
if not event.internal_metadata.is_outlier()
and not ctx.rejected
and not event.internal_metadata.is_soft_failed()
]
latest_event_ids = set(latest_event_ids)
# start with the existing forward extremities
result = set(latest_event_ids)
# add all the new events to the list
result.update(event.event_id for event in new_events)
# Now remove all events which are prev_events of any of the new events
result.difference_update(
e_id for event in new_events for e_id in event.prev_event_ids()
)
# Remove any events which are prev_events of any existing events.
existing_prevs: Collection[
str
] = await self.persist_events_store._get_events_which_are_prevs(result)
result.difference_update(existing_prevs)
# Finally handle the case where the new events have soft-failed prev
# events. If they do we need to remove them and their prev events,
# otherwise we end up with dangling extremities.
existing_prevs = await self.persist_events_store._get_prevs_before_rejected(
e_id for event in new_events for e_id in event.prev_event_ids()
)
result.difference_update(existing_prevs)
# We only update metrics for events that change forward extremities
# (e.g. we ignore backfill/outliers/etc)
if result != latest_event_ids:
forward_extremities_counter.observe(len(result))
stale = latest_event_ids & result
stale_forward_extremities_counter.observe(len(stale))
return result
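    # Worked example with hypothetical event IDs: given existing extremities {A}
    # and new events B, C where A <- B <- C, the candidate set is {A, B, C};
    # A and B are then removed as prev_events of new events, leaving {C}.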
async def _get_new_state_after_events(
self,
room_id: str,
events_context: List[Tuple[EventBase, EventContext]],
old_latest_event_ids: Set[str],
new_latest_event_ids: Set[str],
) -> Tuple[Optional[StateMap[str]], Optional[StateMap[str]], Set[str]]:
"""Calculate the current state dict after adding some new events to
a room
Args:
room_id:
room to which the events are being added. Used for logging etc
events_context:
events and contexts which are being added to the room
old_latest_event_ids:
the old forward extremities for the room.
new_latest_event_ids :
the new forward extremities for the room.
Returns:
Returns a tuple of two state maps and a set of new forward
extremities.
The first state map is the full new current state and the second
is the delta to the existing current state. If both are None then
there has been no change.
The function may prune some old entries from the set of new
forward extremities if it's safe to do so.
            If there has been a change then we only return the delta if it's
already been calculated. Conversely if we do know the delta then
the new current state is only returned if we've already calculated
it.
"""
# map from state_group to ((type, key) -> event_id) state map
state_groups_map = {}
# Map from (prev state group, new state group) -> delta state dict
state_group_deltas = {}
for ev, ctx in events_context:
if ctx.state_group is None:
# This should only happen for outlier events.
if not ev.internal_metadata.is_outlier():
raise Exception(
"Context for new event %s has no state "
"group" % (ev.event_id,)
)
continue
if ctx.state_group in state_groups_map:
continue
# We're only interested in pulling out state that has already
# been cached in the context. We'll pull stuff out of the DB later
# if necessary.
current_state_ids = ctx.get_cached_current_state_ids()
if current_state_ids is not None:
state_groups_map[ctx.state_group] = current_state_ids
if ctx.prev_group:
state_group_deltas[(ctx.prev_group, ctx.state_group)] = ctx.delta_ids
# We need to map the event_ids to their state groups. First, let's
# check if the event is one we're persisting, in which case we can
# pull the state group from its context.
# Otherwise we need to pull the state group from the database.
# Set of events we need to fetch groups for. (We know none of the old
# extremities are going to be in events_context).
missing_event_ids = set(old_latest_event_ids)
event_id_to_state_group = {}
for event_id in new_latest_event_ids:
# First search in the list of new events we're adding.
for ev, ctx in events_context:
if event_id == ev.event_id and ctx.state_group is not None:
event_id_to_state_group[event_id] = ctx.state_group
break
else:
# If we couldn't find it, then we'll need to pull
# the state from the database
missing_event_ids.add(event_id)
if missing_event_ids:
# Now pull out the state groups for any missing events from DB
event_to_groups = await self.main_store._get_state_group_for_events(
missing_event_ids
)
event_id_to_state_group.update(event_to_groups)
# State groups of old_latest_event_ids
old_state_groups = {
event_id_to_state_group[evid] for evid in old_latest_event_ids
}
# State groups of new_latest_event_ids
new_state_groups = {
event_id_to_state_group[evid] for evid in new_latest_event_ids
}
        # If the old and new groups are the same then we don't need to do
# anything.
if old_state_groups == new_state_groups:
return None, None, new_latest_event_ids
if len(new_state_groups) == 1 and len(old_state_groups) == 1:
# If we're going from one state group to another, lets check if
# we have a delta for that transition. If we do then we can just
# return that.
new_state_group = next(iter(new_state_groups))
old_state_group = next(iter(old_state_groups))
delta_ids = state_group_deltas.get((old_state_group, new_state_group), None)
if delta_ids is not None:
# We have a delta from the existing to new current state,
# so lets just return that. If we happen to already have
# the current state in memory then lets also return that,
# but it doesn't matter if we don't.
new_state = state_groups_map.get(new_state_group)
return new_state, delta_ids, new_latest_event_ids
# Now that we have calculated new_state_groups we need to get
# their state IDs so we can resolve to a single state set.
missing_state = new_state_groups - set(state_groups_map)
if missing_state:
group_to_state = await self.state_store._get_state_for_groups(missing_state)
state_groups_map.update(group_to_state)
if len(new_state_groups) == 1:
# If there is only one state group, then we know what the current
# state is.
return state_groups_map[new_state_groups.pop()], None, new_latest_event_ids
# Ok, we need to defer to the state handler to resolve our state sets.
state_groups = {sg: state_groups_map[sg] for sg in new_state_groups}
events_map = {ev.event_id: ev for ev, _ in events_context}
# We need to get the room version, which is in the create event.
        # Normally that'd be in the database, but it's also possible that we're
# currently trying to persist it.
room_version = None
for ev, _ in events_context:
if ev.type == EventTypes.Create and ev.state_key == "":
room_version = ev.content.get("room_version", "1")
break
if not room_version:
room_version = await self.main_store.get_room_version_id(room_id)
logger.debug("calling resolve_state_groups from preserve_events")
# Avoid a circular import.
from synapse.state import StateResolutionStore
res = await self._state_resolution_handler.resolve_state_groups(
room_id,
room_version,
state_groups,
events_map,
state_res_store=StateResolutionStore(self.main_store),
)
state_resolutions_during_persistence.inc()
# If the returned state matches the state group of one of the new
# forward extremities then we check if we are able to prune some state
# extremities.
if res.state_group and res.state_group in new_state_groups:
new_latest_event_ids = await self._prune_extremities(
room_id,
new_latest_event_ids,
res.state_group,
event_id_to_state_group,
events_context,
)
return res.state, None, new_latest_event_ids
async def _prune_extremities(
self,
room_id: str,
new_latest_event_ids: Set[str],
resolved_state_group: int,
event_id_to_state_group: Dict[str, int],
events_context: List[Tuple[EventBase, EventContext]],
) -> Set[str]:
"""See if we can prune any of the extremities after calculating the
resolved state.
"""
potential_times_prune_extremities.inc()
# We keep all the extremities that have the same state group, and
# see if we can drop the others.
new_new_extrems = {
e
for e in new_latest_event_ids
if event_id_to_state_group[e] == resolved_state_group
}
dropped_extrems = set(new_latest_event_ids) - new_new_extrems
logger.debug("Might drop extremities: %s", dropped_extrems)
# We only drop events from the extremities list if:
# 1. we're not currently persisting them;
# 2. they're not our own events (or are dummy events); and
# 3. they're either:
# 1. over N hours old and more than N events ago (we use depth to
# calculate); or
# 2. we are persisting an event from the same domain and more than
# M events ago.
#
# The idea is that we don't want to drop events that are "legitimate"
# extremities (that we would want to include as prev events), only
# "stuck" extremities that are e.g. due to a gap in the graph.
#
# Note that we either drop all of them or none of them. If we only drop
# some of the events we don't know if state res would come to the same
# conclusion.
for ev, _ in events_context:
if ev.event_id in dropped_extrems:
logger.debug(
"Not dropping extremities: %s is being persisted", ev.event_id
)
return new_latest_event_ids
dropped_events = await self.main_store.get_events(
dropped_extrems,
allow_rejected=True,
redact_behaviour=EventRedactBehaviour.AS_IS,
)
new_senders = {get_domain_from_id(e.sender) for e, _ in events_context}
one_day_ago = self._clock.time_msec() - 24 * 60 * 60 * 1000
current_depth = max(e.depth for e, _ in events_context)
for event in dropped_events.values():
# If the event is a local dummy event then we should check it
# doesn't reference any local events, as we want to reference those
# if we send any new events.
#
# Note we do this recursively to handle the case where a dummy event
# references a dummy event that only references remote events.
#
# Ideally we'd figure out a way of still being able to drop old
# dummy events that reference local events, but this is good enough
# as a first cut.
events_to_check: Collection[EventBase] = [event]
while events_to_check:
new_events: Set[str] = set()
for event_to_check in events_to_check:
if self.is_mine_id(event_to_check.sender):
if event_to_check.type != EventTypes.Dummy:
logger.debug("Not dropping own event")
return new_latest_event_ids
new_events.update(event_to_check.prev_event_ids())
prev_events = await self.main_store.get_events(
new_events,
allow_rejected=True,
redact_behaviour=EventRedactBehaviour.AS_IS,
)
events_to_check = prev_events.values()
if (
event.origin_server_ts < one_day_ago
and event.depth < current_depth - 100
):
continue
# We can be less conservative about dropping extremities from the
# same domain, though we do want to wait a little bit (otherwise
# we'll immediately remove all extremities from a given server).
if (
get_domain_from_id(event.sender) in new_senders
and event.depth < current_depth - 20
):
continue
logger.debug(
"Not dropping as too new and not in new_senders: %s",
new_senders,
)
return new_latest_event_ids
times_pruned_extremities.inc()
logger.info(
"Pruning forward extremities in room %s: from %s -> %s",
room_id,
new_latest_event_ids,
new_new_extrems,
)
return new_new_extrems
async def _calculate_state_delta(
self, room_id: str, current_state: StateMap[str]
) -> DeltaState:
"""Calculate the new state deltas for a room.
Assumes that we are only persisting events for one room at a time.
"""
existing_state = await self.main_store.get_current_state_ids(room_id)
to_delete = [key for key in existing_state if key not in current_state]
to_insert = {
key: ev_id
for key, ev_id in current_state.items()
if ev_id != existing_state.get(key)
}
return DeltaState(to_delete=to_delete, to_insert=to_insert)
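    # Sketch with made-up state maps: if the stored state is
    #   {("m.room.name", ""): "$e1", ("m.room.member", "@a:x"): "$e2"}
    # and the new current state is {("m.room.name", ""): "$e3"}, then to_delete
    # holds the member key and to_insert maps the name key to "$e3".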
async def _is_server_still_joined(
self,
room_id: str,
ev_ctx_rm: List[Tuple[EventBase, EventContext]],
delta: DeltaState,
current_state: Optional[StateMap[str]],
potentially_left_users: Set[str],
) -> bool:
"""Check if the server will still be joined after the given events have
        been persisted.
Args:
room_id
ev_ctx_rm
delta: The delta of current state between what is in the database
and what the new current state will be.
current_state: The new current state if it already been calculated,
otherwise None.
potentially_left_users: If the server has left the room, then joined
remote users will be added to this set to indicate that the
server may no longer be sharing a room with them.
"""
if not any(
self.is_mine_id(state_key)
for typ, state_key in itertools.chain(delta.to_delete, delta.to_insert)
if typ == EventTypes.Member
):
# There have been no changes to membership of our users, so nothing
# has changed and we assume we're still in the room.
return True
# Check if any of the given events are a local join that appear in the
# current state
events_to_check = [] # Event IDs that aren't an event we're persisting
for (typ, state_key), event_id in delta.to_insert.items():
if typ != EventTypes.Member or not self.is_mine_id(state_key):
continue
for event, _ in ev_ctx_rm:
if event_id == event.event_id:
if event.membership == Membership.JOIN:
return True
# The event is not in `ev_ctx_rm`, so we need to pull it out of
# the DB.
events_to_check.append(event_id)
# Check if any of the changes that we don't have events for are joins.
if events_to_check:
members = await self.main_store.get_membership_from_event_ids(
events_to_check
)
is_still_joined = any(
member and member.membership == Membership.JOIN
for member in members.values()
)
if is_still_joined:
return True
# None of the new state events are local joins, so we check the database
# to see if there are any other local users in the room. We ignore users
        # whose state has changed, as we've already handled their new state above.
users_to_ignore = [
state_key
for typ, state_key in itertools.chain(delta.to_insert, delta.to_delete)
if typ == EventTypes.Member and self.is_mine_id(state_key)
]
if await self.main_store.is_local_host_in_room_ignoring_users(
room_id, users_to_ignore
):
return True
# The server will leave the room, so we go and find out which remote
# users will still be joined when we leave.
if current_state is None:
current_state = await self.main_store.get_current_state_ids(room_id)
current_state = dict(current_state)
for key in delta.to_delete:
current_state.pop(key, None)
current_state.update(delta.to_insert)
remote_event_ids = [
event_id
for (
typ,
state_key,
), event_id in current_state.items()
if typ == EventTypes.Member and not self.is_mine_id(state_key)
]
members = await self.main_store.get_membership_from_event_ids(remote_event_ids)
potentially_left_users.update(
member.user_id
for member in members.values()
if member and member.membership == Membership.JOIN
)
return False
async def _handle_potentially_left_users(self, user_ids: Set[str]):
"""Given a set of remote users check if the server still shares a room with
them. If not then mark those users' device cache as stale.
"""
if not user_ids:
return
joined_users = await self.main_store.get_users_server_still_shares_room_with(
user_ids
)
left_users = user_ids - joined_users
for user_id in left_users:
await self.main_store.mark_remote_user_device_list_as_unsubscribed(user_id)
|
py | b40292911cf87be1db80593fab93c592710e69aa | # Configuration file for jupyter-notebook.
#------------------------------------------------------------------------------
# Application(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
## This is an application.
## The date format used by logging formatters for %(asctime)s
#c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
#c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
#c.Application.log_level = 30
#------------------------------------------------------------------------------
# JupyterApp(Application) configuration
#------------------------------------------------------------------------------
## Base class for Jupyter applications
## Answer yes to any prompts.
#c.JupyterApp.answer_yes = False
## Full path of a config file.
#c.JupyterApp.config_file = ''
## Specify a config file to load.
#c.JupyterApp.config_file_name = ''
## Generate default config file.
#c.JupyterApp.generate_config = False
#------------------------------------------------------------------------------
# NotebookApp(JupyterApp) configuration
#------------------------------------------------------------------------------
## Set the Access-Control-Allow-Credentials: true header
#c.NotebookApp.allow_credentials = False
## Set the Access-Control-Allow-Origin header
#
# Use '*' to allow any origin to access your server.
#
# Takes precedence over allow_origin_pat.
c.NotebookApp.allow_origin = 'localhost'
## Use a regular expression for the Access-Control-Allow-Origin header
#
# Requests from an origin matching the expression will get replies with:
#
# Access-Control-Allow-Origin: origin
#
# where `origin` is the origin of the request.
#
# Ignored if allow_origin is set.
#c.NotebookApp.allow_origin_pat = ''
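# e.g. (hypothetical domain) to also accept any HTTPS subdomain of example.com:
#c.NotebookApp.allow_origin_pat = r'https://.*\.example\.com'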
## Allow password to be changed at login for the notebook server.
#
# While logging in with a token, the notebook server UI will give the opportunity
# to the user to enter a new password at the same time that will replace the
# token login mechanism.
#
# This can be set to false to prevent changing password from the UI/API.
#c.NotebookApp.allow_password_change = True
## Whether to allow the user to run the notebook as root.
#c.NotebookApp.allow_root = False
## DEPRECATED use base_url
#c.NotebookApp.base_project_url = '/'
## The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
#c.NotebookApp.base_url = '/'
## Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
#c.NotebookApp.browser = ''
## The full path to an SSL/TLS certificate file.
#c.NotebookApp.certfile = ''
## The full path to a certificate authority certificate for SSL/TLS client
# authentication.
#c.NotebookApp.client_ca = ''
## The config manager class to use
#c.NotebookApp.config_manager_class = 'notebook.services.config.manager.ConfigManager'
## The notebook manager class to use.
#c.NotebookApp.contents_manager_class = 'notebook.services.contents.largefilemanager.LargeFileManager'
## Extra keyword arguments to pass to `set_secure_cookie`. See tornado's
# set_secure_cookie docs for details.
#c.NotebookApp.cookie_options = {}
## The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
#c.NotebookApp.cookie_secret = b''
## The file where the cookie secret is stored.
#c.NotebookApp.cookie_secret_file = ''
## The default URL to redirect to from `/`
c.NotebookApp.default_url = '/tree#examples'
## Disable cross-site-request-forgery protection
#
# Jupyter notebook 4.3.1 introduces protection from cross-site request
# forgeries, requiring API requests to either:
#
#  - originate from pages served by this server (validated with XSRF cookie and
#    token), or
#  - authenticate with a token
#
# Some anonymous compute resources still desire the ability to run code,
# completely without authentication. These services can disable all
# authentication and security checks, with the full knowledge of what that
# implies.
#c.NotebookApp.disable_check_xsrf = False
## Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library Jupyter uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
#c.NotebookApp.enable_mathjax = True
## extra paths to look for Javascript notebook extensions
#c.NotebookApp.extra_nbextensions_path = []
## handlers that should be loaded at higher priority than the default services
#c.NotebookApp.extra_services = []
## Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
#c.NotebookApp.extra_static_paths = []
## Extra paths to search for serving jinja templates.
#
# Can be used to override templates from notebook.templates.
#c.NotebookApp.extra_template_paths = []
##
#c.NotebookApp.file_to_run = ''
## Deprecated: whether to use the minified JS file or not; mainly used during
# development to avoid JS recompilation.
#c.NotebookApp.ignore_minified_js = False
## (bytes/sec) Maximum rate at which stream output can be sent on iopub before
# it is limited.
#c.NotebookApp.iopub_data_rate_limit = 1000000
## (msgs/sec) Maximum rate at which messages can be sent on iopub before they are
# limited.
#c.NotebookApp.iopub_msg_rate_limit = 1000
## The IP address the notebook server will listen on.
c.NotebookApp.ip = 'localhost'
## Supply extra arguments that will be passed to Jinja environment.
#c.NotebookApp.jinja_environment_options = {}
## Extra variables to supply to jinja templates when rendering.
#c.NotebookApp.jinja_template_vars = {}
## The kernel manager class to use.
#c.NotebookApp.kernel_manager_class = 'notebook.services.kernels.kernelmanager.MappingKernelManager'
## The kernel spec manager class to use. Should be a subclass of
# `jupyter_client.kernelspec.KernelSpecManager`.
#
# The API of KernelSpecManager is provisional and might change without warning
# between this version of Jupyter and the next stable one.
#c.NotebookApp.kernel_spec_manager_class = 'jupyter_client.kernelspec.KernelSpecManager'
## The full path to a private key file for usage with SSL/TLS.
#c.NotebookApp.keyfile = ''
## The login handler class to use.
#c.NotebookApp.login_handler_class = 'notebook.auth.login.LoginHandler'
## The logout handler class to use.
#c.NotebookApp.logout_handler_class = 'notebook.auth.logout.LogoutHandler'
## The MathJax.js configuration file that is to be used.
#c.NotebookApp.mathjax_config = 'TeX-AMS-MML_HTMLorMML-full,Safe'
## A custom url for MathJax.js. Should be in the form of a case-sensitive url to
# MathJax, for example: /static/components/MathJax/MathJax.js
#c.NotebookApp.mathjax_url = ''
## Dict of Python modules to load as notebook server extensions. Entry values
# can be used to enable and disable the loading of the extensions. The
# extensions will be loaded in alphabetical order.
# The nbexamples extension lets us keep an unmodified copy of the tutorials in
# the package directory and work from a separate, writable copy.
c.NotebookApp.nbserver_extensions = {'nbexamples.handlers': True}
import pyemma_tutorials
c.Examples.reviewed_example_dir = pyemma_tutorials.notebook_location()
c.Examples.unreviewed_example_dir = ''
## The directory to use for notebooks and kernels.
run_dir = pyemma_tutorials.run_dir()
c.NotebookApp.notebook_dir = run_dir
## Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
#c.NotebookApp.open_browser = True
## Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from notebook.auth import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
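#
#  A hypothetical example (the value below is a placeholder, not a real hash;
#  generate your own with the snippet above):
#
#    c.NotebookApp.password = 'sha1:<salt>:<hashed-password>'
#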
#c.NotebookApp.password = ''
## Forces users to use a password for the Notebook server. This is useful in a
# multi-user environment, for instance when everybody on the LAN can access each
# other's machine through ssh.
#
# In such a case, serving the notebook server on localhost is not secure, since
# any user can connect to the notebook server via ssh.
#c.NotebookApp.password_required = False
## The port the notebook server will listen on.
#c.NotebookApp.port = 8888
## The number of additional ports to try if the specified port is not available.
#c.NotebookApp.port_retries = 50
## DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
#c.NotebookApp.pylab = 'disabled'
## If True, display a button in the dashboard to quit (shutdown the notebook
# server).
#c.NotebookApp.quit_button = True
## (sec) Time window used to check the message and data rate limits.
#c.NotebookApp.rate_limit_window = 3
## Reraise exceptions encountered loading server extensions?
#c.NotebookApp.reraise_server_extension_failures = False
## DEPRECATED use the nbserver_extensions dict instead
#c.NotebookApp.server_extensions = []
## The session manager class to use.
#c.NotebookApp.session_manager_class = 'notebook.services.sessions.sessionmanager.SessionManager'
## Shut down the server after N seconds with no kernels or terminals running and
# no activity. This can be used together with culling idle kernels
# (MappingKernelManager.cull_idle_timeout) to shutdown the notebook server when
# it's not in use. This is not precisely timed: it may shut down up to a minute
# later. 0 (the default) disables this automatic shutdown.
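#
#  A hypothetical example (shut down after an hour of inactivity, combined with
#  culling kernels idle for ten minutes; the values are illustrative only):
#
#    c.NotebookApp.shutdown_no_activity_timeout = 3600
#    c.MappingKernelManager.cull_idle_timeout = 600
#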
#c.NotebookApp.shutdown_no_activity_timeout = 0
## Supply SSL options for the tornado HTTPServer. See the tornado docs for
# details.
#c.NotebookApp.ssl_options = {}
## Supply overrides for terminado. Currently only supports "shell_command".
#c.NotebookApp.terminado_settings = {}
## Set to False to disable terminals.
#
# This does *not* make the notebook server more secure by itself. Anything the
# user can do in a terminal, they can also do in a notebook.
#
# Terminals may also be automatically disabled if the terminado package is not
# available.
#c.NotebookApp.terminals_enabled = True
## Token used for authenticating first-time connections to the server.
#
# When no password is enabled, the default is to generate a new, random token.
#
# Setting to an empty string disables authentication altogether, which is NOT
# RECOMMENDED.
#c.NotebookApp.token = '<generated>'
## Supply overrides for the tornado.web.Application that the Jupyter notebook
# uses.
#c.NotebookApp.tornado_settings = {}
## Whether or not to trust X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL.
#c.NotebookApp.trust_xheaders = False
## DEPRECATED, use tornado_settings
#c.NotebookApp.webapp_settings = {}
## Specify where to open the notebook on startup. This is the `new` argument
# passed to the standard library method `webbrowser.open`. The behaviour is not
# guaranteed, but depends on browser support. Valid values are:
#
# - 2 opens a new tab,
# - 1 opens a new window,
# - 0 opens in an existing window.
#
# See the `webbrowser.open` documentation for details.
#c.NotebookApp.webbrowser_open_new = 2
## Set the tornado compression options for websocket connections.
#
# This value will be returned from
# :meth:`WebSocketHandler.get_compression_options`. None (default) will disable
# compression. A dict (even an empty one) will enable compression.
#
# See the tornado docs for WebSocketHandler.get_compression_options for details.
#c.NotebookApp.websocket_compression_options = None
## The base URL for websockets, if it differs from the HTTP server (hint: it
# almost certainly doesn't).
#
# Should be in the form of an HTTP origin: ws[s]://hostname[:port]
#c.NotebookApp.websocket_url = ''
#------------------------------------------------------------------------------
# ConnectionFileMixin(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Mixin for configurable classes that work with connection files
## JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
#c.ConnectionFileMixin.connection_file = ''
## set the control (ROUTER) port [default: random]
#c.ConnectionFileMixin.control_port = 0
## set the heartbeat port [default: random]
#c.ConnectionFileMixin.hb_port = 0
## set the iopub (PUB) port [default: random]
#c.ConnectionFileMixin.iopub_port = 0
## Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
#c.ConnectionFileMixin.ip = ''
## set the shell (ROUTER) port [default: random]
#c.ConnectionFileMixin.shell_port = 0
## set the stdin (ROUTER) port [default: random]
#c.ConnectionFileMixin.stdin_port = 0
##
#c.ConnectionFileMixin.transport = 'tcp'
#------------------------------------------------------------------------------
# KernelManager(ConnectionFileMixin) configuration
#------------------------------------------------------------------------------
## Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
## Should we autorestart the kernel if it dies.
#c.KernelManager.autorestart = True
## DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, Jupyter does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it is given on the Jupyter command
# line.
#c.KernelManager.kernel_cmd = ['python3']
## Time to wait for a kernel to terminate before killing it, in seconds.
#c.KernelManager.shutdown_wait_time = 5.0
#------------------------------------------------------------------------------
# Session(Configurable) configuration
#------------------------------------------------------------------------------
## Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
## Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
#c.Session.buffer_threshold = 1024
## Whether to check PID to protect against calls after fork.
#
# This check can be disabled if fork-safety is handled elsewhere.
#c.Session.check_pid = True
## Threshold (in bytes) beyond which a buffer should be sent without copying.
#c.Session.copy_threshold = 65536
## Debug output in the Session
#c.Session.debug = False
## The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
#c.Session.digest_history_size = 65536
## The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
#c.Session.item_threshold = 64
## execution key, for signing messages.
#c.Session.key = b''
## path to file containing execution key.
#c.Session.keyfile = ''
## Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
#c.Session.metadata = {}
## The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
#c.Session.packer = 'json'
## The UUID identifying this session.
#c.Session.session = ''
## The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
#c.Session.signature_scheme = 'hmac-sha256'
## The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
#c.Session.unpacker = 'json'
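#
#  A hypothetical example of switching both traits to msgpack, as described in
#  the Session docstring above (assumes the msgpack package is installed):
#
#    c.Session.packer = 'msgpack.packb'
#    c.Session.unpacker = 'msgpack.unpackb'
#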
## Username for the Session. Default is your system username.
#c.Session.username = 'marscher'
#------------------------------------------------------------------------------
# MultiKernelManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for managing multiple kernels.
## The name of the default kernel to start
#c.MultiKernelManager.default_kernel_name = 'python3'
## The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
#c.MultiKernelManager.kernel_manager_class = 'jupyter_client.ioloop.IOLoopKernelManager'
#------------------------------------------------------------------------------
# MappingKernelManager(MultiKernelManager) configuration
#------------------------------------------------------------------------------
## A KernelManager that handles notebook mapping and HTTP error handling
## Whether messages from kernels whose frontends have disconnected should be
# buffered in-memory.
#
# When True (default), messages are buffered and replayed on reconnect, avoiding
# lost messages due to interrupted connectivity.
#
# Disable if long-running kernels will produce too much output while no
# frontends are connected.
#c.MappingKernelManager.buffer_offline_messages = True
## Whether to consider culling kernels which are busy. Only effective if
# cull_idle_timeout > 0.
#c.MappingKernelManager.cull_busy = False
## Whether to consider culling kernels which have one or more connections. Only
# effective if cull_idle_timeout > 0.
#c.MappingKernelManager.cull_connected = False
## Timeout (in seconds) after which a kernel is considered idle and ready to be
# culled. Values of 0 or lower disable culling. Very short timeouts may result
# in kernels being culled for users with poor network connections.
#c.MappingKernelManager.cull_idle_timeout = 0
## The interval (in seconds) on which to check for idle kernels exceeding the
# cull timeout value.
#c.MappingKernelManager.cull_interval = 300
##
#c.MappingKernelManager.root_dir = ''
#------------------------------------------------------------------------------
# ContentsManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Base class for serving files and directories.
#
# This serves any text or binary file, as well as directories, with special
# handling for JSON notebook documents.
#
# Most APIs take a path argument, which is always an API-style unicode path, and
# always refers to a directory.
#
# - unicode, not url-escaped
# - '/'-separated
# - leading and trailing '/' will be stripped
# - if unspecified, path defaults to '',
# indicating the root path.
## Allow access to hidden files
#c.ContentsManager.allow_hidden = False
##
#c.ContentsManager.checkpoints = None
##
#c.ContentsManager.checkpoints_class = 'notebook.services.contents.checkpoints.Checkpoints'
##
#c.ContentsManager.checkpoints_kwargs = {}
## handler class to use when serving raw file requests.
#
# Default is a fallback that talks to the ContentsManager API, which may be
# inefficient, especially for large files.
#
# Local files-based ContentsManagers can use a StaticFileHandler subclass, which
# will be much more efficient.
#
# Access to these files should be Authenticated.
#c.ContentsManager.files_handler_class = 'notebook.files.handlers.FilesHandler'
## Extra parameters to pass to files_handler_class.
#
# For example, StaticFileHandlers generally expect a `path` argument specifying
# the root directory from which to serve files.
#c.ContentsManager.files_handler_params = {}
## Glob patterns to hide in file and directory listings.
#c.ContentsManager.hide_globs = ['__pycache__', '*.pyc', '*.pyo', '.DS_Store', '*.so', '*.dylib', '*~']
## Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure, such as removing notebook outputs
# or other side effects that should not be saved.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(path=path, model=model, contents_manager=self)
#
# - model: the model to be saved. Includes file contents.
# Modifying this dict will affect the file that is stored.
# - path: the API path of the save destination
# - contents_manager: this ContentsManager instance
#c.ContentsManager.pre_save_hook = None
##
#c.ContentsManager.root_dir = '/'
## The base name used when creating untitled directories.
#c.ContentsManager.untitled_directory = 'Untitled Folder'
## The base name used when creating untitled files.
#c.ContentsManager.untitled_file = 'untitled'
## The base name used when creating untitled notebooks.
#c.ContentsManager.untitled_notebook = 'Untitled'
#------------------------------------------------------------------------------
# FileManagerMixin(Configurable) configuration
#------------------------------------------------------------------------------
## Mixin for ContentsAPI classes that interact with the filesystem.
#
# Provides facilities for reading, writing, and copying both notebooks and
# generic files.
#
# Shared by FileContentsManager and FileCheckpoints.
#
# Note ---- Classes using this mixin must provide the following attributes:
#
# root_dir : unicode
#     A directory against which API-style paths are to be resolved.
#
# log : logging.Logger
## By default notebooks are saved to disk in a temporary file which then, if
# successfully written, replaces the old one. This procedure, namely
# 'atomic_writing', causes some bugs on file systems without operation order
# enforcement (like some networked file systems). If set to False, the new
# notebook is written directly over the old one, which could fail (e.g. on a
# full filesystem or exceeded quota).
#c.FileManagerMixin.use_atomic_writing = True
#------------------------------------------------------------------------------
# FileContentsManager(FileManagerMixin,ContentsManager) configuration
#------------------------------------------------------------------------------
## If True (default), deleting files will send them to the platform's
# trash/recycle bin, where they can be recovered. If False, deleting files
# really deletes them.
#c.FileContentsManager.delete_to_trash = True
## Python callable or importstring thereof
#
# to be called on the path of a file just saved.
#
# This can be used to process the file on disk, such as converting the notebook
# to a script or HTML via nbconvert.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(os_path=os_path, model=model, contents_manager=instance)
#
#  - path: the filesystem path to the file just written
#  - model: the model representing the file
#  - contents_manager: this ContentsManager instance
#c.FileContentsManager.post_save_hook = None
##
#c.FileContentsManager.root_dir = ''
## DEPRECATED, use post_save_hook. Will be removed in Notebook 5.0
#c.FileContentsManager.save_script = False
#------------------------------------------------------------------------------
# NotebookNotary(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for computing and verifying notebook signatures.
## The hashing algorithm used to sign notebooks.
#c.NotebookNotary.algorithm = 'sha256'
## The sqlite file in which to store notebook signatures. By default, this will
# be in your Jupyter data directory. You can set it to ':memory:' to disable
# sqlite writing to the filesystem.
#c.NotebookNotary.db_file = ''
## The secret key with which notebooks are signed.
#c.NotebookNotary.secret = b''
## The file where the secret key is stored.
#c.NotebookNotary.secret_file = ''
## A callable returning the storage backend for notebook signatures. The default
# uses an SQLite database.
#c.NotebookNotary.store_factory = traitlets.Undefined
#------------------------------------------------------------------------------
# KernelSpecManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## If there is no Python kernelspec registered and the IPython kernel is
# available, ensure it is added to the spec list.
#c.KernelSpecManager.ensure_native_kernel = True
## The kernel spec class. This is configurable to allow subclassing of the
# KernelSpecManager for customized behavior.
#c.KernelSpecManager.kernel_spec_class = 'jupyter_client.kernelspec.KernelSpec'
## Whitelist of allowed kernel names.
#
# By default, all installed kernels are allowed.
#c.KernelSpecManager.whitelist = set()
|
py | b4029358576843db40ff507346bde29533df2961 | import tensorflow as tf
for gpu in tf.config.experimental.list_physical_devices('GPU'):
tf.config.experimental.set_memory_growth(gpu, True)
|
py | b40293ac197100fead68ab42b11cc5c8a933a144 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import random
import time
from dataBase.noDB import JsonFile
from dataBase.sqlite3 import ProbeData
json = JsonFile.JsonFile()
for _i in range(0,144):
date = time.time()
ozone = random.uniform(40,70)
temperature = random.uniform(20,30)
    groundHumidity = random.uniform(10, 60)
airHumidity = random.uniform(10,60)
probeData = ProbeData.ProbeData()
probeData.setValue(_i,
date,
ozone,
temperature,
                       groundHumidity,
airHumidity)
json.addData(probeData.toJson())
json.writeToJson() |
py | b402945a3fa8106a78d62455088984e2b8308595 | # GGearing
# Simple encryption script for text
# This was one of my first versions of this script
# 09/07/2017
from __future__ import print_function
import math
try:
input = raw_input
except NameError:
pass
key = int(math.pi * 1e14)
text = input("Enter text: ")
values = []
reverse = []
def encryptChar(target):
    # encryption algorithm
target = (((target + 42) * key) - 449)
return target
def decryptChar(target):
target = (((target + 449) / key) - 42)
return target
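# A small worked inverse (illustrative only; 65 is ord('A')):
#   encryptChar(65) == ((65 + 42) * key) - 449
#   decryptChar of that value recovers 65.0 up to floating-point rounding,
#   which the callers below truncate with int() before passing to chr().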
def encrypt(input_text):
col_values = []
for i in range(len(input_text)):
current = ord(input_text[i])
current = encryptChar(current)
col_values.append(current)
return col_values
def decrypt(enc_text):
col_values = []
for i in range(len(enc_text)):
current = int(decryptChar(enc_text[i]))
current = chr(current)
col_values.append(current)
return col_values
def readAndDecrypt(filename):
file = open(filename, "r")
data = file.read()
datalistint = []
actualdata = []
datalist = data.split(" ")
datalist.remove('')
datalistint=[float(datalist[i]) for i in range(len(datalist))]
for i in range(len(datalist)):
current1 = int(decryptChar(datalistint[i]))
current1 = chr(current1)
actualdata.append(current1)
file.close()
return actualdata
def readAndEncrypt(filename):
file = open(filename, "r")
data = file.read()
datalist = list(data)
encrypted_list = list()
encrypted_list_str = list()
for i in range(len(datalist)):
current = ord(datalist[i])
current = encryptChar(current)
encrypted_list.append(current)
file.close()
return encrypted_list
def readAndEncryptAndSave(inp_file, out_file):
enc_list = readAndEncrypt(inp_file)
output = open(out_file, "w")
for i in range(len(enc_list)):
output.write(str(enc_list[i]) + " ")
output.close()
def readAndDecryptAndSave(inp_file, out_file):
dec_list = readAndDecrypt(inp_file)
output = open(out_file, "w")
for i in range(len(dec_list)):
output.write(str(dec_list[i]))
output.close()
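# Example usage of the file helpers above (a minimal sketch; "plain.txt" is a
# hypothetical input file that must exist next to this script):
#
#   readAndEncryptAndSave("plain.txt", "cipher.txt")
#   readAndDecryptAndSave("cipher.txt", "roundtrip.txt")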
# encryption
for i in range(len(text)):
current = ord(text[i])
current = encryptChar(current)
values.append(current)
# decryption
for i in range(len(text)):
current = int(decryptChar(values[i]))
current = chr(current)
reverse.append(current)
print(reverse)
# saves encrypted in txt file
output = open("encrypted.txt", "w")
for i in range(len(values)):
output.write(str(values[i]) + " ")
output.close()
# read and decrypts
print(readAndDecrypt("encrypted.txt"))
|
py | b4029572d36078d23b6993519bc05d5530fc3138 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AccountFiltersOperations:
"""AccountFiltersOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.media.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
account_name: str,
**kwargs
) -> AsyncIterable["_models.AccountFilterCollection"]:
"""List Account Filters.
List Account Filters in the Media Services account.
:param resource_group_name: The name of the resource group within the Azure subscription.
:type resource_group_name: str
:param account_name: The Media Services account name.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AccountFilterCollection or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.media.models.AccountFilterCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AccountFilterCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('AccountFilterCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.odata_next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ApiError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/accountFilters'} # type: ignore
async def get(
self,
resource_group_name: str,
account_name: str,
filter_name: str,
**kwargs
) -> Optional["_models.AccountFilter"]:
"""Get an Account Filter.
Get the details of an Account Filter in the Media Services account.
:param resource_group_name: The name of the resource group within the Azure subscription.
:type resource_group_name: str
:param account_name: The Media Services account name.
:type account_name: str
:param filter_name: The Account Filter name.
:type filter_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AccountFilter, or the result of cls(response)
:rtype: ~azure.mgmt.media.models.AccountFilter or None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.AccountFilter"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'filterName': self._serialize.url("filter_name", filter_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ApiError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('AccountFilter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/accountFilters/{filterName}'} # type: ignore
async def create_or_update(
self,
resource_group_name: str,
account_name: str,
filter_name: str,
parameters: "_models.AccountFilter",
**kwargs
) -> "_models.AccountFilter":
"""Create or update an Account Filter.
Creates or updates an Account Filter in the Media Services account.
:param resource_group_name: The name of the resource group within the Azure subscription.
:type resource_group_name: str
:param account_name: The Media Services account name.
:type account_name: str
:param filter_name: The Account Filter name.
:type filter_name: str
:param parameters: The request parameters.
:type parameters: ~azure.mgmt.media.models.AccountFilter
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AccountFilter, or the result of cls(response)
:rtype: ~azure.mgmt.media.models.AccountFilter
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AccountFilter"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'filterName': self._serialize.url("filter_name", filter_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'AccountFilter')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ApiError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('AccountFilter', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('AccountFilter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/accountFilters/{filterName}'} # type: ignore
async def delete(
self,
resource_group_name: str,
account_name: str,
filter_name: str,
**kwargs
) -> None:
"""Delete an Account Filter.
Deletes an Account Filter in the Media Services account.
:param resource_group_name: The name of the resource group within the Azure subscription.
:type resource_group_name: str
:param account_name: The Media Services account name.
:type account_name: str
:param filter_name: The Account Filter name.
:type filter_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'filterName': self._serialize.url("filter_name", filter_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ApiError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/accountFilters/{filterName}'} # type: ignore
async def update(
self,
resource_group_name: str,
account_name: str,
filter_name: str,
parameters: "_models.AccountFilter",
**kwargs
) -> "_models.AccountFilter":
"""Update an Account Filter.
Updates an existing Account Filter in the Media Services account.
:param resource_group_name: The name of the resource group within the Azure subscription.
:type resource_group_name: str
:param account_name: The Media Services account name.
:type account_name: str
:param filter_name: The Account Filter name.
:type filter_name: str
:param parameters: The request parameters.
:type parameters: ~azure.mgmt.media.models.AccountFilter
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AccountFilter, or the result of cls(response)
:rtype: ~azure.mgmt.media.models.AccountFilter
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AccountFilter"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'filterName': self._serialize.url("filter_name", filter_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'AccountFilter')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ApiError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('AccountFilter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/accountFilters/{filterName}'} # type: ignore
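# A minimal consumption sketch (assumptions: the AzureMediaServices client from
# azure.mgmt.media.aio exposes this operation group as `account_filters`, the
# azure-identity package is installed, and "my-rg"/"my-account" are hypothetical
# resource names; none of this is defined in this file):
#
#   from azure.identity.aio import DefaultAzureCredential
#   from azure.mgmt.media.aio import AzureMediaServices
#
#   async def show_filters(subscription_id: str) -> None:
#       async with AzureMediaServices(DefaultAzureCredential(), subscription_id) as client:
#           async for account_filter in client.account_filters.list("my-rg", "my-account"):
#               print(account_filter.name)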
|
py | b402963230b9f079cc8364385d5314a47918547d | import numpy as np
import pandas as pd
import gzip
df = pd.read_csv('../233x_sequences_degdata_081120.csv')
fil='predictions/Degscore_XGB_233_preds.csv'
output_array = np.ones([233,1588])*np.nan
for i,x in enumerate(open(fil,'r').readlines()):
dat = [float(k) for k in x.strip().split(',')]
output_array[i,:len(dat)] = dat
np.savetxt('formatted_predictions/Degscore-XGB_FULL_233x.csv',output_array, delimiter=',')
for i, row in df.iterrows():
if not np.isnan(row['startpos']):
output_array[i, :int(row['startpos'])] = np.NaN
output_array[i, int(row['endpos']):] = np.NaN
np.savetxt('formatted_predictions/Degscore-XGB_PCR_233x.csv',output_array, delimiter=',')
|
py | b40296916214d68bc4cc9c06a11d1c313dfcfdc9 | #!/usr/bin/env python3
"""
Create a train-test-validation split for the quranic text file.
Author: Hamzah Khan
"""
from argparse import ArgumentParser
import logging
import os
from typing import Dict
from typing import Tuple
from sklearn.model_selection import train_test_split
from utils.files import write_csv
import utils.text as text_utils
# This gives us a 60-20-20 split by default.
DEFAULT_RANDOM_SEED = 1
TRAIN_SPLIT_FRACTION = 0.6
TEST_SPLIT_FRACTION = 0.2
VALIDATION_SPLIT_FRACTION = 0.2
NUM_SURAHS = 114
parser = ArgumentParser(description='Tarteel Data Train-Test-Validation Splitter')
parser.add_argument('-f', '--path-to-quran-json', type=str, default='data/data-uthmani.json',
help='Path to the Quran text JSON file.')
parser.add_argument('-o', '--output_directory', type=str, default='.cache')
parser.add_argument(
'-g', '--group-identical-text', action='store_true',
help='If True, ayahs with identical text will be grouped into one set, not spread across '
'multiple sets.')
parser.add_argument('--train-frac', type=float, default=TRAIN_SPLIT_FRACTION)
parser.add_argument('--test-frac', type=float, default=TEST_SPLIT_FRACTION)
parser.add_argument('--validation-frac', type=float, default=VALIDATION_SPLIT_FRACTION)
parser.add_argument('-s', '--seed', type=int, default=DEFAULT_RANDOM_SEED)
parser.add_argument(
'--log', choices=['DEBUG', 'INFO', 'WARNING', 'CRITICAL'], default='INFO',
help='Logging level.'
)
args = parser.parse_args()
numeric_level = getattr(logging, args.log, None)
logging.basicConfig(level=numeric_level)
def create_train_test_validation_split(
ayahs_to_text: Dict,
train_test_validate_fractions: Tuple[int, int, int],
should_group_identical_text: bool = True,
random_seed: int = DEFAULT_RANDOM_SEED):
"""
    Create a train-test-validation split over ayah groups (ayahs sharing identical text), given the Quranic data.
    Returns three lists of ayah groups, each group containing the ayah numbers that share the same text.
"""
train_frac = train_test_validate_fractions[0]
test_frac = train_test_validate_fractions[1]
validate_frac = train_test_validate_fractions[2]
# The fractions should sum to 1.0, or we throw an error.
if abs(sum(train_test_validate_fractions) - 1.0) > 1e-6:
raise Exception("Train-test-validation fractions do not sum to 1.")
if should_group_identical_text:
# Initialize text to ayah group dictionary.
text_to_grouped_ayahs = {}
# Cluster ayahs with the same text.
for ayah_num in ayahs_to_text:
ayah_text = ayahs_to_text[ayah_num]
# Initialize if ayah text is not an entry yet.
if ayah_text not in text_to_grouped_ayahs:
text_to_grouped_ayahs[ayah_text] = []
text_to_grouped_ayahs[ayah_text].append(ayah_num)
# Get grouped list of ayahs.
ayah_groups = list(text_to_grouped_ayahs.values())
# If we want identical-text ayahs to not be grouped (and therefore allow the same text
# in multiple data sets), then extract the ayah numbers.
else:
ayah_groups = [group for group in ayahs_to_text.keys()]
# Splitting will be done in two steps, so identify the proper fractions for them.
first_split_frac = train_frac + validate_frac
second_split_frac = 1.0 - (validate_frac / first_split_frac)
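    # Worked example (assuming the default 0.6/0.2/0.2 fractions): first_split_frac
    # is 0.6 + 0.2 = 0.8, so 80% of the groups go to the train+validation pool, and
    # second_split_frac is 1 - 0.2/0.8 = 0.75, i.e. 75% of that pool (60% overall)
    # stays in train while the remaining 25% of it (20% overall) becomes validation.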
# Perform the actual splits on the indices.
X_train_valid, X_test = train_test_split(range(len(ayah_groups)),
train_size=first_split_frac,
random_state=random_seed,
shuffle=True)
X_train, X_valid = train_test_split(X_train_valid,
train_size=second_split_frac,
random_state=random_seed,
shuffle=True)
# Convert the indices back into ayah groups.
X_train = [ayah_groups[index] for index in X_train]
X_test = [ayah_groups[index] for index in X_test]
X_valid = [ayah_groups[index] for index in X_valid]
return X_train, X_test, X_valid
def save_split_data(output_directory, filename, split_data):
"""Create and saves a file for a specific split.
Each line is a comma separated list of groups of ayah numbers.
"""
output_path = os.path.join(output_directory, filename + ".csv")
headers = ('surah_num', 'ayah_num')
split_data.insert(0, headers)
write_csv(output_path, split_data)
def save_splits(output_directory, random_seed, split_fractions, X_train, X_test, X_valid):
"""Save the train-test-validation splits to three files."""
# Create the filenames.
train_filename = "_".join(
["train", "fraction", str(split_fractions[0]), "seed", str(random_seed)])
test_filename = "_".join(
["test", "fraction", str(split_fractions[1]), "seed", str(random_seed)])
validate_filename = "_".join(
["validate", "fraction", str(split_fractions[2]), "seed", str(random_seed)])
# Save the data to the specified location.
save_split_data(output_directory, train_filename, X_train)
save_split_data(output_directory, test_filename, X_test)
save_split_data(output_directory, validate_filename, X_valid)
if __name__ == '__main__':
# Load the Qur'anic Json data.
quran_json_obj = text_utils.load_quran_obj_from_json(args.path_to_quran_json)
# Convert the Json data to a dictionary of ayah numbers as keys and text as values.
ayahs_to_text = text_utils.convert_quran_json_to_dict(
quran_json_obj, should_include_bismillah=False)
# Run the ayah split, forming groups of ayah numbers with identical text.
split_fractions = (args.train_frac, args.test_frac, args.validation_frac)
X_train, X_test, X_valid = create_train_test_validation_split(
ayahs_to_text, split_fractions, args.group_identical_text, args.seed)
# Save the resulting split to a file.
if args.output_directory is not None:
save_splits(args.output_directory, args.seed, split_fractions, X_train, X_test, X_valid)
logging.info("Split data written to files in " + args.output_directory)
else:
logging.info("Data splitting completed.")
|
py | b40296ee8cb1445021d1bee28607e6565a3ac786 | import torch
import torch.nn as nn
import torch.nn.functional as F
from operations import *
from torch.autograd import Variable
from genotypes import PRIMITIVES
# from utils.darts_utils import drop_path, compute_speed, compute_speed_tensorrt
from pdb import set_trace as bp
from seg_oprs import Head
import numpy as np
# https://github.com/YongfeiYan/Gumbel_Softmax_VAE/blob/master/gumbel_softmax_vae.py
def sample_gumbel(shape, eps=1e-20):
U = torch.rand(shape)
U = U.cuda()
return -torch.log(-torch.log(U + eps) + eps)
def gumbel_softmax_sample(logits, temperature=1):
y = logits + sample_gumbel(logits.size())
return F.softmax(y / temperature, dim=-1)
def gumbel_softmax(logits, temperature=1, hard=False):
"""
    ST-Gumbel-Softmax
    input: [*, n_class]
    return: flattened [*, n_class] one-hot vector
"""
y = gumbel_softmax_sample(logits, temperature)
if not hard:
return y
shape = y.size()
_, ind = y.max(dim=-1)
y_hard = torch.zeros_like(y).view(-1, shape[-1])
y_hard.scatter_(1, ind.view(-1, 1), 1)
y_hard = y_hard.view(*shape)
    # Straight-through estimator: use y_hard in the forward pass, but let gradients flow through y
y_hard = (y_hard - y).detach() + y
return y_hard
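# A minimal usage sketch (assumes a CUDA device, since sample_gumbel moves the
# noise to the GPU; `logits` here is an arbitrary 2x3 score tensor):
#
#   logits = torch.randn(2, 3).cuda()
#   soft = gumbel_softmax(logits, temperature=1)             # rows sum to 1
#   hard = gumbel_softmax(logits, temperature=1, hard=True)  # one-hot rows whose
#                                                            # gradients flow as if soft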
class MixedOp(nn.Module):
def __init__(self, C_in, C_out, stride=1, width_mult_list=[1.]):
super(MixedOp, self).__init__()
self._ops = nn.ModuleList()
self._width_mult_list = width_mult_list
for primitive in PRIMITIVES:
op = OPS[primitive](C_in, C_out, stride, True, width_mult_list=width_mult_list)
self._ops.append(op)
def set_prun_ratio(self, ratio):
for op in self._ops:
op.set_ratio(ratio)
def forward(self, x, weights, ratios):
        # ratios entries: an int forces that channel count, a tensor is an arch_ratio distribution, a float (<=1) forces that width
result = 0
if isinstance(ratios[0], torch.Tensor):
ratio0 = self._width_mult_list[ratios[0].argmax()]
r_score0 = ratios[0][ratios[0].argmax()]
else:
ratio0 = ratios[0]
r_score0 = 1.
if isinstance(ratios[1], torch.Tensor):
ratio1 = self._width_mult_list[ratios[1].argmax()]
r_score1 = ratios[1][ratios[1].argmax()]
else:
ratio1 = ratios[1]
r_score1 = 1.
self.set_prun_ratio((ratio0, ratio1))
for w, op in zip(weights, self._ops):
result = result + op(x) * w * r_score0 * r_score1
return result
def forward_latency(self, size, weights, ratios):
        # ratios entries: an int forces that channel count, a tensor is an arch_ratio distribution, a float (<=1) forces that width
result = 0
if isinstance(ratios[0], torch.Tensor):
ratio0 = self._width_mult_list[ratios[0].argmax()]
r_score0 = ratios[0][ratios[0].argmax()]
else:
ratio0 = ratios[0]
r_score0 = 1.
if isinstance(ratios[1], torch.Tensor):
ratio1 = self._width_mult_list[ratios[1].argmax()]
r_score1 = ratios[1][ratios[1].argmax()]
else:
ratio1 = ratios[1]
r_score1 = 1.
self.set_prun_ratio((ratio0, ratio1))
for w, op in zip(weights, self._ops):
latency, size_out = op.forward_latency(size)
result = result + latency * w * r_score0 * r_score1
return result, size_out
class Cell(nn.Module):
def __init__(self, C_in, C_out=None, down=True, width_mult_list=[1.]):
super(Cell, self).__init__()
self._C_in = C_in
if C_out is None: C_out = C_in
self._C_out = C_out
self._down = down
self._width_mult_list = width_mult_list
self._op = MixedOp(C_in, C_out, width_mult_list=width_mult_list)
if self._down:
self.downsample = MixedOp(C_in, C_in*2, stride=2, width_mult_list=width_mult_list)
def forward(self, input, alphas, ratios):
# ratios: (in, out, down)
out = self._op(input, alphas, (ratios[0], ratios[1]))
assert (self._down and (ratios[2] is not None)) or ((not self._down) and (ratios[2] is None))
down = self.downsample(input, alphas, (ratios[0], ratios[2])) if self._down else None
return out, down
def forward_latency(self, size, alphas, ratios):
# ratios: (in, out, down)
out = self._op.forward_latency(size, alphas, (ratios[0], ratios[1]))
assert (self._down and (ratios[2] is not None)) or ((not self._down) and (ratios[2] is None))
down = self.downsample.forward_latency(size, alphas, (ratios[0], ratios[2])) if self._down else None
return out, down
class Network_Multi_Path(nn.Module):
def __init__(self, num_classes=19, layers=16, criterion=nn.CrossEntropyLoss(ignore_index=-1), Fch=12, width_mult_list=[1.,], prun_modes=['arch_ratio',], stem_head_width=[(1., 1.),]):
super(Network_Multi_Path, self).__init__()
self._num_classes = num_classes
assert layers >= 3
self._layers = layers
self._criterion = criterion
self._Fch = Fch
self._width_mult_list = width_mult_list
self._prun_modes = prun_modes
self.prun_mode = None # prun_mode is higher priority than _prun_modes
self._stem_head_width = stem_head_width
self._flops = 0
self._params = 0
self.stem = nn.ModuleList([
nn.Sequential(
ConvNorm(3, self.num_filters(2, stem_ratio)*2, kernel_size=3, stride=2, padding=1, bias=False, groups=1, slimmable=False),
BasicResidual2x(self.num_filters(2, stem_ratio)*2, self.num_filters(4, stem_ratio)*2, kernel_size=3, stride=2, groups=1, slimmable=False),
BasicResidual2x(self.num_filters(4, stem_ratio)*2, self.num_filters(8, stem_ratio), kernel_size=3, stride=2, groups=1, slimmable=False)
) for stem_ratio, _ in self._stem_head_width ])
self.cells = nn.ModuleList()
for l in range(layers):
cells = nn.ModuleList()
if l == 0:
# first node has only one input (prev cell's output)
cells.append(Cell(self.num_filters(8), width_mult_list=width_mult_list))
elif l == 1:
cells.append(Cell(self.num_filters(8), width_mult_list=width_mult_list))
cells.append(Cell(self.num_filters(16), width_mult_list=width_mult_list))
elif l < layers - 1:
cells.append(Cell(self.num_filters(8), width_mult_list=width_mult_list))
cells.append(Cell(self.num_filters(16), width_mult_list=width_mult_list))
cells.append(Cell(self.num_filters(32), down=False, width_mult_list=width_mult_list))
else:
cells.append(Cell(self.num_filters(8), down=False, width_mult_list=width_mult_list))
cells.append(Cell(self.num_filters(16), down=False, width_mult_list=width_mult_list))
cells.append(Cell(self.num_filters(32), down=False, width_mult_list=width_mult_list))
self.cells.append(cells)
self.refine32 = nn.ModuleList([
nn.ModuleList([
ConvNorm(self.num_filters(32, head_ratio), self.num_filters(16, head_ratio), kernel_size=1, bias=False, groups=1, slimmable=False),
ConvNorm(self.num_filters(32, head_ratio), self.num_filters(16, head_ratio), kernel_size=3, padding=1, bias=False, groups=1, slimmable=False),
ConvNorm(self.num_filters(16, head_ratio), self.num_filters(8, head_ratio), kernel_size=1, bias=False, groups=1, slimmable=False),
ConvNorm(self.num_filters(16, head_ratio), self.num_filters(8, head_ratio), kernel_size=3, padding=1, bias=False, groups=1, slimmable=False)]) for _, head_ratio in self._stem_head_width ])
self.refine16 = nn.ModuleList([
nn.ModuleList([
ConvNorm(self.num_filters(16, head_ratio), self.num_filters(8, head_ratio), kernel_size=1, bias=False, groups=1, slimmable=False),
ConvNorm(self.num_filters(16, head_ratio), self.num_filters(8, head_ratio), kernel_size=3, padding=1, bias=False, groups=1, slimmable=False)]) for _, head_ratio in self._stem_head_width ])
self.head0 = nn.ModuleList([ Head(self.num_filters(8, head_ratio), num_classes, False) for _, head_ratio in self._stem_head_width ])
self.head1 = nn.ModuleList([ Head(self.num_filters(8, head_ratio), num_classes, False) for _, head_ratio in self._stem_head_width ])
self.head2 = nn.ModuleList([ Head(self.num_filters(8, head_ratio), num_classes, False) for _, head_ratio in self._stem_head_width ])
self.head02 = nn.ModuleList([ Head(self.num_filters(8, head_ratio)*2, num_classes, False) for _, head_ratio in self._stem_head_width ])
self.head12 = nn.ModuleList([ Head(self.num_filters(8, head_ratio)*2, num_classes, False) for _, head_ratio in self._stem_head_width ])
# contains arch_param names: {"alphas": alphas, "betas": betas, "ratios": ratios}
self._arch_names = []
self._arch_parameters = []
for i in range(len(self._prun_modes)):
arch_name, arch_param = self._build_arch_parameters(i)
self._arch_names.append(arch_name)
self._arch_parameters.append(arch_param)
self._reset_arch_parameters(i)
# switch set of arch if we have more than 1 arch
self.arch_idx = 0
def num_filters(self, scale, width=1.0):
return int(np.round(scale * self._Fch * width))
def new(self):
model_new = Network(self._num_classes, self._layers, self._criterion, self._Fch).cuda()
for x, y in zip(model_new.arch_parameters(), self.arch_parameters()):
x.data.copy_(y.data)
return model_new
def sample_prun_ratio(self, mode="arch_ratio"):
'''
mode: "min"|"max"|"random"|"arch_ratio"(default)
'''
assert mode in ["min", "max", "random", "arch_ratio"]
if mode == "arch_ratio":
ratios = self._arch_names[self.arch_idx]["ratios"]
ratios0 = getattr(self, ratios[0])
ratios0_sampled = []
for layer in range(self._layers - 1):
ratios0_sampled.append(gumbel_softmax(F.log_softmax(ratios0[layer], dim=-1), hard=True))
ratios1 = getattr(self, ratios[1])
ratios1_sampled = []
for layer in range(self._layers - 1):
ratios1_sampled.append(gumbel_softmax(F.log_softmax(ratios1[layer], dim=-1), hard=True))
ratios2 = getattr(self, ratios[2])
ratios2_sampled = []
for layer in range(self._layers - 2):
ratios2_sampled.append(gumbel_softmax(F.log_softmax(ratios2[layer], dim=-1), hard=True))
return [ratios0_sampled, ratios1_sampled, ratios2_sampled]
elif mode == "min":
ratios0_sampled = []
for layer in range(self._layers - 1):
ratios0_sampled.append(self._width_mult_list[0])
ratios1_sampled = []
for layer in range(self._layers - 1):
ratios1_sampled.append(self._width_mult_list[0])
ratios2_sampled = []
for layer in range(self._layers - 2):
ratios2_sampled.append(self._width_mult_list[0])
return [ratios0_sampled, ratios1_sampled, ratios2_sampled]
elif mode == "max":
ratios0_sampled = []
for layer in range(self._layers - 1):
ratios0_sampled.append(self._width_mult_list[-1])
ratios1_sampled = []
for layer in range(self._layers - 1):
ratios1_sampled.append(self._width_mult_list[-1])
ratios2_sampled = []
for layer in range(self._layers - 2):
ratios2_sampled.append(self._width_mult_list[-1])
return [ratios0_sampled, ratios1_sampled, ratios2_sampled]
elif mode == "random":
ratios0_sampled = []
for layer in range(self._layers - 1):
ratios0_sampled.append(np.random.choice(self._width_mult_list))
ratios1_sampled = []
for layer in range(self._layers - 1):
ratios1_sampled.append(np.random.choice(self._width_mult_list))
ratios2_sampled = []
for layer in range(self._layers - 2):
ratios2_sampled.append(np.random.choice(self._width_mult_list))
return [ratios0_sampled, ratios1_sampled, ratios2_sampled]
def forward(self, input):
# out_prev: cell-state
# index 0: keep; index 1: down
stem = self.stem[self.arch_idx]
refine16 = self.refine16[self.arch_idx]
refine32 = self.refine32[self.arch_idx]
head0 = self.head0[self.arch_idx]
head1 = self.head1[self.arch_idx]
head2 = self.head2[self.arch_idx]
head02 = self.head02[self.arch_idx]
head12 = self.head12[self.arch_idx]
alphas0 = F.softmax(getattr(self, self._arch_names[self.arch_idx]["alphas"][0]), dim=-1)
alphas1 = F.softmax(getattr(self, self._arch_names[self.arch_idx]["alphas"][1]), dim=-1)
alphas2 = F.softmax(getattr(self, self._arch_names[self.arch_idx]["alphas"][2]), dim=-1)
alphas = [alphas0, alphas1, alphas2]
betas1 = F.softmax(getattr(self, self._arch_names[self.arch_idx]["betas"][0]), dim=-1)
betas2 = F.softmax(getattr(self, self._arch_names[self.arch_idx]["betas"][1]), dim=-1)
betas = [None, betas1, betas2]
if self.prun_mode is not None:
ratios = self.sample_prun_ratio(mode=self.prun_mode)
else:
ratios = self.sample_prun_ratio(mode=self._prun_modes[self.arch_idx])
out_prev = [[stem(input), None]] # stem: one cell
# i: layer | j: scale
for i, cells in enumerate(self.cells):
# layers
out = []
for j, cell in enumerate(cells):
# scales
# out,down -- 0: from down; 1: from keep
out0 = None; out1 = None
down0 = None; down1 = None
alpha = alphas[j][i-j]
# ratio: (in, out, down)
# int: force #channel; tensor: arch_ratio; float(<=1): force width
if i == 0 and j == 0:
# first cell
ratio = (self._stem_head_width[self.arch_idx][0], ratios[j][i-j], ratios[j+1][i-j])
elif i == self._layers - 1:
# cell in last layer
if j == 0:
ratio = (ratios[j][i-j-1], self._stem_head_width[self.arch_idx][1], None)
else:
ratio = (ratios[j][i-j], self._stem_head_width[self.arch_idx][1], None)
elif j == 2:
# cell in last scale: no down ratio "None"
ratio = (ratios[j][i-j], ratios[j][i-j+1], None)
else:
if j == 0:
ratio = (ratios[j][i-j-1], ratios[j][i-j], ratios[j+1][i-j])
else:
ratio = (ratios[j][i-j], ratios[j][i-j+1], ratios[j+1][i-j])
# out,down -- 0: from down; 1: from keep
if j == 0:
out1, down1 = cell(out_prev[0][0], alpha, ratio)
out.append((out1, down1))
else:
if i == j:
out0, down0 = cell(out_prev[j-1][1], alpha, ratio)
out.append((out0, down0))
else:
if betas[j][i-j-1][0] > 0:
out0, down0 = cell(out_prev[j-1][1], alpha, ratio)
if betas[j][i-j-1][1] > 0:
out1, down1 = cell(out_prev[j][0], alpha, ratio)
out.append((
sum(w * out for w, out in zip(betas[j][i-j-1], [out0, out1])),
sum(w * down if down is not None else 0 for w, down in zip(betas[j][i-j-1], [down0, down1])),
))
out_prev = out
###################################
out0 = None; out1 = None; out2 = None
out0 = out[0][0]
out1 = F.interpolate(refine16[0](out[1][0]), scale_factor=2, mode='bilinear', align_corners=True)
out1 = refine16[1](torch.cat([out1, out[0][0]], dim=1))
out2 = F.interpolate(refine32[0](out[2][0]), scale_factor=2, mode='bilinear', align_corners=True)
out2 = refine32[1](torch.cat([out2, out[1][0]], dim=1))
out2 = F.interpolate(refine32[2](out2), scale_factor=2, mode='bilinear', align_corners=True)
out2 = refine32[3](torch.cat([out2, out[0][0]], dim=1))
pred0 = head0(out0)
pred1 = head1(out1)
pred2 = head2(out2)
pred02 = head02(torch.cat([out0, out2], dim=1))
pred12 = head12(torch.cat([out1, out2], dim=1))
if not self.training:
pred0 = F.interpolate(pred0, scale_factor=8, mode='bilinear', align_corners=True)
pred1 = F.interpolate(pred1, scale_factor=8, mode='bilinear', align_corners=True)
pred2 = F.interpolate(pred2, scale_factor=8, mode='bilinear', align_corners=True)
pred02 = F.interpolate(pred02, scale_factor=8, mode='bilinear', align_corners=True)
pred12 = F.interpolate(pred12, scale_factor=8, mode='bilinear', align_corners=True)
return pred0, pred1, pred2, pred02, pred12
###################################
def forward_latency(self, size, alpha=True, beta=True, ratio=True):
# out_prev: cell-state
# index 0: keep; index 1: down
stem = self.stem[self.arch_idx]
if alpha:
alphas0 = F.softmax(getattr(self, self._arch_names[self.arch_idx]["alphas"][0]), dim=-1)
alphas1 = F.softmax(getattr(self, self._arch_names[self.arch_idx]["alphas"][1]), dim=-1)
alphas2 = F.softmax(getattr(self, self._arch_names[self.arch_idx]["alphas"][2]), dim=-1)
alphas = [alphas0, alphas1, alphas2]
else:
alphas = [
torch.ones_like(getattr(self, self._arch_names[self.arch_idx]["alphas"][0])).cuda() * 1./len(PRIMITIVES),
torch.ones_like(getattr(self, self._arch_names[self.arch_idx]["alphas"][1])).cuda() * 1./len(PRIMITIVES),
torch.ones_like(getattr(self, self._arch_names[self.arch_idx]["alphas"][2])).cuda() * 1./len(PRIMITIVES)]
if beta:
betas1 = F.softmax(getattr(self, self._arch_names[self.arch_idx]["betas"][0]), dim=-1)
betas2 = F.softmax(getattr(self, self._arch_names[self.arch_idx]["betas"][1]), dim=-1)
betas = [None, betas1, betas2]
else:
betas = [
None,
torch.ones_like(getattr(self, self._arch_names[self.arch_idx]["betas"][0])).cuda() * 1./2,
torch.ones_like(getattr(self, self._arch_names[self.arch_idx]["betas"][1])).cuda() * 1./2]
if ratio:
# ratios = self.sample_prun_ratio(mode='arch_ratio')
if self.prun_mode is not None:
ratios = self.sample_prun_ratio(mode=self.prun_mode)
else:
ratios = self.sample_prun_ratio(mode=self._prun_modes[self.arch_idx])
else:
ratios = self.sample_prun_ratio(mode='max')
stem_latency = 0
latency, size = stem[0].forward_latency(size); stem_latency = stem_latency + latency
latency, size = stem[1].forward_latency(size); stem_latency = stem_latency + latency
latency, size = stem[2].forward_latency(size); stem_latency = stem_latency + latency
out_prev = [[size, None]] # stem: one cell
latency_total = [[stem_latency, 0], [0, 0], [0, 0]] # (out, down)
# i: layer | j: scale
for i, cells in enumerate(self.cells):
# layers
out = []
latency = []
for j, cell in enumerate(cells):
# scales
# out,down -- 0: from down; 1: from keep
out0 = None; out1 = None
down0 = None; down1 = None
alpha = alphas[j][i-j]
# ratio: (in, out, down)
# int: force #channel; tensor: arch_ratio; float(<=1): force width
if i == 0 and j == 0:
# first cell
ratio = (self._stem_head_width[self.arch_idx][0], ratios[j][i-j], ratios[j+1][i-j])
elif i == self._layers - 1:
# cell in last layer
if j == 0:
ratio = (ratios[j][i-j-1], self._stem_head_width[self.arch_idx][1], None)
else:
ratio = (ratios[j][i-j], self._stem_head_width[self.arch_idx][1], None)
elif j == 2:
# cell in last scale
ratio = (ratios[j][i-j], ratios[j][i-j+1], None)
else:
if j == 0:
ratio = (ratios[j][i-j-1], ratios[j][i-j], ratios[j+1][i-j])
else:
ratio = (ratios[j][i-j], ratios[j][i-j+1], ratios[j+1][i-j])
# out,down -- 0: from down; 1: from keep
if j == 0:
out1, down1 = cell.forward_latency(out_prev[0][0], alpha, ratio)
out.append((out1[1], down1[1] if down1 is not None else None))
latency.append([out1[0], down1[0] if down1 is not None else None])
else:
if i == j:
out0, down0 = cell.forward_latency(out_prev[j-1][1], alpha, ratio)
out.append((out0[1], down0[1] if down0 is not None else None))
latency.append([out0[0], down0[0] if down0 is not None else None])
else:
if betas[j][i-j-1][0] > 0:
# from down
out0, down0 = cell.forward_latency(out_prev[j-1][1], alpha, ratio)
if betas[j][i-j-1][1] > 0:
# from keep
out1, down1 = cell.forward_latency(out_prev[j][0], alpha, ratio)
assert (out0 is None and out1 is None) or out0[1] == out1[1]
assert (down0 is None and down1 is None) or down0[1] == down1[1]
out.append((out0[1], down0[1] if down0 is not None else None))
latency.append([
sum(w * out for w, out in zip(betas[j][i-j-1], [out0[0], out1[0]])),
sum(w * down if down is not None else 0 for w, down in zip(betas[j][i-j-1], [down0[0] if down0 is not None else None, down1[0] if down1 is not None else None])),
])
out_prev = out
for ii, lat in enumerate(latency):
# layer: i | scale: ii
if ii == 0:
# only from keep
if lat[0] is not None: latency_total[ii][0] = latency_total[ii][0] + lat[0]
if lat[1] is not None: latency_total[ii][1] = latency_total[ii][0] + lat[1]
else:
if i == ii:
# only from down
if lat[0] is not None: latency_total[ii][0] = latency_total[ii-1][1] + lat[0]
if lat[1] is not None: latency_total[ii][1] = latency_total[ii-1][1] + lat[1]
else:
if lat[0] is not None: latency_total[ii][0] = betas[j][i-j-1][1] * latency_total[ii][0] + betas[j][i-j-1][0] * latency_total[ii-1][1] + lat[0]
if lat[1] is not None: latency_total[ii][1] = betas[j][i-j-1][1] * latency_total[ii][0] + betas[j][i-j-1][0] * latency_total[ii-1][1] + lat[1]
###################################
latency0 = latency_total[0][0]
latency1 = latency_total[1][0]
latency2 = latency_total[2][0]
latency = sum([latency0, latency1, latency2])
return latency
###################################
def _loss(self, input, target, pretrain=False):
loss = 0
if pretrain is not True:
# "random width": sampled by gambel softmax
self.prun_mode = None
for idx in range(len(self._arch_names)):
self.arch_idx = idx
logits = self(input)
loss = loss + sum(self._criterion(logit, target) for logit in logits)
if len(self._width_mult_list) > 1:
self.prun_mode = "max"
logits = self(input)
loss = loss + sum(self._criterion(logit, target) for logit in logits)
self.prun_mode = "min"
logits = self(input)
loss = loss + sum(self._criterion(logit, target) for logit in logits)
if pretrain == True:
self.prun_mode = "random"
logits = self(input)
loss = loss + sum(self._criterion(logit, target) for logit in logits)
self.prun_mode = "random"
logits = self(input)
loss = loss + sum(self._criterion(logit, target) for logit in logits)
elif pretrain == True and len(self._width_mult_list) == 1:
self.prun_mode = "max"
logits = self(input)
loss = loss + sum(self._criterion(logit, target) for logit in logits)
return loss
def _build_arch_parameters(self, idx):
num_ops = len(PRIMITIVES)
# define names
alphas = [ "alpha_"+str(idx)+"_"+str(scale) for scale in [0, 1, 2] ]
betas = [ "beta_"+str(idx)+"_"+str(scale) for scale in [1, 2] ]
setattr(self, alphas[0], nn.Parameter(Variable(1e-3*torch.ones(self._layers, num_ops), requires_grad=True)))
setattr(self, alphas[1], nn.Parameter(Variable(1e-3*torch.ones(self._layers-1, num_ops), requires_grad=True)))
setattr(self, alphas[2], nn.Parameter(Variable(1e-3*torch.ones(self._layers-2, num_ops), requires_grad=True)))
# betas are now in-degree probs
# 0: from down; 1: from keep
setattr(self, betas[0], nn.Parameter(Variable(1e-3*torch.ones(self._layers-2, 2), requires_grad=True)))
setattr(self, betas[1], nn.Parameter(Variable(1e-3*torch.ones(self._layers-3, 2), requires_grad=True)))
ratios = [ "ratio_"+str(idx)+"_"+str(scale) for scale in [0, 1, 2] ]
if self._prun_modes[idx] == 'arch_ratio':
# prunning ratio
num_widths = len(self._width_mult_list)
else:
num_widths = 1
setattr(self, ratios[0], nn.Parameter(Variable(1e-3*torch.ones(self._layers-1, num_widths), requires_grad=True)))
setattr(self, ratios[1], nn.Parameter(Variable(1e-3*torch.ones(self._layers-1, num_widths), requires_grad=True)))
setattr(self, ratios[2], nn.Parameter(Variable(1e-3*torch.ones(self._layers-2, num_widths), requires_grad=True)))
return {"alphas": alphas, "betas": betas, "ratios": ratios}, [getattr(self, name) for name in alphas] + [getattr(self, name) for name in betas] + [getattr(self, name) for name in ratios]
def _reset_arch_parameters(self, idx):
num_ops = len(PRIMITIVES)
if self._prun_modes[idx] == 'arch_ratio':
# prunning ratio
num_widths = len(self._width_mult_list)
else:
num_widths = 1
getattr(self, self._arch_names[idx]["alphas"][0]).data = Variable(1e-3*torch.ones(self._layers, num_ops), requires_grad=True)
getattr(self, self._arch_names[idx]["alphas"][1]).data = Variable(1e-3*torch.ones(self._layers-1, num_ops), requires_grad=True)
getattr(self, self._arch_names[idx]["alphas"][2]).data = Variable(1e-3*torch.ones(self._layers-2, num_ops), requires_grad=True)
getattr(self, self._arch_names[idx]["betas"][0]).data = Variable(1e-3*torch.ones(self._layers-2, 2), requires_grad=True)
getattr(self, self._arch_names[idx]["betas"][1]).data = Variable(1e-3*torch.ones(self._layers-3, 2), requires_grad=True)
getattr(self, self._arch_names[idx]["ratios"][0]).data = Variable(1e-3*torch.ones(self._layers-1, num_widths), requires_grad=True)
getattr(self, self._arch_names[idx]["ratios"][1]).data = Variable(1e-3*torch.ones(self._layers-1, num_widths), requires_grad=True)
getattr(self, self._arch_names[idx]["ratios"][2]).data = Variable(1e-3*torch.ones(self._layers-2, num_widths), requires_grad=True)
|
py | b40297ddfd0073ca088b1ceecfd40e2997bed60d | from pycoin.intbytes import byte2int, iterbytes
from ..script import tools
from ... import encoding
from ...serialize import b2h
from ..exceptions import SolvingError
from .ScriptType import ScriptType
from pycoin.contrib import segwit_addr
class ScriptPayToAddressWit(ScriptType):
TEMPLATE = tools.compile("OP_0 'PUBKEYHASH'")
def __init__(self, version, hash160):
assert len(version) == 1
assert isinstance(version, bytes)
assert len(hash160) == 20
assert isinstance(hash160, bytes)
version_int = byte2int(version)
assert 0 <= version_int <= 16
self.version = version_int
self.hash160 = hash160
self._address = None
self._script = None
@classmethod
def from_script(cls, script):
r = cls.match(script)
if r:
hash160 = r["PUBKEYHASH_LIST"][0]
if len(hash160) == 20:
s = cls(b'\0', hash160)
return s
raise ValueError("bad script")
def script(self):
if self._script is None:
# create the script
STANDARD_SCRIPT_OUT = "OP_%d %s"
script_text = STANDARD_SCRIPT_OUT % (self.version, b2h(self.hash160))
self._script = tools.compile(script_text)
return self._script
def solve(self, **kwargs):
"""
The kwargs required depend upon the script type.
hash160_lookup:
dict-like structure that returns a secret exponent for a hash160
signature_for_hash_type_f:
function returning sign value for a given signature type
signature_type:
usually SIGHASH_ALL (1)
"""
# we need a hash160 => secret_exponent lookup
db = kwargs.get("hash160_lookup")
if db is None:
raise SolvingError("missing hash160_lookup parameter")
result = db.get(self.hash160)
if result is None:
raise SolvingError("can't find secret exponent for %s" % self.address())
# we got it
script_to_hash = tools.compile(
"OP_DUP OP_HASH160 %s OP_EQUALVERIFY OP_CHECKSIG" % b2h(self.hash160))
signature_for_hash_type_f = kwargs.get("signature_for_hash_type_f").witness
signature_type = kwargs.get("signature_type")
secret_exponent, public_pair, compressed = result
binary_signature = self._create_script_signature(
secret_exponent, signature_for_hash_type_f, signature_type, script_to_hash)
binary_public_pair_sec = encoding.public_pair_to_sec(public_pair, compressed=compressed)
solution = [binary_signature, binary_public_pair_sec]
return (b'', solution)
def info(self, netcode=None):
def address_f(netcode=netcode):
from pycoin.networks import bech32_hrp_for_netcode
from pycoin.networks.default import get_current_netcode
if netcode is None:
netcode = get_current_netcode()
bech32_hrp = bech32_hrp_for_netcode(netcode)
if bech32_hrp:
return segwit_addr.encode(bech32_hrp, self.version, iterbytes(self.hash160))
return None
return dict(type="pay to witness public key hash", address="DEPRECATED call address_f instead",
address_f=address_f, hash160=self.hash160, script=self._script)
def __repr__(self):
return "<Script: pay to %s (segwit)>" % self.address()
|
py | b4029823029d858961461b47c58549903bd86fc0 | from datetime import datetime, timedelta, time
from django.contrib.auth.models import AbstractUser
from django.contrib.postgres.fields import ArrayField
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models import Q
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from imagekit.models import ImageSpecField
from pilkit.processors import SmartResize
from .user import User
from .organization import Organization
class OrganizationSubscriptionManager(models.Manager):
"""Custom manager for Subscription."""
    def create_subscription(self, organization, collaborations, contractors, partner_discovery=True):
"""Method for quick creation of subscription."""
subscription = self.create(
organization=organization,
collaborations=collaborations,
contractors=contractors,
partner_discovery=partner_discovery,
)
return subscription
@python_2_unicode_compatible
class OrganizationSubscription(models.Model):
"""Details of an organization subscription."""
# if subscription is for an org account, associate with that org
# FIXME should be a one to one relationship
organization = models.OneToOneField(
Organization,
help_text='Organization associated with this subscription if Org subscription type.',
on_delete=models.CASCADE,
)
# Organization functionality
collaborations = models.BooleanField(
default=False,
help_text='The organization is using the account for base features of editorial workflow, project management and collaboration.',
)
contractors = models.BooleanField(
default=False,
help_text='The organization is using the account to manage contractors.',
)
partner_discovery = models.BooleanField(
default=True,
help_text='Base level subscription. Allows organization to be publicly listed for search as a potential collaborative partner. Allows org users to see other publicly listed orgs.',
)
objects = OrganizationSubscriptionManager()
def __str__(self):
return "Organization Subscription - {organization}".format(organization=self.organization.name)
class ContractorSubscriptionManager(models.Manager):
"""Custom manager for contractor subscription."""
def create_subscription(self, user, standard):
"""Method for quick creation of subscription."""
subscription = self.create(
user=user,
standard=standard,
)
return subscription
@python_2_unicode_compatible
class ContractorSubscription(models.Model):
"""Details of a contractor subscription.
V1.0 limited utility but future-facing necessity.
"""
# if subscription is for an org account, associate with that org
user = models.ForeignKey(
User,
help_text='User associated with this subscription.',
on_delete=models.CASCADE,
)
# Organization functionality
standard = models.BooleanField(
default=True,
help_text='If an organization is using the account for base features of editorial workflow, project management and collaboration.',
)
objects = ContractorSubscriptionManager()
def __str__(self):
return "Contractor Subscription - {user}".format(user=self.user.credit_name)
|
py | b40298e2d1452d88e5c78999a512bfc95a1675f3 | # buy.py
|
py | b402990dae88aa92c6bc5db38747380bedb4fba4 | from __future__ import absolute_import, division, print_function
#
# Handle multiprocessing with any of the implemented methods so that this step
# is abstracted away from the use case (e.g. cxi_mpi_submit).
#
from libtbx.utils import Sorry
import os
import math
mp_phil_str = '''
mp {
method = local *lsf sge pbs slurm shifter htcondor custom
.type = choice
.help = Computing environment
use_mpi = True
.type = bool
.help = Use mpi multiprocessing
nproc = 1
.type = int
.help = Number of processes total (== nnodes x nproc_per_node). \
If two of the three params (nproc, nnodes, nproc_per_node) are \
specified, the last will be determined by modular arithmetic. \
If all three are specified, nnodes is ignored. nproc alone is \
sufficient for most methods.
nnodes = 1
.type = int
.help = Number of nodes to request
nnodes_index = None
.type = int
.help = If defined, use this many nodes for indexing and integration. \
Currently only works for mp.method=shifter or slurm.
nnodes_scale = None
.type = int
.help = If defined, use this many nodes for scaling. \
Currently only works for mp.method=shifter or slurm.
nnodes_merge = None
.type = int
.help = If defined, use this many nodes for merging. \
Currently only works for mp.method=shifter or slurm.
nproc_per_node = 1
.type = int
.help = Number of processes to allocate per node
queue = None
.type = str
.help = Queue to submit multiprocessing job to (optional for some methods)
memory = None
.type = int
.help = Memory (in MB) to allocate for a job (optional)
wall_time = None
.type = int
.help = Wall time limit (in minutes) to impose (optional)
extra_options = None
.type = str
.multiple = True
.help = Any other options to be included in the job submission command
extra_args = None
.type = str
.multiple = True
.help = Any other arguments to the main command
env_script = None
.type = str
.multiple = True
.help = Path to script sourcing a particular environment (optional)
local {
include_mp_in_command = True
.type = bool
      .help = Whether to decorate command with appropriate multiprocessing \
arguments. If False, it's assumed the multiprocessing \
arguments are provided by the calling program.
}
shifter {
submit_command = "sbatch "
.type = str
.help = Command used to run the zero-level script sbatch.sh.
shifter_image = None
.type = str
.help = Name of Shifter image to use for processing, as you would use \
in an sbatch script. Example: docker:dwpaley/cctbx-xfel:fix18
sbatch_script_template = None
.type = path
.help = Script template to be run with sbatch. The script will be copied \
to the trial directory as sbatch.sh and modified. Must contain \
exactly one srun command of the format srun [...] <srun_script> \
(including the <> brackets). May contain additional srun commands. \
May also contain substitutables <walltime>, <partition>, <nnodes> \
and <nproc>.
srun_script_template = None
.type = path
.help = Script template to be run with srun. The script will be copied \
to the trial directory as srun.sh and modified. Must contain \
exactly one instance of the string <command> (including the <> \
brackets) after setting up the necessary environment.
partition = regular
.type = str
.help = Partition to run jobs on, e.g. regular or debug.
jobname = LCLS_EXP
.type = str
.help = Job Name
project = None
.type = str
.help = Name of NERSC project -- formerly "repo"
reservation = None
.type = str
.help = Name of NERSC reservation
constraint = haswell
.type = str
.help = Haswell or KNL
staging = DataWarp *None
.type = choice
.help = Optionally stage logs to the DataWarp burst buffer. Only works \
when writing to Cori cscratch.
}
htcondor {
executable_path = None
.type = path
.help = Path to executable script (should be openmpiscript or mp2script). \
See examples folder that comes with htcondor.
filesystemdomain = sdfarm.kr
.type = str
.help = Domain of shared filesystem (see htcondor docs)
}
custom {
submit_command_template = None
.type = str
.help = Job submission command template. There should be one instance of the \
string <script> (including the <> brackets) in the command template. \
Fields <queue>, <nproc>, <memory>, <walltime>, <outfile>, <errfile>, \
<envscripts>, and <args> will similarly be replaced if present.
submit_script_template = None
.type = path
.help = Submission script template. The script will be copied to the trial \
directory and modified. There should be one instance of the string \
<command> (including the <> brackets) in the template script, which \
will be replaced with the processing command. <queue>, <nproc>, \
<memory>, <walltime>, <outfile>, <errfile>, <envscripts>, and <args> \
will similarly be replaced if present.
wall_time_string = None
.type = str
.help = Fully formatted wall time limit (e.g. 00:30:00). For custom computing \
environments, mp.wall_time is ignored because the format is unknown.
}
encapsulate_submit_script = True
.type = bool
.help = Encapsulate the submission command itself in another script containing \
the job submission command (e.g. qsub, bsub, condor_submit, sbatch \
           etc.)
}
'''
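
# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original module): how a caller might turn the
# phil string above into a parameter object. It assumes the standard
# libtbx.phil parse()/extract() API available in a cctbx environment; if that
# assumption is wrong, treat this purely as pseudocode. The function is defined
# but never called here, so importing this module is unaffected.
# ---------------------------------------------------------------------------
def _example_extract_mp_params():
  from libtbx.phil import parse
  master_phil = parse(mp_phil_str)
  params = master_phil.extract()
  # defaults from the scope above, e.g. params.mp.method == "lsf", params.mp.nproc == 1
  return params.mp
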
class get_submit_command(object):
def __init__(self, command, submit_path, stdoutdir, params,
log_name="log.out", err_name="log.err", job_name=None, root_dir=None):
""" Get a submit command for the various compute environments
@param command Any command line program and its arguments
@param submit_path Submit script will be written here
@param stdoutdir Log file will be created in this directory
@param params Multiprocessing phil params (see mp_phil_scope)
@param log_name Filename for stdout (optional).
@param err_name Filename for stderr (if None, combined with the stdout).
@param job_name For applicable queueing systems, identifier for the job (optional).
"""
self.shell_path = "/bin/sh"
self.source_env_scripts = []
self.options_inside_submit_script = []
self.submit_head = "qsub"
self.submit_path = submit_path
self.stdoutdir = stdoutdir
self.log_name = log_name
self.err_name = err_name
self.params = params
self.job_name = job_name
self.root_dir = root_dir
self.command = command
self.options = []
self.args = []
def customize_for_method(self):
pass
def eval_params(self):
pass
def substitute(self, template, marker, value):
if marker in template:
if value is None:
raise Sorry("No value found for %s" % marker)
return template.replace(marker, value)
else:
return template
def delete(self, template, marker):
template_lines = template.split('\n')
return '\n'.join([l for l in template_lines if marker not in l])
def make_executable(self, file):
import stat
st = os.stat(file)
os.chmod(file, st.st_mode | stat.S_IXUSR)
def write_script(self):
command_str = " ".join([self.command] + self.args)
with open(self.submit_path, 'w') as f:
f.write("#! %s\n" % self.shell_path)
for line in self.options_inside_submit_script:
f.write("%s\n" % line)
for line in self.source_env_scripts:
f.write("%s\n" % line)
f.write("\n")
f.write("%s\n" % command_str)
self.make_executable(self.submit_path)
def generate_submit_command(self):
return " ".join([self.submit_head] + self.options + [self.submit_path])
def encapsulate_submit(self):
path, ext = os.path.splitext(self.submit_path)
encapsulate_path = path + "_submit" + ext
with open(encapsulate_path, 'w') as f:
f.write("#! /bin/%s\n\n" % ext[1:])
f.write(self.generate_submit_command())
f.write("\n")
def __call__(self):
self.customize_for_method()
self.eval_params()
self.write_script()
if self.params.encapsulate_submit_script:
self.encapsulate_submit()
return self.generate_submit_command()
class get_local_submit_command(get_submit_command):
def customize_for_method(self):
if self.params.local.include_mp_in_command:
if self.params.use_mpi:
self.command = "mpirun -n %d %s mp.method=mpi" % (self.params.nproc, self.command)
elif self.params.nproc > 1:
self.command += " mp.nproc=%d" % self.params.nproc
def generate_submit_command(self):
return self.submit_path
class get_lsf_submit_command(get_submit_command):
def customize_for_method(self):
self.submit_head = "bsub"
if self.params.use_mpi:
self.command = "mpirun %s mp.method=mpi" % self.command
def eval_params(self):
# -n <nproc>
nproc_str = "-n %d" % self.params.nproc
self.options.append(nproc_str)
# -o <outfile>
out_str = "-o %s" % os.path.join(self.stdoutdir, self.log_name)
self.options.append(out_str)
# -e <errfile> (optional)
if self.err_name is not None:
err_str = "-e %s" % os.path.join(self.stdoutdir, self.err_name)
self.options.append(err_str)
# -q <queue> (optional on LSF)
if self.params.queue is not None:
queue_str = "-q %s" % self.params.queue
self.options.append(queue_str)
# -W <wall_time_limit> (optional)
if self.params.wall_time is not None:
hours = self.params.wall_time // 60
minutes = self.params.wall_time % 60
wt_str = "-W %2d:%02d" % (hours, minutes)
self.options.append(wt_str)
# -R "rusage[mem=<memory_requested>]" (optional)
if self.params.memory is not None:
memory_str = "-R \"rusage[mem=%d]\"" % self.params.memory
self.options.append(memory_str)
# <extra_options> (optional, preceding the command)
for cmd in self.params.extra_options:
self.options.append(cmd)
# source </path/to/env.sh> (optional)
for env in self.params.env_script:
env_str = "source %s\n" % env
self.source_env_scripts.append(env_str)
# <args> (optional, following the command)
for arg in self.params.extra_args:
self.args.append(arg)
class get_sge_submit_command(get_submit_command):
def customize_for_method(self):
self.shell_path += " -q"
self.options.append("-cwd")
# self.options.append("mp.method=sge")
if self.params.use_mpi:
self.command = "mpirun -n ${NSLOTS} %s mp.method=mpi"%(self.command) #This command currently (14/10/2020) has problems at Diamond as it will randomly use incorrect number of cores
else:
self.command = "%s mp.nproc=${NSLOTS}"%(self.command)
def eval_params(self):
# -t 1-<nproc>
if self.params.nproc > 1:
nproc_str = "-pe smp %d" % self.params.nproc #Change the submission command to smp, as the openmpi currently confilicts with mpi of Dials and cctbx.xfel.merge
self.options.append(nproc_str)
# -o <outfile>
out_str = "-o %s" % os.path.join(self.stdoutdir, self.log_name)
self.options.append(out_str)
# -j [y/n] -e <errfile> (optional)
if self.err_name is not None:
err_str = "-j n -e %s" % os.path.join(self.stdoutdir, self.err_name)
self.options.append(err_str)
else:
self.options.append("-j y")
# -q <queue>
if self.params.queue is None:
raise Sorry("Queue not specified.")
queue_str = "-q %s" % self.params.queue
self.options.append(queue_str)
# -l h_rt=<wall_time_limit> (optional)
if self.params.wall_time is not None:
hours = self.params.wall_time // 60
minutes = self.params.wall_time % 60
wt_str = "-l h_rt=%02d:%02d:00" % (hours, minutes)
self.options.append(wt_str)
# -l mem_free=<memory_requested> (optional)
if self.params.memory is not None:
memory_str = "-l mem_free=%dM" % self.params.memory
self.options.append(memory_str)
# -N <job_name>
if self.job_name is not None:
name_str = "-N %s" % self.job_name
self.options.append(name_str)
# <extra_options> (optional, preceding the command)
for cmd in self.params.extra_options:
self.options.append(cmd)
# source </path/to/env.sh> (optional)
for env in self.params.env_script:
env_str = "source %s\n" % env
self.source_env_scripts.append(env_str)
# <args> (optional, following the command)
for arg in self.params.extra_args:
self.args.append(arg)
class get_pbs_submit_command(get_submit_command):
def customize_for_method(self):
if (self.params.nnodes > 1) or (self.params.nproc_per_node > 1):
self.params.nproc = self.params.nnodes * self.params.nproc_per_node
if self.params.use_mpi:
self.command = "mpiexec --hostfile $PBS_NODEFILE %s mp.method=mpi" % (self.command)
def eval_params(self):
# # -t 1-<nproc> # deprecated
# if self.params.nproc > 1:
# nproc_str = "#PBS -l mppwidth=%d" % self.params.nproc
# self.options_inside_submit_script.append(nproc_str)
# -l nodes=<nnodes>:ppn=<procs_per_node>
if max(self.params.nproc, self.params.nproc_per_node, self.params.nnodes) > 1:
# If specified, nproc overrides procs_per_node and procs_per_node overrides
# nnodes. One process per node is requested if only nproc is specified.
if self.params.nproc > 1:
import math
if self.params.nproc <= self.params.nproc_per_node:
procs_per_node = self.params.nproc
nnodes = 1
elif self.params.nproc_per_node > 1:
procs_per_node = self.params.nproc_per_node
nnodes = int(math.ceil(self.params.nproc/procs_per_node))
elif self.params.nnodes > 1:
procs_per_node = int(math.ceil(self.params.nproc/self.params.nnodes))
nnodes = self.params.nnodes
else: # insufficient information; allocate 1 proc per node
procs_per_node = 1
nnodes = self.params.nproc
else:
procs_per_node = self.params.nproc_per_node
nnodes = self.params.nnodes
nproc_str = "#PBS -l nodes=%d:ppn=%d" % (nnodes, procs_per_node)
self.options_inside_submit_script.append(nproc_str)
# -o <outfile>
out_str = "#PBS -o %s" % os.path.join(self.stdoutdir, self.log_name)
self.options_inside_submit_script.append(out_str)
# [-j oe/-e <errfile>] (optional)
if self.err_name is not None:
err_str = "#PBS -e %s" % os.path.join(self.stdoutdir, self.err_name)
self.options_inside_submit_script.append(err_str)
else:
self.options_inside_submit_script.append("#PBS -j oe")
# -q <queue>
if self.params.queue is None:
raise Sorry("Queue not specified.")
queue_str = "#PBS -q %s" % self.params.queue
self.options_inside_submit_script.append(queue_str)
# -l walltime=<wall_time_limit> (optional)
if self.params.wall_time is not None:
hours = self.params.wall_time // 60
minutes = self.params.wall_time % 60
wt_str = "#PBS -l walltime=%2d:%02d:00" % (hours, minutes)
self.options_inside_submit_script.append(wt_str)
# -l mem_free=<memory_requested> (optional)
if self.params.memory is not None:
memory_str = "#PBS -l mem=%dmb" % self.params.memory
self.options_inside_submit_script.append(memory_str)
# -N <job_name>
if self.job_name is not None:
name_str = "#PBS -N %s" % self.job_name
self.options_inside_submit_script.append(name_str)
# <extra_options> (optional, preceding the command)
for cmd in self.params.extra_options:
cmd_str = "#PBS %s" % cmd
self.options_inside_submit_script.append(cmd_str)
if self.root_dir is not None:
cmd_str = "cd %s"%self.root_dir
self.options_inside_submit_script.append(cmd_str)
# source </path/to/env.sh> (optional)
for env in self.params.env_script:
env_str = "source %s\n" % env
self.source_env_scripts.append(env_str)
# <args> (optional, following the command)
for arg in self.params.extra_args:
self.args.append(arg)
class get_slurm_submit_command(get_submit_command):
def customize_for_method(self):
self.submit_head = "sbatch"
if self.params.use_mpi:
self.command = "mpirun %s mp.method=mpi" % (self.command)
def eval_params(self):
nproc = self.params.nnodes * self.params.nproc_per_node
nproc_str = "#SBATCH --nodes %d\n#SBATCH --ntasks-per-node=%d" % (
self.params.nnodes, self.params.nproc_per_node
)
self.options_inside_submit_script.append(nproc_str)
# -o <outfile>
out_str = "#SBATCH --output=%s" % os.path.join(self.stdoutdir, self.log_name)
self.options_inside_submit_script.append(out_str)
# [-j oe/-e <errfile>] (optional)
if self.err_name is not None:
err_str = "#SBATCH --error=%s" % os.path.join(self.stdoutdir, self.err_name)
self.options_inside_submit_script.append(err_str)
# -q <queue>
if self.params.queue is None:
raise Sorry("Queue not specified.")
queue_str = "#SBATCH --partition %s" % self.params.queue
self.options_inside_submit_script.append(queue_str)
# -l walltime=<wall_time_limit> (optional)
if self.params.wall_time is not None:
hours = self.params.wall_time // 60
minutes = self.params.wall_time % 60
wt_str = "#SBATCH --time=%2d:%02d:00" % (hours, minutes)
self.options_inside_submit_script.append(wt_str)
# -l mem_free=<memory_requested> (optional)
if self.params.memory is not None:
memory_str = "#SBATCH --mem=%dmb" % self.params.memory
self.options_inside_submit_script.append(memory_str)
# -N <job_name>
if self.job_name is not None:
name_str = "#SBATCH --job-name=%s" % self.job_name
self.options_inside_submit_script.append(name_str)
# <extra_options> (optional, preceding the command)
for cmd in self.params.extra_options:
cmd_str = "#SBATCH %s" % cmd
self.options_inside_submit_script.append(cmd_str)
# source </path/to/env.sh> (optional)
for env in self.params.env_script:
env_str = "source %s\n" % env
self.source_env_scripts.append(env_str)
# <args> (optional, following the command)
for arg in self.params.extra_args:
self.args.append(arg)
class get_shifter_submit_command(get_submit_command):
  # No need for a constructor -- the inherited constructor is fine for shifter
def customize_for_method(self):
# template for sbatch.sh
self.sbatch_template = self.params.shifter.sbatch_script_template
self.destination = os.path.dirname(self.submit_path)
if not self.sbatch_template:
from xfel.ui.db.cfgs import shifter_templates
self.sbatch_contents = shifter_templates.sbatch_template
else:
with open(self.sbatch_template, "r") as sb:
self.sbatch_contents = sb.read()
self.sbatch_path = os.path.join(self.destination, "sbatch.sh")
# template for srun.sh
self.srun_template = self.params.shifter.srun_script_template
if not self.srun_template:
from xfel.ui.db.cfgs import shifter_templates
self.srun_contents = shifter_templates.srun_template
else:
with open(self.srun_template, "r") as sr:
self.srun_contents = sr.read()
if self.params.use_mpi:
self.command = "%s mp.method=mpi" % (self.command)
self.srun_path = os.path.join(self.destination, "srun.sh")
def eval_params(self):
# --image <shifter_image>
if self.params.shifter.shifter_image:
self.sbatch_contents = self.substitute(
self.sbatch_contents,
"<shifter_image>",
self.params.shifter.shifter_image
)
else:
raise Sorry("Must supply a shifter image")
# -N <nnodes>
self.sbatch_contents = self.substitute(self.sbatch_contents, "<nnodes>",
str(self.params.nnodes))
# --tasks-per-node <nproc_per_node> (optional)
self.sbatch_contents = self.substitute(self.sbatch_contents, "<nproc_per_node>",
str(self.params.nproc_per_node))
# For now use nproc = nnodes*nproc_per_node
# TODO: find a way for the user to specify _either_ nproc_per_node, or nproc
nproc = self.params.nnodes * self.params.nproc_per_node
# -n <nproc> (optional)
self.sbatch_contents = self.substitute(self.sbatch_contents, "<nproc>",
str(nproc))
# -W <walltime> (optional)
if self.params.wall_time is not None:
hours = self.params.wall_time // 60
minutes = self.params.wall_time % 60
wt_str = "%02d:%02d:00" % (hours, minutes)
self.sbatch_contents = self.substitute(self.sbatch_contents, "<walltime>",
wt_str)
# --qos <queue>
self.sbatch_contents = self.substitute(self.sbatch_contents, "<queue>",
self.params.queue)
# --partition <partition>
self.sbatch_contents = self.substitute(self.sbatch_contents, "<partition>",
self.params.shifter.partition)
# --job-name
self.sbatch_contents = self.substitute(self.sbatch_contents, "<jobname>",
self.params.shifter.jobname)
# -A
self.sbatch_contents = self.substitute(self.sbatch_contents, "<project>",
self.params.shifter.project)
# --reservation
if self.params.shifter.reservation:
self.sbatch_contents = self.substitute(
self.sbatch_contents, "<reservation>", self.params.shifter.reservation
)
else:
self.sbatch_contents = self.delete(self.sbatch_contents, "<reservation>")
# --constraint
self.sbatch_contents = self.substitute(self.sbatch_contents, "<constraint>",
self.params.shifter.constraint)
self.sbatch_contents = self.substitute(self.sbatch_contents, "<out_log>",
os.path.join(self.destination , "out.log"))
self.sbatch_contents = self.substitute(self.sbatch_contents, "<err_log>",
os.path.join(self.destination , "err.log"))
self.sbatch_contents = self.substitute(self.sbatch_contents, "<output_dir>",
self.destination)
# Delete datawarp instructions if we're not staging logs
if self.params.shifter.staging != "DataWarp":
self.sbatch_contents = self.delete(self.sbatch_contents, "#DW")
# <srun_script>
self.sbatch_contents = self.substitute(self.sbatch_contents, "<srun_script>",
self.srun_path)
# <command> and any extra args
if len(self.params.extra_args) > 0:
self.srun_contents = self.substitute(self.srun_contents, "<command>",
"<command> %s" % " ".join(self.params.extra_args))
self.srun_contents = self.substitute(self.srun_contents, "<command>",
self.command)
def generate_submit_command(self):
return self.params.shifter.submit_command + self.sbatch_path
def encapsulate_submit(self):
pass
def generate_sbatch_script(self):
with open(self.sbatch_path, "w") as sb:
sb.write(self.sbatch_contents)
sb.write("\n")
self.make_executable(self.sbatch_path)
def generate_srun_script(self):
with open(self.srun_path, "w") as sr:
sr.write(self.srun_contents)
sr.write("\n")
self.make_executable(self.srun_path)
def write_script(self):
self.generate_sbatch_script()
self.generate_srun_script()
class get_htcondor_submit_command(get_submit_command):
def __init__(self, *args, **kwargs):
super(get_htcondor_submit_command, self).__init__(*args, **kwargs)
self.destination = os.path.dirname(self.submit_path)
self.basename = os.path.splitext(os.path.basename(self.submit_path))[0]
def customize_for_method(self):
self.submit_head = "condor_submit"
if self.params.use_mpi:
self.command = "%s mp.method=mpi" % (self.command)
def generate_submit_command(self):
return "condor_submit " + os.path.join(self.destination, self.basename + "_condorparams")
def eval_params(self):
if self.params.use_mpi:
from libtbx import easy_run
d = dict(executable_path = self.params.htcondor.executable_path,
arguments = self.submit_path,
nproc = self.params.nproc,
working_folder = self.destination,
log_path = os.path.join(self.stdoutdir, self.basename + '_condor.log'),
output_path = os.path.join(self.stdoutdir, self.log_name),
error_path = os.path.join(self.stdoutdir, self.err_name),
requirements = 'target.filesystemdomain == "%s"'% self.params.htcondor.filesystemdomain)
# Find if there is a continguous set of slots available on one node
r = easy_run.fully_buffered('condor_status | grep Unclaimed | grep %s'%self.params.htcondor.filesystemdomain)
machines = {}
for line in r.stdout_lines:
try:
machine = line.split()[0].split('@')[1]
except IndexError: continue
if machine not in machines:
machines[machine] = 0
machines[machine] += 1
for machine in machines:
if machines[machine] >= self.params.nproc:
d['requirements'] += ' && machine == "%s"'%machine
break
condor_params = """
universe = parallel
executable = {executable_path}
arguments = {arguments}
machine_count = {nproc}
initialdir = {working_folder}
when_to_transfer_output = on_exit
log = {log_path}
output = {output_path}
error = {error_path}
requirements = {requirements}
+ParallelShutdownPolicy = "WAIT_FOR_ALL"
RunAsOwner = True
queue
"""
else:
assert self.params.htcondor.executable_path is None
d = dict(executable_path = self.submit_path,
working_folder = self.destination,
log_path = os.path.join(self.stdoutdir, self.basename + '_condor.log'),
output_path = os.path.join(self.stdoutdir, self.log_name),
error_path = os.path.join(self.stdoutdir, self.err_name),
filesystemdomain= self.params.htcondor.filesystemdomain)
condor_params = """
universe = vanilla
executable = {executable_path}
initialdir = {working_folder}
when_to_transfer_output = on_exit
log = {log_path}
output = {output_path}
error = {error_path}
requirements = target.filesystemdomain == "{filesystemdomain}"
RunAsOwner = True
queue
"""
with open(os.path.join(self.destination, self.basename + "_condorparams"), 'w') as f:
f.write(condor_params.format(**d))
# source </path/to/env.sh> (optional)
for env in self.params.env_script:
env_str = "source %s\n" % env
self.source_env_scripts.append(env_str)
class get_custom_submit_command(get_submit_command):
def customize_for_method(self):
# template for the script to be submitted, beginning with #!
self.script_template = self.params.custom.submit_script_template
    if not os.path.exists(self.script_template):
      raise Sorry("Custom submission template file not found: %s" % self.script_template)
# template for the submission command itself, e.g. qsub -n <nproc> -q <queue> script.sh
self.command_template = self.params.custom.submit_command_template
if self.command_template is None:
raise Sorry("Custom submit command must be specified for custom environments.")
def eval_params(self):
# any changes to the script to be submitted
with open(self.script_template, "r") as script:
self.script_contents = script.read()
# <command> and any <args>
if len(self.params.extra_args) > 0:
self.script_contents = self.script_contents.replace("<command>",
"<command> %s" % " ".join(self.params.extra_args))
self.script_contents = self.script_contents.replace("<command>", self.command)
# other changes to the contents of the script
for marker, value in [
("<queue>", self.params.queue),
("<nproc>", self.params.nproc),
("<memory>", self.params.memory),
("<walltime>", self.params.custom.wall_time_string),
("<outfile>", os.path.join(self.stdoutdir, self.log_name)),
("<errfile>", os.path.join(self.stdoutdir, self.err_name)),
("<envscripts>", self.params.env_script)]:
self.script_contents = self.substitute(self.script_contents, marker, value)
# any changes to the submission command
# <script> and any extra <options>
if len(self.params.extra_options) > 0:
self.submit_command_contents = self.params.custom.submit_command_template.replace("<script>",
"%s <script>" % " ".join(self.params.extra_options))
self.submit_command_contents = self.submit_command_contents.replace("<script>",
self.submit_path)
# other changes to the submission command
for marker, value in [
("<queue>", self.params.queue),
("<nproc>", self.params.nproc),
("<memory>", self.params.memory),
("<walltime>", self.params.custom.wall_time_string),
("<outfile>", os.path.join(self.stdoutdir, self.log_name)),
("<errfile>", os.path.join(self.stdoutdir, self.err_name))]:
self.submit_command_contents = self.substitute(self.submit_command_contents, marker, value)
def write_script(self):
with open(self.submit_path, "w") as f:
f.write(self.script_contents)
f.write("\n")
def generate_submit_command(self):
return self.submit_command_contents
def get_submit_command_chooser(command, submit_path, stdoutdir, params,
log_name="log.out", err_name="log.err", job_name=None,
root_dir=None):
if params.method == "local":
choice = get_local_submit_command
elif params.method == "lsf":
choice = get_lsf_submit_command
elif params.method == "sge":
choice = get_sge_submit_command
elif params.method == "pbs":
choice = get_pbs_submit_command
elif params.method == "slurm":
choice = get_slurm_submit_command
elif params.method == "shifter":
choice = get_shifter_submit_command
elif params.method == "htcondor":
choice = get_htcondor_submit_command
elif params.method == "custom":
choice = get_custom_submit_command
else:
raise Sorry("Multiprocessing method %s not recognized" % params.method)
command_generator = choice(command, submit_path, stdoutdir, params,
log_name=log_name, err_name=err_name, job_name=job_name, root_dir=root_dir)
return command_generator()
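
# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original module): a standalone demo of the
# "<marker>" template-substitution pattern that substitute()/delete() and the
# shifter/custom subclasses rely on. The template text and values are invented.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
  template = ("#SBATCH --nodes <nnodes>\n"
              "#SBATCH --time=<walltime>\n"
              "srun <command>\n")

  def _fill(text, marker, value):
    # mirrors get_submit_command.substitute(): only replace when the marker is present
    return text.replace(marker, value) if marker in text else text

  wall_time = 90  # minutes, the unit mp.wall_time uses
  wt_str = "%02d:%02d:00" % (wall_time // 60, wall_time % 60)
  for marker, value in [("<nnodes>", "2"),
                        ("<walltime>", wt_str),
                        ("<command>", "cctbx.xfel.merge merge.phil")]:
    template = _fill(template, marker, value)
  print(template)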
|
py | b40299a3333a659fdc49e911d586fe15e1bf334f | ##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest

from .SceneViewTest import SceneViewTest
from .ShaderAssignmentUITest import ShaderAssignmentUITest
from .StandardGraphLayoutTest import StandardGraphLayoutTest
from .SceneGadgetTest import SceneGadgetTest
from .SceneInspectorTest import SceneInspectorTest
from .HierarchyViewTest import HierarchyViewTest
from .DocumentationTest import DocumentationTest
from .ShaderViewTest import ShaderViewTest
from .ShaderUITest import ShaderUITest
from .TranslateToolTest import TranslateToolTest
from .ScaleToolTest import ScaleToolTest
from .RotateToolTest import RotateToolTest
from .ContextAlgoTest import ContextAlgoTest
from .CameraToolTest import CameraToolTest
from .VisualiserTest import VisualiserTest
from .TransformToolTest import TransformToolTest
from .CropWindowToolTest import CropWindowToolTest
from .NodeUITest import NodeUITest
if __name__ == "__main__":
unittest.main()
|
py | b4029abae46fa5161e0ba2a43c97da724ec0f086 | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
client_id = '1d048bcd4ddf473b84994ed5e08cb1e1'
client_secret = '2fd5aac456084f0ea4b33e9adf5c0210'
|
py | b4029af9884d0a819ac720fbb7220ca228fdc1b0 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Description
    - Store Zhihu users' personal-information JSON into CSV files.
    - Implements the necessary supporting features:
        - Extract all users from the existing CSV files, used to reload the list of already-crawled users when the program restarts after an interruption.
        - Extract a given number of not-yet-crawled users from the existing CSV files, used to rebuild the task queue when the program restarts after an interruption.
    - The DataFile class is a singleton; there is only one instance in the program.
    - Thread-safe.
Info
- author: "zyk"
- github: "[email protected]"
- date: "2017.11.17"
"""
import threading
import csv
import sys
import os.path
import json
__author__ = "zyk"
# Lock acquired around file operations to keep them thread-safe
FILELOCK = threading.Lock()
class Singleton(object):
"""
    Implements the singleton pattern; DataFile has only one instance in the program.

    Attributes:
        _instance: reference to the unique instance.
"""
_instance = None
def __new__(cls, *args, **kw):
if not cls._instance:
cls._instance = super(Singleton, cls).__new__(cls, *args, **kw)
return cls._instance
class DataFile(Singleton):
"""
    Handles the CSV files in which user data is stored.

    Attributes:
        FILEPATH: absolute path of the folder holding the data (CSV) files.
        PREFIX: filename prefix of each CSV file, including the absolute path; each filename is "prefix" + index + "suffix".
        SUFFIX: filename suffix of each CSV file, i.e. the '.csv' extension.
        MAXSIZE: maximum size of each CSV file, in bytes.
        TABLEHEADER: header (first row) of each CSV file, so rows can be read and written as dicts via csv.DictWriter/DictReader.
        __currentfile: absolute filename of the file currently being written; the data is split across several files, so a variable is needed to track the current one.
"""
def __init__(self):
        self.FILEPATH = os.path.join(os.path.dirname(sys.path[0]), 'datafile')  # one level above this script's directory
self.PREFIX = os.path.join(self.FILEPATH, 'data')
self.SUFFIX = '.csv'
self.MAXSIZE = 100 * 1024 * 1024
self.TABLEHEADER = ['user_url_token', 'user_data_json', 'user_following_list']
self.__currentfile = ''
self.__updatecurrentfile()
pass
def loadusercrawled(self):
"""加载已爬取用户列表。
从已有的csv文件加载已经爬取用户的url token,即每个csv文件的第一列,得到一个列表。
此函数用于爬虫程序中断后重启时的状态恢复。
Args:
None.
Returns:
list: 一个包含已经爬取用户的url token的list。
Raises:
None.
"""
        # If the data folder does not exist yet, return an empty list
if not os.path.exists(self.FILEPATH):
return list()
FILELOCK.acquire()
        # Collect every CSV file in the data folder into a list of absolute filenames.
csvfilelist = list()
for filename in os.listdir(self.FILEPATH):
filename = os.path.join(self.FILEPATH, filename)
if os.path.splitext(filename)[1] == self.SUFFIX:
with open(filename, 'r', encoding='utf-8') as csvfile:
reader = csv.DictReader(csvfile)
if reader.fieldnames == self.TABLEHEADER:
csvfilelist.append(os.path.join(self.FILEPATH, filename))
        # Walk through each of those files to build the list of crawled users' url tokens.
usercrawled = list()
for filename in csvfilelist:
with open(filename, 'r', encoding='utf-8') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
usercrawled.append(row[self.TABLEHEADER[0]])
FILELOCK.release()
return usercrawled
def loaduseruncrawled(self, usercrawled_set, user_count=100000):
"""加载未爬取用户列表。
从已有的csv文件加载已经爬取用户的关注列表(csv文件的第三列),
并用已爬取用户列表去重,得到一个未爬取用户的列表。
默认加载100000个未爬取用户。
此函数用于爬虫程序中断后重启时的状态恢复。
Args:
None.
Returns:
list: 一个包含未爬取用户的url token的list。
Raises:
None.
"""
if not os.path.exists(self.FILEPATH):
            useruncrawled = list()
useruncrawled.append('excited-vczh')
return useruncrawled
FILELOCK.acquire()
        # Collect every CSV file in the data folder into a list of absolute filenames.
csvfilelist = list()
for filename in os.listdir(self.FILEPATH):
filename = os.path.join(self.FILEPATH, filename)
if os.path.splitext(filename)[1] == self.SUFFIX:
with open(filename, 'r', encoding='utf-8') as csvfile:
reader = csv.DictReader(csvfile)
if reader.fieldnames == self.TABLEHEADER:
csvfilelist.append(os.path.join(self.FILEPATH, filename))
csvfilelist.sort()
        # Walk through those files, newest first, to build a list of at most user_count uncrawled users.
useruncrawled = list()
for filename in csvfilelist[::-1]:
if len(useruncrawled) >= user_count:
break
with open(filename, 'r', encoding='utf-8') as csvfile:
reader = csv.DictReader(csvfile)
user_following_list = list()
for row in reader:
tempjson = json.loads(row[self.TABLEHEADER[2]])
user_following_list += tempjson['ids']
for user in user_following_list[::-1]:
                    if len(useruncrawled) >= user_count:
break
if user not in usercrawled_set:
useruncrawled.append(user)
FILELOCK.release()
if len(useruncrawled) == 0:
useruncrawled.append('excited-vczh')
return useruncrawled
def __updatecurrentfile(self):
"""更新当前操作文件。
由于数据较大,分多个文件保存,每个文件不超过100MB,所以需要不断检查已有文件
的大小,当大小达到限制,就创建一个新文件,并更新__currentfile变量的文件名。
Args:
None.
Returns:
None.
Raises:
None.
"""
        # Create the data folder if it does not exist yet
if not os.path.exists(self.FILEPATH):
os.mkdir(self.FILEPATH)
FILELOCK.acquire()
        # Starting from 'data0001.csv', generate filenames in numeric order and check whether each one already exists.
        # If the file exists:
        #     and its size is below MAXSIZE, make it the current file and return;
        #     and its size has reached MAXSIZE, move on to the next filename and repeat.
        # If the file does not exist:
        #     create a new CSV file with that name, make it the current file, and return.
i = 0
while True:
i += 1
# generate a filename.
filename = self.PREFIX + ("%04d" % i) + self.SUFFIX
if os.path.exists(filename):
if os.path.getsize(filename) < self.MAXSIZE:
# if the file exists and the file is unfilled, set the file to currentfile.
self.__currentfile = filename
break
else:
continue
else:
# if the file doesn't exists, Create a new csv file, and write table header in.
with open(filename, 'w', newline='', encoding='utf-8') as csvfile:
# Create table header.
headerrow = dict()
for x in self.TABLEHEADER:
headerrow[x] = x
# Write in.
writer = csv.DictWriter(csvfile, self.TABLEHEADER)
writer.writerow(headerrow)
self.__currentfile = filename
break
FILELOCK.release()
return None
def __getcurrentfile(self):
"""获取当前操作文件。
由于文件实时更新,所以在每次存取文件前,需要确认__currentfile指向的文件没有过期。
若__currentfile指向的文件存在且文件大小未达到MAXSIZE,则直接返回__currentfile;
若__currentfile指向的文件不存在或者文件大小达到MAXSIZE,则更新__currentfile;
Args:
None.
Returns:
str: 返回指向当前操作文件的文件名(包含绝对路径)。
Raises:
None.
"""
if os.path.exists(self.__currentfile) and os.path.getsize(self.__currentfile) < self.MAXSIZE:
return self.__currentfile
else:
self.__updatecurrentfile()
return self.__currentfile
def saveinfo(self, userinfo):
"""存入用户信息。
传入一个包含用户信息的dict,并写入当前操作文件。
其中dict的key与TABLEHEADER中的每个item一一对应。
Args:
userinfo: 一个包含用户信息的dict, 其中TABLEHEADER中的每个item作为这个dict中的一个key,
value则是每个key对应的用户信息
Returns:
bool: 用户信息已经写入文件.
Raises:
None.
"""
result = True
filename = self.__getcurrentfile()
FILELOCK.acquire()
# filename = self.PREFIX + '0002' + self.SUFFIX
try:
with open(filename, 'a', newline='', encoding='utf-8') as csvfile:
writer = csv.DictWriter(csvfile, self.TABLEHEADER)
writer.writerow(userinfo)
except:
result = False
FILELOCK.release()
return result
def saveinfobatch(self, userinfolist):
"""批量存入用户信息。
传入一个包含多个用户信息的list,每个item与均为dict,表示一个用户,其他同saveinfo函数。
本函数用于提升写入效率,降低操作文件的次数
Args:
userinfolist: 一个包含多个用户信息的list, 每个item与均为dict,表示一个用户,其他同saveinfo函数。
Returns:
bool: 用户信息已经写入文件.
Raises:
None.
"""
result = True
filename = self.__getcurrentfile()
FILELOCK.acquire()
try:
with open(filename, 'a', newline='', encoding='utf-8') as csvfile:
writer = csv.DictWriter(csvfile, self.TABLEHEADER)
for userinfo in userinfolist:
writer.writerow(userinfo)
        except Exception:
result = False
FILELOCK.release()
return result
if __name__ == '__main__':
pass
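# A minimal usage sketch of the storage class defined above, kept as comments so this
# module still does nothing when executed directly. The class name `CsvDataFile` is a
# placeholder for the actual class name; 'excited-vczh' is the seed user already used
# elsewhere in this module.
#
#   storage = CsvDataFile()
#   crawled = set()                            # url tokens that have been crawled
#   todo = storage.loaduseruncrawled(crawled, user_count=1000)
#   record = {key: '' for key in storage.TABLEHEADER}
#   storage.saveinfo(record)                   # append a single user record
#   storage.saveinfobatch([record, record])    # or append several records at once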
|
py | b4029b1bffa2bf2b330be0de9ace1787555e8593 | __author__ = 'fujun'
path = '/Users/fujun/Downloads/output/system/'
import os
filenames = os.listdir(path)
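# Each file in `path` is named '<id>.txt'; the loop below renames it to
# 'task<id>_englishSyssum<id>.txt' (e.g. '12.txt' becomes 'task12_englishSyssum12.txt').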
for name in filenames:
print(name)
prefix = name[0: name.rindex('.')]
#print(prefix)
new_name = 'task'+prefix+'_englishSyssum'+prefix+'.txt'
os.rename(path+name, path+new_name) |
py | b4029bed90ec4cef998b6674f016a3d11be7525d | from flask import render_template
from app import app
from flask_login import login_required, current_user
from .forms import UpdateProfile, GeneralForm, GeneralReviewForm, SaleForm, SaleReviewForm, SeductionForm, SeductionReviewForm, MusicForm, MusicReviewForm, ProjectForm, ProjectReviewForm, InterviewForm, InterviewReviewForm, AdvertisementForm, AdvertisementReviewForm
from ..models import User, Interview, Advertisement, Project, Music, Sale, Seduction, General, ReviewAdvertisement, ReviewGeneral, ReviewInterview, ReviewMusic, ReviewProject, ReviewSale, ReviewSeduction, Upvote, Downvote
# Views
@app.route('/')
def index():
'''
View root page function that returns the index page and its data
'''
title = 'Home - Welcome to the Pitches website'
return render_template('index.html',title = title)
@app.route('/user/category/interviews')
@login_required
def interviews():
title = 'Interview'
posts = Interview.query.all()
return render_template("inter.html", posts=posts, title=title)
@app.route('/user/interview/<int:id>', methods=['GET', 'POST'])
@login_required
def displayinterview(id):
interview = Interview.query.get(id)
form = InterviewReviewForm()
if form.validate_on_submit():
review = form.review.data
new_interviewreview = ReviewInterview(
review=review, interview_id=id, user=current_user)
new_interviewreview.save_reviewinterview()
review = ReviewInterview.query.filter_by(interview_id=id).all()
return render_template('interviewpitch.html', interview=interview, review_form=form, review=review) |
py | b4029c4455aa354b76d1f29f8a956c65aa276929 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from typing import Any, Dict, List, Optional, Tuple
import numpy as np
import plotly.graph_objs as go
from ax.core.batch_trial import BatchTrial
from ax.core.data import Data
from ax.core.experiment import Experiment
from ax.core.multi_type_experiment import MultiTypeExperiment
from ax.core.observation import Observation
from ax.modelbridge.cross_validation import CVResult
from ax.modelbridge.transforms.convert_metric_names import convert_mt_observations
from ax.plot.base import (
AxPlotConfig,
AxPlotTypes,
PlotData,
PlotInSampleArm,
PlotMetric,
Z,
)
from ax.plot.scatter import _error_scatter_data, _error_scatter_trace
from ax.utils.common.typeutils import not_none
from plotly import tools
# type alias
FloatList = List[float]
# Helper functions for plotting model fits
def _get_min_max_with_errors(
x: FloatList, y: FloatList, sd_x: FloatList, sd_y: FloatList
) -> Tuple[float, float]:
"""Get min and max of a bivariate dataset (across variables).
Args:
x: point estimate of x variable.
y: point estimate of y variable.
sd_x: standard deviation of x variable.
sd_y: standard deviation of y variable.
Returns:
min_: minimum of points, including uncertainty.
max_: maximum of points, including uncertainty.
"""
min_ = min(
min(np.array(x) - np.multiply(sd_x, Z)), min(np.array(y) - np.multiply(sd_y, Z))
)
max_ = max(
max(np.array(x) + np.multiply(sd_x, Z)), max(np.array(y) + np.multiply(sd_y, Z))
)
return min_, max_
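# Worked example (comment only): with x = [1.0, 2.0], y = [1.5, 2.5], sd_x = [0.1, 0.1],
# sd_y = [0.2, 0.2] and Z ~= 1.96 (the CI z-score imported from ax.plot.base), the result
# is roughly (1.0 - 0.196, 2.5 + 0.392) = (0.804, 2.892), i.e. the range is widened just
# enough for every point's error bars to fit inside the plot.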
def _diagonal_trace(min_: float, max_: float, visible: bool = True) -> Dict[str, Any]:
"""Diagonal line trace from (min_, min_) to (max_, max_).
Args:
min_: minimum to be used for starting point of line.
max_: maximum to be used for ending point of line.
visible: if True, trace is set to visible.
"""
return go.Scatter(
x=[min_, max_],
y=[min_, max_],
line=dict(color="black", width=2, dash="dot"), # noqa: C408
mode="lines",
hoverinfo="none",
visible=visible,
showlegend=False,
)
def _obs_vs_pred_dropdown_plot(
data: PlotData,
rel: bool,
show_context: bool = False,
xlabel: str = "Actual Outcome",
ylabel: str = "Predicted Outcome",
) -> Dict[str, Any]:
"""Plot a dropdown plot of observed vs. predicted values from a model.
Args:
        data: a named tuple storing observed and predicted data
from a model.
rel: if True, plot metrics relative to the status quo.
show_context: Show context on hover.
xlabel: Label for x-axis.
ylabel: Label for y-axis.
"""
traces = []
metric_dropdown = []
if rel and data.status_quo_name is not None:
if show_context:
raise ValueError(
"This plot does not support both context and relativization at "
"the same time."
)
# pyre-fixme[6]: Expected `str` for 1st param but got `Optional[str]`.
status_quo_arm = data.in_sample[data.status_quo_name]
else:
status_quo_arm = None
for i, metric in enumerate(data.metrics):
y_raw, se_raw, y_hat, se_hat = _error_scatter_data(
# Expected `List[typing.Union[PlotInSampleArm,
# ax.plot.base.PlotOutOfSampleArm]]` for 1st anonymous
# parameter to call `ax.plot.scatter._error_scatter_data` but got
# `List[PlotInSampleArm]`.
# pyre-fixme[6]:
list(data.in_sample.values()),
y_axis_var=PlotMetric(metric, True),
x_axis_var=PlotMetric(metric, False),
rel=rel,
status_quo_arm=status_quo_arm,
)
min_, max_ = _get_min_max_with_errors(y_raw, y_hat, se_raw or [], se_hat)
traces.append(_diagonal_trace(min_, max_, visible=(i == 0)))
traces.append(
_error_scatter_trace(
# Expected `List[typing.Union[PlotInSampleArm,
# ax.plot.base.PlotOutOfSampleArm]]` for 1st parameter
# `arms` to call `ax.plot.scatter._error_scatter_trace`
# but got `List[PlotInSampleArm]`.
# pyre-fixme[6]:
arms=list(data.in_sample.values()),
hoverinfo="text",
rel=rel,
show_arm_details_on_hover=True,
show_CI=True,
show_context=show_context,
status_quo_arm=status_quo_arm,
visible=(i == 0),
x_axis_label=xlabel,
x_axis_var=PlotMetric(metric, False),
y_axis_label=ylabel,
y_axis_var=PlotMetric(metric, True),
)
)
# only the first two traces are visible (corresponding to first outcome
# in dropdown)
is_visible = [False] * (len(data.metrics) * 2)
is_visible[2 * i] = True
is_visible[2 * i + 1] = True
# on dropdown change, restyle
metric_dropdown.append(
{"args": ["visible", is_visible], "label": metric, "method": "restyle"}
)
updatemenus = [
{
"x": 0,
"y": 1.125,
"yanchor": "top",
"xanchor": "left",
"buttons": metric_dropdown,
},
{
"buttons": [
{
"args": [
{
"error_x.width": 4,
"error_x.thickness": 2,
"error_y.width": 4,
"error_y.thickness": 2,
}
],
"label": "Yes",
"method": "restyle",
},
{
"args": [
{
"error_x.width": 0,
"error_x.thickness": 0,
"error_y.width": 0,
"error_y.thickness": 0,
}
],
"label": "No",
"method": "restyle",
},
],
"x": 1.125,
"xanchor": "left",
"y": 0.8,
"yanchor": "middle",
},
]
layout = go.Layout(
annotations=[
{
"showarrow": False,
"text": "Show CI",
"x": 1.125,
"xanchor": "left",
"xref": "paper",
"y": 0.9,
"yanchor": "middle",
"yref": "paper",
}
],
xaxis={
"title": xlabel,
"zeroline": False,
"mirror": True,
"linecolor": "black",
"linewidth": 0.5,
},
yaxis={
"title": ylabel,
"zeroline": False,
"mirror": True,
"linecolor": "black",
"linewidth": 0.5,
},
showlegend=False,
hovermode="closest",
updatemenus=updatemenus,
width=530,
height=500,
)
return go.Figure(data=traces, layout=layout)
def _get_batch_comparison_plot_data(
observations: List[Observation],
batch_x: int,
batch_y: int,
rel: bool = False,
status_quo_name: Optional[str] = None,
) -> PlotData:
"""Compute PlotData for comparing repeated arms across trials.
Args:
observations: List of observations.
batch_x: Batch for x-axis.
batch_y: Batch for y-axis.
rel: Whether to relativize data against status_quo arm.
status_quo_name: Name of the status_quo arm.
Returns:
PlotData: a plot data object.
"""
if rel and status_quo_name is None:
raise ValueError("Experiment status quo must be set for rel=True")
x_observations = {
observation.arm_name: observation
for observation in observations
if observation.features.trial_index == batch_x
}
y_observations = {
observation.arm_name: observation
for observation in observations
if observation.features.trial_index == batch_y
}
# Assume input is well formed and metric_names are consistent across observations
metric_names = observations[0].data.metric_names
insample_data: Dict[str, PlotInSampleArm] = {}
for arm_name, x_observation in x_observations.items():
# Restrict to arms present in both trials
if arm_name not in y_observations:
continue
y_observation = y_observations[arm_name]
arm_data = {
"name": arm_name,
"y": {},
"se": {},
"parameters": x_observation.features.parameters,
"y_hat": {},
"se_hat": {},
"context_stratum": None,
}
for i, mname in enumerate(x_observation.data.metric_names):
# pyre-fixme[16]: Optional type has no attribute `__setitem__`.
arm_data["y"][mname] = x_observation.data.means[i]
arm_data["se"][mname] = np.sqrt(x_observation.data.covariance[i][i])
for i, mname in enumerate(y_observation.data.metric_names):
arm_data["y_hat"][mname] = y_observation.data.means[i]
arm_data["se_hat"][mname] = np.sqrt(y_observation.data.covariance[i][i])
# Expected `str` for 2nd anonymous parameter to call `dict.__setitem__` but got
# `Optional[str]`.
# pyre-fixme[6]:
insample_data[arm_name] = PlotInSampleArm(**arm_data)
return PlotData(
metrics=metric_names,
in_sample=insample_data,
out_of_sample=None,
status_quo_name=status_quo_name,
)
def _get_cv_plot_data(cv_results: List[CVResult]) -> PlotData:
if len(cv_results) == 0:
return PlotData(
metrics=[], in_sample={}, out_of_sample=None, status_quo_name=None
)
# arm_name -> Arm data
insample_data: Dict[str, PlotInSampleArm] = {}
# Assume input is well formed and this is consistent
metric_names = cv_results[0].observed.data.metric_names
for cv_result in cv_results:
arm_name = cv_result.observed.arm_name
arm_data = {
"name": cv_result.observed.arm_name,
"y": {},
"se": {},
"parameters": cv_result.observed.features.parameters,
"y_hat": {},
"se_hat": {},
"context_stratum": None,
}
for i, mname in enumerate(cv_result.observed.data.metric_names):
# pyre-fixme[16]: Optional type has no attribute `__setitem__`.
arm_data["y"][mname] = cv_result.observed.data.means[i]
arm_data["se"][mname] = np.sqrt(cv_result.observed.data.covariance[i][i])
for i, mname in enumerate(cv_result.predicted.metric_names):
arm_data["y_hat"][mname] = cv_result.predicted.means[i]
arm_data["se_hat"][mname] = np.sqrt(cv_result.predicted.covariance[i][i])
# Expected `str` for 2nd anonymous parameter to call `dict.__setitem__` but got
# `Optional[str]`.
# pyre-fixme[6]:
insample_data[arm_name] = PlotInSampleArm(**arm_data)
return PlotData(
metrics=metric_names,
in_sample=insample_data,
out_of_sample=None,
status_quo_name=None,
)
def interact_empirical_model_validation(batch: BatchTrial, data: Data) -> AxPlotConfig:
"""Compare the model predictions for the batch arms against observed data.
Relies on the model predictions stored on the generator_runs of batch.
Args:
batch: Batch on which to perform analysis.
data: Observed data for the batch.
Returns:
AxPlotConfig for the plot.
"""
insample_data: Dict[str, PlotInSampleArm] = {}
metric_names = list(data.df["metric_name"].unique())
for struct in batch.generator_run_structs:
generator_run = struct.generator_run
if generator_run.model_predictions is None:
continue
for i, arm in enumerate(generator_run.arms):
arm_data = {
"name": arm.name_or_short_signature,
"y": {},
"se": {},
"parameters": arm.parameters,
"y_hat": {},
"se_hat": {},
"context_stratum": None,
}
predictions = generator_run.model_predictions
for _, row in data.df[
data.df["arm_name"] == arm.name_or_short_signature
].iterrows():
metric_name = row["metric_name"]
# pyre-fixme[16]: Optional type has no attribute `__setitem__`.
arm_data["y"][metric_name] = row["mean"]
arm_data["se"][metric_name] = row["sem"]
arm_data["y_hat"][metric_name] = predictions[0][metric_name][i]
arm_data["se_hat"][metric_name] = predictions[1][metric_name][
metric_name
][i]
# pyre-fixme[6]: Expected `Optional[Dict[str, Union[float, str]]]` for 1s...
insample_data[arm.name_or_short_signature] = PlotInSampleArm(**arm_data)
if not insample_data:
raise ValueError("No model predictions present on the batch.")
plot_data = PlotData(
metrics=metric_names,
in_sample=insample_data,
out_of_sample=None,
status_quo_name=None,
)
fig = _obs_vs_pred_dropdown_plot(data=plot_data, rel=False)
fig["layout"]["title"] = "Cross-validation"
return AxPlotConfig(data=fig, plot_type=AxPlotTypes.GENERIC)
def interact_cross_validation(
cv_results: List[CVResult], show_context: bool = True
) -> AxPlotConfig:
"""Interactive cross-validation (CV) plotting; select metric via dropdown.
Note: uses the Plotly version of dropdown (which means that all data is
stored within the notebook).
Args:
cv_results: cross-validation results.
show_context: if True, show context on hover.
"""
data = _get_cv_plot_data(cv_results)
fig = _obs_vs_pred_dropdown_plot(data=data, rel=False, show_context=show_context)
fig["layout"]["title"] = "Cross-validation"
return AxPlotConfig(data=fig, plot_type=AxPlotTypes.GENERIC)
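# A minimal usage sketch (comments only, not part of this module): given a fitted model
# bridge `model`, the CV results typically come from
# ax.modelbridge.cross_validation.cross_validate and the resulting config can be rendered
# in a notebook, e.g.:
#
#   from ax.modelbridge.cross_validation import cross_validate
#   from ax.utils.notebook.plotting import render
#   cv_results = cross_validate(model)
#   render(interact_cross_validation(cv_results))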
def tile_cross_validation(
cv_results: List[CVResult],
show_arm_details_on_hover: bool = True,
show_context: bool = True,
) -> AxPlotConfig:
"""Tile version of CV plots; sorted by 'best fitting' outcomes.
Plots are sorted in decreasing order using the p-value of a Fisher exact
test statistic.
Args:
        cv_results: cross-validation results.
        show_arm_details_on_hover: if True, display
            parameterizations of arms on hover. Default is True.
        show_context: if True (default), display context on
            hover.
"""
data = _get_cv_plot_data(cv_results)
metrics = data.metrics
# make subplots (2 plots per row)
nrows = int(np.ceil(len(metrics) / 2))
ncols = min(len(metrics), 2)
fig = tools.make_subplots(
rows=nrows,
cols=ncols,
print_grid=False,
subplot_titles=tuple(metrics),
horizontal_spacing=0.15,
vertical_spacing=0.30 / nrows,
)
for i, metric in enumerate(metrics):
y_hat = []
se_hat = []
y_raw = []
se_raw = []
for arm in data.in_sample.values():
y_hat.append(arm.y_hat[metric])
se_hat.append(arm.se_hat[metric])
y_raw.append(arm.y[metric])
se_raw.append(arm.se[metric])
min_, max_ = _get_min_max_with_errors(y_raw, y_hat, se_raw, se_hat)
fig.append_trace(
_diagonal_trace(min_, max_), int(np.floor(i / 2)) + 1, i % 2 + 1
)
fig.append_trace(
_error_scatter_trace(
# Expected `List[typing.Union[PlotInSampleArm,
# ax.plot.base.PlotOutOfSampleArm]]` for 1st anonymous
# parameter to call `ax.plot.scatter._error_scatter_trace` but
# got `List[PlotInSampleArm]`.
# pyre-fixme[6]:
list(data.in_sample.values()),
y_axis_var=PlotMetric(metric, True),
x_axis_var=PlotMetric(metric, False),
y_axis_label="Predicted",
x_axis_label="Actual",
hoverinfo="text",
show_arm_details_on_hover=show_arm_details_on_hover,
show_context=show_context,
),
int(np.floor(i / 2)) + 1,
i % 2 + 1,
)
# if odd number of plots, need to manually remove the last blank subplot
# generated by `tools.make_subplots`
if len(metrics) % 2 == 1:
del fig["layout"]["xaxis{}".format(nrows * ncols)]
del fig["layout"]["yaxis{}".format(nrows * ncols)]
# allocate 400 px per plot (equal aspect ratio)
fig["layout"].update(
title="Cross-Validation", # What should I replace this with?
hovermode="closest",
width=800,
height=400 * nrows,
font={"size": 10},
showlegend=False,
)
# update subplot title size and the axis labels
for i, ant in enumerate(fig["layout"]["annotations"]):
ant["font"].update(size=12)
fig["layout"]["xaxis{}".format(i + 1)].update(
title="Actual Outcome", mirror=True, linecolor="black", linewidth=0.5
)
fig["layout"]["yaxis{}".format(i + 1)].update(
title="Predicted Outcome", mirror=True, linecolor="black", linewidth=0.5
)
return AxPlotConfig(data=fig, plot_type=AxPlotTypes.GENERIC)
def interact_batch_comparison(
observations: List[Observation],
experiment: Experiment,
batch_x: int,
batch_y: int,
rel: bool = False,
status_quo_name: Optional[str] = None,
) -> AxPlotConfig:
"""Compare repeated arms from two trials; select metric via dropdown.
Args:
observations: List of observations to compute comparison.
batch_x: Index of batch for x-axis.
        batch_y: Index of batch for y-axis.
rel: Whether to relativize data against status_quo arm.
status_quo_name: Name of the status_quo arm.
"""
if isinstance(experiment, MultiTypeExperiment):
observations = convert_mt_observations(observations, experiment)
if not status_quo_name and experiment.status_quo:
status_quo_name = not_none(experiment.status_quo).name
plot_data = _get_batch_comparison_plot_data(
observations, batch_x, batch_y, rel=rel, status_quo_name=status_quo_name
)
fig = _obs_vs_pred_dropdown_plot(
data=plot_data,
rel=rel,
xlabel="Batch {}".format(batch_x),
ylabel="Batch {}".format(batch_y),
)
fig["layout"]["title"] = "Repeated arms across trials"
return AxPlotConfig(data=fig, plot_type=AxPlotTypes.GENERIC)
|
py | b4029c8e21cc98cd78dfd2d278ef01d443a9bf9f | from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from app import app
from models import db
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
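# Assuming this script is saved as manage.py, the MigrateCommand registered above drives
# the usual Flask-Migrate workflow from the command line:
#   python manage.py db init      # once, to create the migrations folder
#   python manage.py db migrate   # generate a migration from model changes
#   python manage.py db upgrade   # apply the migration to the database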
if __name__ == '__main__':
manager.run()
|
py | b4029e10f479ea8a1140bf9e8eca5dda6c6347d2 | import tensorflow as tf
import os
import re
import sys
import time
import numpy as np
from model.common import l2_scaling
#from model.svd_tdnn import tdnn_svd6
from model.tdnn import tdnn
#from model.svd_tdnn import tdnn_svd6
#from model.dynamic_tdnn import tdnn_svd
from model.loss import softmax
from model.loss import asoftmax, additive_margin_softmax, additive_angular_margin_softmax
from model.loss import semihard_triplet_loss, angular_triplet_loss, e2e_valid_loss, generalized_angular_triplet_loss
from dataset.data_loader import KaldiDataRandomQueue, KaldiDataSeqQueue, DataOutOfRange
from misc.utils import substring_in_list, activation_summaries
from six.moves import range
class Trainer(object):
"""Handle the training, validation and prediction
Trainer is a simple class that deals with examples having feature-label structure.
TODO: Add different Trainers to deal with feature+aux_feature - label+aux_label structure.
"""
def __init__(self, params, model_dir, single_cpu=False):
"""
Args:
params: Parameters loaded from JSON.
model_dir: The model directory.
single_cpu: Run Tensorflow on one cpu. (default = False)
"""
# The network configuration is set while the loss is left to the build function.
# I think we can switch different loss functions during training epochs.
        # Then simply re-building the network gives us a different loss, while the main network stays the same in that case.
self.network_type = params.network_type
if params.network_type == "tdnn":
self.network = tdnn
elif params.network_type == "tdnn_svd6":
self.network = tdnn_svd6
elif params.network_type == "tdnn_svd":
self.network = tdnn_svd
else:
raise NotImplementedError("Not implement %s network" % params.network_type)
self.loss_type = None
self.loss_network = None
# We have to save all the parameters since the different models may need different parameters
self.params = params
if single_cpu:
self.sess_config = tf.ConfigProto(intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1,
device_count={'CPU': 1},
allow_soft_placement=True)
else:
self.sess_config = tf.ConfigProto(allow_soft_placement=True)
self.sess = tf.Session(config=self.sess_config)
# The model is saved in model/nnet and the evaluation result is saved in model/nnet/eval
self.model = os.path.join(model_dir, "nnet")
# The global step. Note that we don't use tf.train.create_global_step because we may extend the code to
# support adversarial training, in which the global step increases by 1 after `several` updates on the critic
# and encoder. The internal global_step should be carefully handled in that case. So just a placeholder here,
# and use a counter to feed in this value is also an option.
self.global_step = None
# The learning rate is just a placeholder. I use placeholder because it gives me flexibility to dynamically
# change the learning rate during training.
self.learning_rate = None
# Summary for the training and validation
self.train_summary = None
self.valid_summary = None
# The output predictions. Useful in the prediction mode.
self.embeddings = None
self.endpoints = None
# The optimizer used in the training.
# The total loss is useful if we want to change the gradient or variables to optimize (e.g. in fine-tuning)
self.optimizer = None
self.total_loss = None
# Training operation. This is called at each step
self.train_op = None
# Dicts for training and validation inspection.
# In the basic condition, the train_ops contains optimization and training loss.
# And valid loss in the valid_ops. It is possible to add other variables to the dictionaries.
# Note that the valid loss should be computed from tf.metric.mean, so the valid_ops also has the update ops.
# In some steps, the train_ops is required to combine with train_summary to get the summary string.
# These ops are only executed once after several steps (for inspection).
self.train_ops = {}
self.valid_ops = {}
# Model saver and summary writers
# We don't create the saver or writer here, because after reset, they will be unavailable.
self.saver = None
self.summary_writer = None
self.valid_summary_writer = None
# This is an indicator to tell whether the model is built. After building the model, we can only use `reuse`
# to refer to different part of the model.
self.is_built = False
self.is_loaded = False
# In train, valid and prediction modes, we need the inputs. If tf.data is used, the input can be a node in
# the graph. However, we may also use feed_dict mechanism to feed data, in which case the placeholder is placed
# in the graph.
# Now we define the placeholder in the build routines.
self.train_features = None
self.train_labels = None
self.valid_features = None
self.valid_labels = None
self.pred_features = None
def reset(self):
"""Reset the graph so we can create new input pipeline or graph. (Or for other purposes)"""
try:
self.sess.close()
except tf.errors.OpError:
# Maybe the session is closed before
pass
tf.reset_default_graph()
# The session should be created again after the graph is reset.
self.sess = tf.Session(config=self.sess_config)
# After the graph is reset, the flag should be set
self.is_built = False
self.is_loaded = False
# After reset the graph, it is important to reset the seed.
tf.set_random_seed(self.params.seed)
# Reset some variables. The previous ones have become invalid due to the graph reset.
self.saver = None
self.summary_writer = None
self.valid_summary_writer = None
def close(self):
"""Close the session we opened."""
try:
self.sess.close()
except tf.errors.OpError:
pass
def load(self):
"""Load the saved variables.
If the variables have values, the current values will be changed to the saved ones
:return The step of the saved model.
"""
tf.logging.info("Reading checkpoints...")
ckpt = tf.train.get_checkpoint_state(self.model)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
            step = int(next(re.finditer(r"(\d+)(?!.*\d)", ckpt_name)).group(0))
self.saver.restore(self.sess, os.path.join(self.model, ckpt_name))
tf.logging.info("Succeed to load checkpoint {}".format(ckpt_name))
else:
sys.exit("Failed to find a checkpoint in {}".format(self.model))
self.is_loaded = True
return step
def save(self, step):
"""Save the model.
Args:
step: The global step.
"""
self.saver.save(self.sess, os.path.join(self.model, "model"), global_step=step)
def entire_network(self, features, params, is_training, reuse_variables):
"""The definition of the entire network.
Sometimes, feature normalization is applied after the main network.
We combine them together (except for the loss layer).
Args:
features: The network input.
params: The parameters.
is_training: True if the network is for training.
reuse_variables: Share variables.
:return: The network output and the endpoints (for other usage).
"""
features, endpoints = self.network(features, params, is_training, reuse_variables)
endpoints["output"] = features
# Add more components (post-processing) after the main network.
if "feature_norm" in params.dict and params.feature_norm:
assert "feature_scaling_factor" in params.dict, "If feature normalization is applied, scaling factor is necessary."
features = l2_scaling(features, params.feature_scaling_factor)
endpoints["output"] = features
return features, endpoints
def build(self, mode, dim, loss_type=None, num_speakers=None, noupdate_var_list=None):
""" Build a network.
Currently, I use placeholder in the graph and feed data during sess.run. So no need to parse
features and labels.
Args:
mode: `train`, `valid` or `predict`.
dim: The dimension of the feature.
loss_type: Which loss function do we use. Could be None when mode == predict
num_speakers: The total number of speakers. Used in softmax-like network
noupdate_var_list: In the fine-tuning, some variables are fixed. The list contains their names (or part of their names).
We use `noupdate` rather than `notrain` because some variables are not trainable, e.g.
the mean and var in the batchnorm layers.
"""
assert(mode == "train" or mode == "valid" or mode == "predict")
is_training = (mode == "train")
reuse_variables = True if self.is_built else None
# Create a new path for prediction, since the training may build a tower the support multi-GPUs
if mode == "predict":
self.pred_features = tf.placeholder(tf.float32, shape=[None, None, dim], name="pred_features")
with tf.name_scope("predict") as scope:
tf.logging.info("Extract embedding from node %s" % self.params.embedding_node)
# There is no need to do L2 normalization in this function, because we can do the normalization outside,
# or simply a cosine similarity can do it.
# Note that the output node may be different if we use different loss function. For example, if the
# softmax is used, the output of 2-last layer is used as the embedding. While if the end2end loss is
# used, the output of the last layer may be a better choice. So it is impossible to specify the
# embedding node inside the network structure. The configuration will tell the network to output the
# correct activations as the embeddings.
_, endpoints = self.entire_network(self.pred_features, self.params, is_training, reuse_variables)
self.embeddings = endpoints[self.params.embedding_node]
if self.saver is None:
self.saver = tf.train.Saver()
return
# global_step should be defined before loss function since some loss functions use this value to tune
# some internal parameters.
if self.global_step is None:
self.global_step = tf.placeholder(tf.int32, name="global_step")
self.params.dict["global_step"] = self.global_step
# If new loss function is added, please modify the code.
self.loss_type = loss_type
if loss_type == "softmax":
self.loss_network = softmax
elif loss_type == "asoftmax":
self.loss_network = asoftmax
elif loss_type == "additive_margin_softmax":
self.loss_network = additive_margin_softmax
elif loss_type == "additive_angular_margin_softmax":
self.loss_network = additive_angular_margin_softmax
elif loss_type == "semihard_triplet_loss":
self.loss_network = semihard_triplet_loss
elif loss_type == "angular_triplet_loss":
self.loss_network = angular_triplet_loss
elif loss_type == "generalized_angular_triplet_loss":
self.loss_network = generalized_angular_triplet_loss
else:
raise NotImplementedError("Not implement %s loss" % self.loss_type)
if mode == "valid":
tf.logging.info("Building valid network...")
self.valid_features = tf.placeholder(tf.float32, shape=[None, None, dim], name="valid_features")
self.valid_labels = tf.placeholder(tf.int32, shape=[None,], name="valid_labels")
with tf.name_scope("valid") as scope:
# We can adjust some parameters in the config when we do validation
                # TODO: I'm not sure whether it is necessary to change the margin for the valid set.
# TODO: compare the performance!
# Change the margin for the valid set.
if loss_type == "softmax":
pass
elif loss_type == "asoftmax":
train_margin = self.params.asoftmax_m
self.params.asoftmax_m = 1
elif loss_type == "additive_margin_softmax":
train_margin = self.params.amsoftmax_m
self.params.amsoftmax_m = 0
elif loss_type == "additive_angular_margin_softmax":
train_margin = self.params.arcsoftmax_m
self.params.arcsoftmax_m = 0
elif loss_type == "angular_triplet_loss":
# Switch loss to e2e_valid_loss
train_loss_network = self.loss_network
self.loss_network = e2e_valid_loss
else:
pass
if "aux_loss_func" in self.params.dict:
# No auxiliary losses during validation.
train_aux_loss_func = self.params.aux_loss_func
self.params.aux_loss_func = []
features, endpoints = self.entire_network(self.valid_features, self.params, is_training, reuse_variables)
valid_loss, endpoints_loss = self.loss_network(features, self.valid_labels, num_speakers, self.params, is_training, reuse_variables)
endpoints.update(endpoints_loss)
if "aux_loss_func" in self.params.dict:
self.params.aux_loss_func = train_aux_loss_func
# Change the margin back!!!
if loss_type == "softmax":
pass
elif loss_type == "asoftmax":
self.params.asoftmax_m = train_margin
elif loss_type == "additive_margin_softmax":
self.params.amsoftmax_m = train_margin
elif loss_type == "additive_angular_margin_softmax":
self.params.arcsoftmax_m = train_margin
elif loss_type == "angular_triplet_loss":
self.loss_network = train_loss_network
else:
pass
# We can evaluate other stuff in the valid_ops. Just add the new values to the dict.
            # We may also need to check other values except for the loss. Leave that task to other functions.
# During validation, I compute the cosine EER for the final output of the network.
self.embeddings = endpoints["output"]
self.endpoints = endpoints
self.valid_ops["raw_valid_loss"] = valid_loss
mean_valid_loss, mean_valid_loss_op = tf.metrics.mean(valid_loss)
self.valid_ops["valid_loss"] = mean_valid_loss
self.valid_ops["valid_loss_op"] = mean_valid_loss_op
valid_loss_summary = tf.summary.scalar("loss", mean_valid_loss)
self.valid_summary = tf.summary.merge([valid_loss_summary])
if self.saver is None:
self.saver = tf.train.Saver(max_to_keep=self.params.keep_checkpoint_max)
if self.valid_summary_writer is None:
self.valid_summary_writer = tf.summary.FileWriter(os.path.join(self.model, "eval"), self.sess.graph)
return
tf.logging.info("Building training network...")
self.train_features = tf.placeholder(tf.float32, shape=[None, None, dim], name="train_features")
self.train_labels = tf.placeholder(tf.int32, shape=[None, ], name="train_labels")
self.learning_rate = tf.placeholder(tf.float32, name="learning_rate")
if "optimizer" not in self.params.dict:
# The default optimizer is sgd
self.params.dict["optimizer"] = "sgd"
if self.params.optimizer == "sgd":
if "momentum" in self.params.dict:
sys.exit("Using sgd as the optimizer and you should not specify the momentum.")
tf.logging.info("***** Using SGD as the optimizer.")
opt = tf.train.GradientDescentOptimizer(self.learning_rate, name="optimizer")
elif self.params.optimizer == "momentum":
# SGD with momentum
# It is also possible to use other optimizers, e.g. Adam.
tf.logging.info("***** Using Momentum as the optimizer.")
opt = tf.train.MomentumOptimizer(self.learning_rate, self.params.momentum, use_nesterov=self.params.use_nesterov, name="optimizer")
elif self.params.optimizer == "adam":
tf.logging.info("***** Using Adam as the optimizer.")
opt = tf.train.AdamOptimizer(self.learning_rate, name="optimizer")
else:
sys.exit("Optimizer %s is not supported." % self.params.optimizer)
self.optimizer = opt
# Use name_space here. Create multiple name_spaces if multi-gpus
# There is a copy in `set_trainable_variables`
with tf.name_scope("train") as scope:
features, endpoints = self.entire_network(self.train_features, self.params, is_training, reuse_variables)
loss, endpoints_loss = self.loss_network(features, self.train_labels, num_speakers, self.params, is_training, reuse_variables)
self.endpoints = endpoints
endpoints.update(endpoints_loss)
regularization_loss = tf.losses.get_regularization_loss()
total_loss = loss + regularization_loss
            # train_summary contains all the summaries we want to inspect.
            # Get the summaries defined in the network and loss function.
            # The summaries in the network and loss function are about the network variables.
self.train_summary = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)
self.train_summary.append(tf.summary.scalar("loss", loss))
self.train_summary.append(tf.summary.scalar("regularization_loss", regularization_loss))
# We may have other losses (i.e. penalty term in attention layer)
penalty_loss = tf.get_collection("PENALTY")
if len(penalty_loss) != 0:
penalty_loss = tf.reduce_sum(penalty_loss)
total_loss += penalty_loss
self.train_summary.append(tf.summary.scalar("penalty_term", penalty_loss))
self.total_loss = total_loss
self.train_summary.append(tf.summary.scalar("total_loss", total_loss))
self.train_summary.append(tf.summary.scalar("learning_rate", self.learning_rate))
# The gradient ops is inside the scope to support multi-gpus
if noupdate_var_list is not None:
old_batchnorm_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope)
batchnorm_update_ops = []
for op in old_batchnorm_update_ops:
if not substring_in_list(op.name, noupdate_var_list):
batchnorm_update_ops.append(op)
tf.logging.info("[Info] Update %s" % op.name)
else:
tf.logging.info("[Info] Op %s will not be executed" % op.name)
else:
batchnorm_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope)
if noupdate_var_list is not None:
variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
train_var_list = []
for v in variables:
if not substring_in_list(v.name, noupdate_var_list):
train_var_list.append(v)
tf.logging.info("[Info] Train %s" % v.name)
else:
tf.logging.info("[Info] Var %s will not be updated" % v.name)
grads = opt.compute_gradients(total_loss, var_list=train_var_list)
else:
grads = opt.compute_gradients(total_loss)
# Once the model has been built (even for a tower), we set the flag
self.is_built = True
if self.params.clip_gradient:
grads, vars = zip(*grads) # compute gradients of variables with respect to loss
grads_clip, _ = tf.clip_by_global_norm(grads, self.params.clip_gradient_norm) # l2 norm clipping
# we follow the instruction in ge2e paper to scale the learning rate for w and b
                # Actually, I wonder whether we can just set a large value for w (e.g. 20) and fix it.
if self.loss_type == "ge2e":
# The parameters w and b must be the last variables in the gradients
grads_clip = grads_clip[:-2] + [0.01 * grad for grad in grads_clip[-2:]]
# Simply check the position of w and b
for var in vars[-2:]:
assert("w" in var.name or "b" in var.name)
grads = zip(grads_clip, vars)
# There are some things we can do to the gradients, i.e. learning rate scaling.
            # # The values and gradients are added to summaries
# for grad, var in grads:
# if grad is not None:
# self.train_summary.append(tf.summary.histogram(var.op.name + '/gradients', grad))
# self.train_summary.append(tf.summary.scalar(var.op.name + '/gradients_norm', tf.norm(grad)))
self.train_summary.append(activation_summaries(endpoints))
for var in tf.trainable_variables():
self.train_summary.append(tf.summary.histogram(var.op.name, var))
self.train_summary = tf.summary.merge(self.train_summary)
with tf.control_dependencies(batchnorm_update_ops):
self.train_op = opt.apply_gradients(grads)
# We want to inspect other values during training?
self.train_ops["loss"] = total_loss
self.train_ops["raw_loss"] = loss
# The model saver
if self.saver is None:
self.saver = tf.train.Saver(max_to_keep=self.params.keep_checkpoint_max)
# The training summary writer
if self.summary_writer is None:
self.summary_writer = tf.summary.FileWriter(self.model, self.sess.graph)
return
def train(self, data, spklist, learning_rate, aux_data=None):
"""Train the model.
Args:
data: The training data directory.
            spklist: The spklist is a file that maps speaker names to indices.
learning_rate: The learning rate is passed by the main program. The main program can easily tune the
learning rate according to the validation accuracy or anything else.
aux_data: The auxiliary data (maybe useful in child class.)
"""
# initialize all variables
# graph = tf.get_default_graph()
# kernel_six = graph.get_tensor_by_name('tdnn_svd6/tdnn6.5_dense/kernel:0')
# def get_semi_orthogonal(mat):
        # print(mat.shape)
# M = tf.transpose(mat)
# #M = mat
# I = tf.Variable(np.identity(M.shape[0]), dtype=tf.float32)
# for _ in range(10):
# P = tf.matmul(M, M, transpose_b=True)
# alpha2 = tf.divide(tf.trace(tf.matmul(P, P, transpose_b=True)), tf.trace(P))
# M = M - (1 / (2.0 * alpha2)) * tf.matmul(tf.subtract(P, alpha2 * I), M)
# P = tf.matmul(M, M, transpose_b=True)
# alpha2 = tf.divide(tf.trace(tf.matmul(P, P, transpose_b=True)), tf.trace(P))
# M = M / alpha2
# return tf.transpose(M)
# semi = get_semi_orthogonal(kernel_six)
# semi_op = tf.assign(kernel_six, semi)
self.sess.run(tf.global_variables_initializer())
# curr_step is the real step the training at.
curr_step = 0
# Load the model if we have
if os.path.isfile(os.path.join(self.model, "checkpoint")):
curr_step = self.load()
# The data loader
data_loader = KaldiDataRandomQueue(data, spklist,
num_parallel=self.params.num_parallel_datasets,
max_qsize=self.params.max_queue_size,
num_speakers=self.params.num_speakers_per_batch,
num_segments=self.params.num_segments_per_speaker,
min_len=self.params.min_segment_len,
max_len=self.params.max_segment_len,
shuffle=True)
epoch = int(curr_step / self.params.num_steps_per_epoch)
data_loader.start()
for step in range(curr_step % self.params.num_steps_per_epoch, self.params.num_steps_per_epoch):
try:
# if step % 4 == 0:
# # SEMI ORTHOGONA;
# self.sess.run(semi_op)
if step % self.params.save_summary_steps == 0 or step % self.params.show_training_progress == 0:
train_ops = [self.train_ops, self.train_op]
if step % self.params.save_summary_steps == 0:
train_ops.append(self.train_summary)
start_time = time.time()
features, labels = data_loader.fetch()
train_val = self.sess.run(train_ops, feed_dict={self.train_features: features,
self.train_labels: labels,
self.global_step: curr_step,
self.learning_rate: learning_rate})
end_time = time.time()
tf.logging.info(
"Epoch: [%2d] step: [%2d/%2d] time: %.4f s/step, raw loss: %f, total loss: %f"
% (epoch, step, self.params.num_steps_per_epoch, end_time - start_time,
train_val[0]["raw_loss"], train_val[0]["loss"]))
if step % self.params.save_summary_steps == 0:
self.summary_writer.add_summary(train_val[-1], curr_step)
else:
# Only compute optimizer.
features, labels = data_loader.fetch()
_ = self.sess.run(self.train_op, feed_dict={self.train_features: features,
self.train_labels: labels,
self.global_step: curr_step,
self.learning_rate: learning_rate})
if step % self.params.save_checkpoints_steps == 0 and curr_step != 0:
self.save(curr_step)
curr_step += 1
except DataOutOfRange:
tf.logging.info("Finished reading features.")
break
data_loader.stop()
self.save(curr_step)
return
def train_tune_lr(self, data, spklist, tune_period=100, aux_data=None):
"""Tune the learning rate.
According to: https://www.kdnuggets.com/2017/11/estimating-optimal-learning-rate-deep-neural-network.html
Args:
data: The training data directory.
            spklist: The spklist is a file that maps speaker names to indices.
tune_period: How many steps per learning rate.
aux_data: The auxiliary data directory.
"""
# initialize all variables
self.sess.run(tf.global_variables_initializer())
# We need to load the model sometimes, since we may try to find the learning rate for fine-tuning.
if os.path.isfile(os.path.join(self.model, "checkpoint")):
self.load()
data_loader = KaldiDataRandomQueue(data, spklist,
num_parallel=self.params.num_parallel_datasets,
max_qsize=self.params.max_queue_size,
num_speakers=self.params.num_speakers_per_batch,
num_segments=self.params.num_segments_per_speaker,
min_len=self.params.min_segment_len,
max_len=self.params.max_segment_len,
shuffle=True)
data_loader.start()
# The learning rate normally varies from 1e-5 to 1
# Some common values:
# 1. factor = 1.15
# tune_period = 200
# tune_times = 100
init_learning_rate = 1e-5
factor = 1.15
tune_times = 100
fp_lr = open(os.path.join(self.model, "learning_rate_tuning"), "w")
for step in range(tune_period * tune_times):
lr = init_learning_rate * (factor ** (step // tune_period))
try:
if step % tune_period == 0:
train_ops = [self.train_ops, self.train_op, self.train_summary]
# train_ops = [self.train_ops, self.train_op]
start_time = time.time()
features, labels = data_loader.fetch()
train_val = self.sess.run(train_ops, feed_dict={self.train_features: features,
self.train_labels: labels,
self.global_step: 0,
self.learning_rate: lr})
end_time = time.time()
tf.logging.info(
"Epoch: step: %2d, time: %.4f s/step, lr: %f, raw loss: %f, total loss: %f" \
% (step, end_time - start_time, lr,
train_val[0]["raw_loss"], train_val[0]["loss"]))
fp_lr.write("%d %f %f\n" % (step, lr, train_val[0]["loss"]))
self.summary_writer.add_summary(train_val[-1], step)
else:
features, labels = data_loader.fetch()
_ = self.sess.run(self.train_op, feed_dict={self.train_features: features,
self.train_labels: labels,
self.global_step: 0,
self.learning_rate: lr})
except DataOutOfRange:
tf.logging.info("Finished reading features.")
break
data_loader.stop()
fp_lr.close()
return
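    # A small post-processing sketch (comments only): each line of the
    # "learning_rate_tuning" file written above is "<step> <lr> <loss>", so a suitable
    # learning rate can be chosen by plotting loss against lr and picking a value from
    # the region where the loss is still decreasing steeply, e.g.:
    #
    #   import numpy as np
    #   step, lr, loss = np.loadtxt("<model_dir>/nnet/learning_rate_tuning", unpack=True)
    #   # inspect loss vs. lr (log scale on lr) and pick an lr well before the loss blows up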
def valid(self, data, spklist, batch_type="softmax", output_embeddings=False, aux_data=None):
"""Evaluate on the validation set
Args:
data: The training data directory.
            spklist: The spklist is a file that maps speaker names to indices.
batch_type: `softmax` or `end2end`. The batch is `softmax-like` or `end2end-like`.
If the batch is `softmax-like`, each sample are from different speakers;
if the batch is `end2end-like`, the samples are from N speakers with M segments per speaker.
output_embeddings: Set True to output the corresponding embeddings and labels of the valid set.
If output_embeddings, an additional valid metric (e.g. EER) should be computed outside
the function.
aux_data: The auxiliary data directory.
:return: valid_loss, embeddings and labels (None if output_embeddings is False).
"""
# Initialization will reset all the variables in the graph.
# The local variables are also need to be initialized for metrics function.
self.sess.run(tf.global_variables_initializer())
self.sess.run(tf.local_variables_initializer())
assert batch_type == "softmax" or batch_type == "end2end", "The batch_type can only be softmax or end2end"
curr_step = 0
# Load the model. The valid function can only be called after training (of course...)
if os.path.isfile(os.path.join(self.model, "checkpoint")):
curr_step = self.load()
else:
tf.logging.info("[Warning] Cannot find model in %s. Random initialization is used in validation." % self.model)
embeddings_val = None
labels_val = None
num_batches = 0
if output_embeddings:
# If we want to output embeddings, the features should be loaded in order
data_loader = KaldiDataSeqQueue(data, spklist,
num_parallel=2,
max_qsize=10,
batch_size=self.params.num_speakers_per_batch * self.params.num_segments_per_speaker,
min_len=self.params.min_segment_len,
max_len=self.params.max_segment_len,
shuffle=False)
data_loader.start()
tf.logging.info("Generate valid embeddings.")
# In this mode, the embeddings and labels will be saved and output. It needs more memory and takes longer
# to process these values.
while True:
try:
if num_batches % 100 == 0:
tf.logging.info("valid step: %d" % num_batches)
features, labels = data_loader.fetch()
valid_emb_val, valid_labels_val = self.sess.run([self.embeddings, self.valid_labels], feed_dict={self.valid_features: features,
self.valid_labels: labels,
self.global_step: curr_step})
# Save the embeddings and labels
if embeddings_val is None:
embeddings_val = valid_emb_val
labels_val = valid_labels_val
else:
embeddings_val = np.concatenate((embeddings_val, valid_emb_val), axis=0)
labels_val = np.concatenate((labels_val, valid_labels_val), axis=0)
num_batches += 1
except DataOutOfRange:
break
data_loader.stop()
if batch_type == "softmax":
data_loader = KaldiDataSeqQueue(data, spklist,
num_parallel=2,
max_qsize=10,
batch_size=self.params.num_speakers_per_batch * self.params.num_segments_per_speaker,
min_len=self.params.min_segment_len,
max_len=self.params.max_segment_len,
shuffle=True)
elif batch_type == "end2end":
# The num_valid_speakers_per_batch and num_valid_segments_per_speaker are only required when
# End2End loss is used. Since we switch the loss function to softmax generalized e2e loss
# when the e2e loss is used.
assert "num_valid_speakers_per_batch" in self.params.dict and "num_valid_segments_per_speaker" in self.params.dict, \
"Valid parameters should be set if E2E loss is selected"
data_loader = KaldiDataRandomQueue(data, spklist,
num_parallel=2,
max_qsize=10,
num_speakers=self.params.num_valid_speakers_per_batch,
num_segments=self.params.num_valid_segments_per_speaker,
min_len=self.params.min_segment_len,
max_len=self.params.max_segment_len,
shuffle=True)
else:
raise ValueError
data_loader.start()
num_batches = 0
for _ in range(self.params.valid_max_iterations):
try:
if num_batches % 100 == 0:
tf.logging.info("valid step: %d" % num_batches)
features, labels = data_loader.fetch()
_ = self.sess.run(self.valid_ops["valid_loss_op"], feed_dict={self.valid_features: features,
self.valid_labels: labels,
self.global_step: curr_step})
num_batches += 1
except DataOutOfRange:
break
data_loader.stop()
loss, summary = self.sess.run([self.valid_ops["valid_loss"], self.valid_summary])
# We only save the summary for the last batch.
self.valid_summary_writer.add_summary(summary, curr_step)
# The valid loss is averaged over all the batches.
tf.logging.info("[Validation %d batches] valid loss: %f" % (num_batches, loss))
# The output embeddings and labels can be used to compute EER or other metrics
return loss, embeddings_val, labels_val
def predict(self, features):
"""Output the embeddings
:return: A numpy array which is the embeddings
"""
if not self.is_loaded:
if os.path.isfile(os.path.join(self.model, "checkpoint")):
self.load()
else:
sys.exit("Cannot find model in %s" % self.model)
rank = len(features.shape)
assert(rank == 2 or rank == 3)
# Expand the feature if the rank is 2
if rank == 2:
features = np.expand_dims(features, axis=0)
embeddings = self.sess.run(self.embeddings, feed_dict={self.pred_features: features})
if rank == 2:
embeddings = np.squeeze(embeddings, axis=0)
return embeddings
def set_trainable_variables(self, variable_list=None):
"""Set the variables which we want to optimize.
The optimizer will only optimize the variables which contain sub-string in the variable list.
Basically, this is copied from the training path in `build`.
The batchnorm statistics can always be updated?
Args:
variable_list: The model variable contains sub-string in the list will be optimized.
If None, all variables will be optimized.
"""
add_train_summary = []
variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
trainable_variables = []
if variable_list is None:
tf.logging.info("[Info] Add all trainable variables to the optimizer.")
trainable_variables = None
else:
for v in variables:
if substring_in_list(v.name, variable_list):
trainable_variables.append(v)
tf.logging.info("[Info] Add %s to trainable list" % v.name)
with tf.name_scope("train") as scope:
grads = self.optimizer.compute_gradients(self.total_loss, var_list=trainable_variables)
if self.params.clip_gradient:
grads, vars = zip(*grads) # compute gradients of variables with respect to loss
grads_clip, _ = tf.clip_by_global_norm(grads, self.params.clip_gradient_norm) # l2 norm clipping
grads = zip(grads_clip, vars)
            # # The values and gradients are added to summaries
# for grad, var in grads:
# if grad is not None:
# add_train_summary.append(tf.summary.histogram(var.op.name + '/gradients', grad))
# add_train_summary.append(tf.summary.scalar(var.op.name + '/gradients_norm', tf.norm(grad)))
if variable_list is None:
trainable_variables = tf.trainable_variables()
for var in trainable_variables:
add_train_summary.append(tf.summary.histogram(var.op.name, var))
self.train_summary = tf.summary.merge([self.train_summary, tf.summary.merge(add_train_summary)])
batchnorm_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope)
with tf.control_dependencies(batchnorm_update_ops):
self.train_op = self.optimizer.apply_gradients(grads)
def get_finetune_model(self, excluded_list):
"""Start from a pre-trained model and other parameters are initialized using default initializer.
Actually, this function is only called at the first epoch of the fine-tuning, because in succeeded epochs,
we need to fully load the model rather than loading part of the graph.
The pre-trained model is saved in the model directory as index 0.
Backup the pre-trained model and save the new model (with random initialized parameters) as index 0 instead.
Args:
            excluded_list: A list. Do NOT restore the parameters in the excluded_list. This is useful in fine-tuning
an existing model. We load a part of the pre-trained model and leave the other part
randomly initialized.
Deprecated:
data: The training data directory.
            spklist: The spklist is a file that maps speaker names to indices.
learning_rate: The learning rate is passed by the main program. The main program can easily tune the
learning rate according to the validation accuracy or anything else.
"""
# initialize all variables
self.sess.run(tf.global_variables_initializer())
# Load parts of the model
variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
restore_variables = []
for v in variables:
if not substring_in_list(v.name, excluded_list):
restore_variables.append(v)
else:
tf.logging.info("[Info] Ignore %s when loading the checkpoint" % v.name)
finetune_saver = tf.train.Saver(var_list=restore_variables)
ckpt = tf.train.get_checkpoint_state(self.model)
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
finetune_saver.restore(self.sess, os.path.join(self.model, ckpt_name))
# Backup the old files
import glob, shutil
model_checkpoint_path = ckpt.model_checkpoint_path
for filename in glob.glob(model_checkpoint_path + "*"):
shutil.copyfile(filename, filename + '.bak')
# Save the new model. The new model is basically the same with the pre-trained one, while parameters
# NOT in the pre-trained model are random initialized.
# Set the step to 0.
self.save(0)
return
def insight(self, data, spklist, batch_type="softmax", output_embeddings=False, aux_data=None):
"""Just use to debug the network
"""
self.sess.run(tf.global_variables_initializer())
self.sess.run(tf.local_variables_initializer())
assert batch_type == "softmax" or batch_type == "end2end", "The batch_type can only be softmax or end2end"
embeddings_val = None
labels_val = None
self.load()
if output_embeddings:
# If we want to output embeddings, the features should be loaded in order
data_loader = KaldiDataSeqQueue(data, spklist,
num_parallel=2,
max_qsize=10,
batch_size=self.params.num_speakers_per_batch * self.params.num_segments_per_speaker,
min_len=self.params.min_segment_len,
max_len=self.params.max_segment_len,
shuffle=False)
data_loader.start()
tf.logging.info("Generate valid embeddings.")
# In this mode, the embeddings and labels will be saved and output. It needs more memory and takes longer
# to process these values.
while True:
try:
features, labels = data_loader.fetch()
valid_emb_val, valid_labels_val, endpoints_val = self.sess.run([self.embeddings, self.valid_labels, self.endpoints], feed_dict={self.valid_features: features,
self.valid_labels: labels})
# acc = np.sum(np.equal(np.argmax(endpoints_val['logits'], axis=1), labels, dtype=np.float)) / float(
# labels.shape[0])
# print("Acc: %f" % acc)
# Save the embeddings and labels
if embeddings_val is None:
embeddings_val = valid_emb_val
labels_val = valid_labels_val
else:
embeddings_val = np.concatenate((embeddings_val, valid_emb_val), axis=0)
labels_val = np.concatenate((labels_val, valid_labels_val), axis=0)
except DataOutOfRange:
break
data_loader.stop()
if batch_type == "softmax":
data_loader = KaldiDataSeqQueue(data, spklist,
num_parallel=2,
max_qsize=10,
batch_size=self.params.num_speakers_per_batch * self.params.num_segments_per_speaker*10,
min_len=self.params.min_segment_len,
max_len=self.params.max_segment_len,
shuffle=True)
elif batch_type == "end2end":
# The num_valid_speakers_per_batch and num_valid_segments_per_speaker are only required when
# End2End loss is used. Since we switch the loss function to softmax generalized e2e loss
# when the e2e loss is used.
assert "num_valid_speakers_per_batch" in self.params.dict and "num_valid_segments_per_speaker" in self.params.dict, \
"Valid parameters should be set if E2E loss is selected"
data_loader = KaldiDataRandomQueue(data, spklist,
num_parallel=2,
max_qsize=10,
num_speakers=self.params.num_valid_speakers_per_batch,
num_segments=self.params.num_valid_segments_per_speaker,
min_len=self.params.min_segment_len,
max_len=self.params.max_segment_len,
shuffle=True)
else:
raise ValueError
data_loader.start()
while True:
try:
features, labels = data_loader.fetch()
_, endpoints_val = self.sess.run([self.valid_ops["valid_loss_op"], self.endpoints], feed_dict={self.valid_features: features,
self.valid_labels: labels})
except DataOutOfRange:
break
data_loader.stop()
loss = self.sess.run(self.valid_ops["valid_loss"])
tf.logging.info("Shorter segments are used to test the valid loss (%d-%d)" % (self.params.min_segment_len, self.params.max_segment_len))
tf.logging.info("Loss: %f" % loss)
# while True:
# try:
# features, labels = data_loader.fetch()
# valid_ops, endpoints_val = self.sess.run([self.valid_ops, self.endpoints], feed_dict={self.valid_features: features,
# self.valid_labels: labels})
# loss = valid_ops["valid_loss"]
# except DataOutOfRange:
# break
# data_loader.stop()
# tf.logging.info("Loss: %f" % loss)
acc = np.sum(np.equal(np.argmax(endpoints_val['logits'], axis=1), labels, dtype=np.float)) / float(labels.shape[0])
print("Acc: %f" % acc)
import pdb
pdb.set_trace()
# from model.test_utils import softmax
# with tf.variable_scope("softmax", reuse=True):
# test = tf.get_variable("output/kernel")
# test_val = self.sess.run(test)
return loss, embeddings_val, labels_val
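# A minimal driver sketch (comments only; `params` is assumed to be the JSON-backed
# parameter object this class expects, and the directory names are placeholders):
#
#   trainer = Trainer(params, "exp/xvector")
#   trainer.build("train", dim=40, loss_type="softmax", num_speakers=5000)
#   trainer.train("data/train", "data/train/spklist", learning_rate=0.01)
#   trainer.reset()
#   trainer.build("valid", dim=40, loss_type="softmax", num_speakers=5000)
#   loss, emb, labels = trainer.valid("data/valid", "data/valid/spklist", output_embeddings=True)
#   trainer.close()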
|
py | b4029fc6ea834d686cf27012429bdfbdef665703 | """
Subclasses of unittest.TestCase.
"""
from __future__ import absolute_import
import os
import os.path
import shutil
import threading
import unittest
from .. import config
from .. import core
from .. import logging
from .. import utils
def make_test_case(test_kind, *args, **kwargs):
"""
Factory function for creating TestCase instances.
"""
if test_kind not in _TEST_CASES:
raise ValueError("Unknown test kind '%s'" % (test_kind))
return _TEST_CASES[test_kind](*args, **kwargs)
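# A usage sketch (comments only): _TEST_CASES is populated elsewhere in this module, so a
# concrete kind such as "cpp_unit_test" is assumed here for illustration.
#
#   test_case = make_test_case("cpp_unit_test", logger, "build/unittests/some_test")
#   test_case.configure(fixture)
#   test_case.run_test()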
class TestCase(unittest.TestCase):
"""
A test case to execute.
"""
def __init__(self, logger, test_kind, test_name):
"""
Initializes the TestCase with the name of the test.
"""
unittest.TestCase.__init__(self, methodName="run_test")
if not isinstance(logger, logging.Logger):
raise TypeError("logger must be a Logger instance")
if not isinstance(test_kind, basestring):
raise TypeError("test_kind must be a string")
if not isinstance(test_name, basestring):
raise TypeError("test_name must be a string")
self.logger = logger
self.test_kind = test_kind
self.test_name = test_name
self.fixture = None
self.return_code = None
self.is_configured = False
def long_name(self):
"""
Returns the path to the test, relative to the current working directory.
"""
return os.path.relpath(self.test_name)
def basename(self):
"""
Returns the basename of the test.
"""
return os.path.basename(self.test_name)
def short_name(self):
"""
Returns the basename of the test without the file extension.
"""
return os.path.splitext(self.basename())[0]
def id(self):
return self.test_name
def shortDescription(self):
return "%s %s" % (self.test_kind, self.test_name)
def configure(self, fixture, *args, **kwargs):
"""
Stores 'fixture' as an attribute for later use during execution.
"""
if self.is_configured:
raise RuntimeError("configure can only be called once")
self.is_configured = True
self.fixture = fixture
def run_test(self):
"""
Runs the specified test.
"""
raise NotImplementedError("run_test must be implemented by TestCase subclasses")
def as_command(self):
"""
Returns the command invocation used to run the test.
"""
return self._make_process().as_command()
def _execute(self, process):
"""
Runs the specified process.
"""
if config.INTERNAL_EXECUTOR_NAME is not None:
self.logger.info("Starting %s under executor %s...\n%s",
self.shortDescription(),
config.INTERNAL_EXECUTOR_NAME,
process.as_command())
else:
self.logger.info("Starting %s...\n%s", self.shortDescription(), process.as_command())
process.start()
self.logger.info("%s started with pid %s.", self.shortDescription(), process.pid)
self.return_code = process.wait()
if self.return_code != 0:
raise self.failureException("%s failed" % (self.shortDescription()))
self.logger.info("%s finished.", self.shortDescription())
def _make_process(self):
"""
Returns a new Process instance that could be used to run the
test or log the command.
"""
raise NotImplementedError("_make_process must be implemented by TestCase subclasses")
class CPPUnitTestCase(TestCase):
"""
A C++ unit test to execute.
"""
def __init__(self,
logger,
program_executable,
program_options=None):
"""
Initializes the CPPUnitTestCase with the executable to run.
"""
TestCase.__init__(self, logger, "Program", program_executable)
self.program_executable = program_executable
self.program_options = utils.default_if_none(program_options, {}).copy()
def run_test(self):
try:
program = self._make_process()
self._execute(program)
except self.failureException:
raise
except:
self.logger.exception("Encountered an error running C++ unit test %s.", self.basename())
raise
def _make_process(self):
return core.process.Process(self.logger,
[self.program_executable],
**self.program_options)
class CPPIntegrationTestCase(TestCase):
"""
A C++ integration test to execute.
"""
def __init__(self,
logger,
program_executable,
program_options=None):
"""
Initializes the CPPIntegrationTestCase with the executable to run.
"""
TestCase.__init__(self, logger, "Program", program_executable)
self.program_executable = program_executable
self.program_options = utils.default_if_none(program_options, {}).copy()
def configure(self, fixture, *args, **kwargs):
TestCase.configure(self, fixture, *args, **kwargs)
self.program_options["connectionString"] = self.fixture.get_connection_string()
def run_test(self):
try:
program = self._make_process()
self._execute(program)
except self.failureException:
raise
except:
self.logger.exception("Encountered an error running C++ integration test %s.",
self.basename())
raise
def _make_process(self):
return core.programs.generic_program(self.logger,
[self.program_executable],
**self.program_options)
class DBTestCase(TestCase):
"""
A dbtest to execute.
"""
def __init__(self,
logger,
dbtest_suite,
dbtest_executable=None,
dbtest_options=None):
"""
Initializes the DBTestCase with the dbtest suite to run.
"""
TestCase.__init__(self, logger, "DBTest", dbtest_suite)
# Command line options override the YAML configuration.
self.dbtest_executable = utils.default_if_none(config.DBTEST_EXECUTABLE, dbtest_executable)
self.dbtest_suite = dbtest_suite
self.dbtest_options = utils.default_if_none(dbtest_options, {}).copy()
def configure(self, fixture, *args, **kwargs):
TestCase.configure(self, fixture, *args, **kwargs)
# If a dbpath was specified, then use it as a container for all other dbpaths.
dbpath_prefix = self.dbtest_options.pop("dbpath", DBTestCase._get_dbpath_prefix())
dbpath = os.path.join(dbpath_prefix, "job%d" % (self.fixture.job_num), "unittest")
self.dbtest_options["dbpath"] = dbpath
shutil.rmtree(dbpath, ignore_errors=True)
try:
os.makedirs(dbpath)
except os.error:
# Directory already exists.
pass
def run_test(self):
try:
dbtest = self._make_process()
self._execute(dbtest)
except self.failureException:
raise
except:
self.logger.exception("Encountered an error running dbtest suite %s.", self.basename())
raise
def _make_process(self):
return core.programs.dbtest_program(self.logger,
executable=self.dbtest_executable,
suites=[self.dbtest_suite],
**self.dbtest_options)
@staticmethod
def _get_dbpath_prefix():
"""
Returns the prefix of the dbpath to use for the dbtest
executable.
Order of preference:
1. The --dbpathPrefix specified at the command line.
2. Value of the TMPDIR environment variable.
3. Value of the TEMP environment variable.
4. Value of the TMP environment variable.
5. The /tmp directory.
"""
if config.DBPATH_PREFIX is not None:
return config.DBPATH_PREFIX
for env_var in ("TMPDIR", "TEMP", "TMP"):
if env_var in os.environ:
return os.environ[env_var]
return os.path.normpath("/tmp")
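# Example of the precedence above (hypothetical environment): with --dbpathPrefix unset
# and TMPDIR=/var/tmp, _get_dbpath_prefix() returns "/var/tmp"; with none of the
# variables set it falls back to "/tmp".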
class JSTestCase(TestCase):
"""
A jstest to execute.
"""
# A wrapper for the thread class that lets us propagate exceptions.
class ExceptionThread(threading.Thread):
def __init__(self, my_target, my_args):
threading.Thread.__init__(self, target=my_target, args=my_args)
self.err = None
def run(self):
try:
threading.Thread.run(self)
except Exception as self.err:
raise
else:
self.err = None
def _get_exception(self):
return self.err
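# The wrapper above stores any exception raised inside run(); run_test() below joins
# each client thread and re-raises the stored exception so per-client failures are
# not silently dropped.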
DEFAULT_CLIENT_NUM = 1
def __init__(self,
logger,
js_filename,
shell_executable=None,
shell_options=None,
test_kind="JSTest"):
"Initializes the JSTestCase with the JS file to run."
TestCase.__init__(self, logger, test_kind, js_filename)
# Command line options override the YAML configuration.
self.shell_executable = utils.default_if_none(config.MONGO_EXECUTABLE, shell_executable)
self.js_filename = js_filename
self.shell_options = utils.default_if_none(shell_options, {}).copy()
self.num_clients = JSTestCase.DEFAULT_CLIENT_NUM
def configure(self, fixture, num_clients=DEFAULT_CLIENT_NUM, *args, **kwargs):
TestCase.configure(self, fixture, *args, **kwargs)
if self.fixture.port is not None:
self.shell_options["port"] = self.fixture.port
global_vars = self.shell_options.get("global_vars", {}).copy()
data_dir = self._get_data_dir(global_vars)
# Set MongoRunner.dataPath if overridden at command line or not specified in YAML.
if config.DBPATH_PREFIX is not None or "MongoRunner.dataPath" not in global_vars:
# dataPath property is the dataDir property with a trailing slash.
data_path = os.path.join(data_dir, "")
else:
data_path = global_vars["MongoRunner.dataPath"]
global_vars["MongoRunner.dataDir"] = data_dir
global_vars["MongoRunner.dataPath"] = data_path
test_data = global_vars.get("TestData", {}).copy()
test_data["minPort"] = core.network.PortAllocator.min_test_port(fixture.job_num)
test_data["maxPort"] = core.network.PortAllocator.max_test_port(fixture.job_num)
# Marks the main test when multiple test clients are run concurrently, to notify the test
# of any code that should only be run once. If there is only one client, it is the main one.
test_data["isMainTest"] = True
global_vars["TestData"] = test_data
self.shell_options["global_vars"] = global_vars
shutil.rmtree(data_dir, ignore_errors=True)
self.num_clients = num_clients
try:
os.makedirs(data_dir)
except os.error:
# Directory already exists.
pass
def _get_data_dir(self, global_vars):
"""
Returns the value that the mongo shell should set for the
MongoRunner.dataDir property.
"""
# Command line options override the YAML configuration.
data_dir_prefix = utils.default_if_none(config.DBPATH_PREFIX,
global_vars.get("MongoRunner.dataDir"))
data_dir_prefix = utils.default_if_none(data_dir_prefix, config.DEFAULT_DBPATH_PREFIX)
return os.path.join(data_dir_prefix,
"job%d" % (self.fixture.job_num),
config.MONGO_RUNNER_SUBDIR)
def run_test(self):
threads = []
try:
# Don't thread if there is only one client.
if self.num_clients == 1:
shell = self._make_process(self.logger)
self._execute(shell)
else:
# If there are multiple clients, make a new thread for each client.
for i in xrange(self.num_clients):
t = self.ExceptionThread(my_target=self._run_test_in_thread, my_args=[i])
t.start()
threads.append(t)
except self.failureException:
raise
except:
self.logger.exception("Encountered an error running jstest %s.", self.basename())
raise
finally:
for t in threads:
t.join()
for t in threads:
if t._get_exception() is not None:
raise t._get_exception()
def _make_process(self, logger=None, thread_id=0):
# If logger is None, the test is not running in a worker thread, so fall back to
# self.logger.
logger = utils.default_if_none(logger, self.logger)
is_main_test = True
if thread_id > 0:
is_main_test = False
return core.programs.mongo_shell_program(logger,
executable=self.shell_executable,
filename=self.js_filename,
isMainTest=is_main_test,
**self.shell_options)
def _run_test_in_thread(self, thread_id):
# Make a logger for each thread.
logger = logging.loggers.new_logger(self.test_kind + ':' + str(thread_id),
parent=self.logger)
shell = self._make_process(logger, thread_id)
self._execute(shell)
class MongosTestCase(TestCase):
"""
A TestCase which runs a mongos binary with the given parameters.
"""
def __init__(self,
logger,
mongos_options):
"""
Initializes the mongos test and saves the options.
"""
self.mongos_executable = utils.default_if_none(config.MONGOS_EXECUTABLE,
config.DEFAULT_MONGOS_EXECUTABLE)
# Use the executable as the test name.
TestCase.__init__(self, logger, "mongos", self.mongos_executable)
self.options = mongos_options.copy()
def configure(self, fixture, *args, **kwargs):
"""
Ensures the --test option is present in the mongos options.
"""
TestCase.configure(self, fixture, *args, **kwargs)
# Always specify test option to ensure the mongos will terminate.
if "test" not in self.options:
self.options["test"] = ""
def run_test(self):
try:
mongos = self._make_process()
self._execute(mongos)
except self.failureException:
raise
except:
self.logger.exception("Encountered an error running %s.", mongos.as_command())
raise
def _make_process(self):
return core.programs.mongos_program(self.logger,
executable=self.mongos_executable,
**self.options)
_TEST_CASES = {
"cpp_unit_test": CPPUnitTestCase,
"cpp_integration_test": CPPIntegrationTestCase,
"db_test": DBTestCase,
"js_test": JSTestCase,
"mongos_test": MongosTestCase,
}
|
py | b402a13f8cd9881c6c4b538f1998f3a0e52b1e91 | # coding: utf-8
"""
Notification API
The eBay Notification API enables management of the entire end-to-end eBay notification experience by allowing users to:<ul><li>Browse for supported notification topics and retrieve topic details</li><li>Create, configure, and manage notification destination endpoints</li><li>Configure, manage, and test notification subscriptions</li><li>Process eBay notifications and verify the integrity of the message payload</li></ul> # noqa: E501
OpenAPI spec version: v1.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Error(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'category': 'str',
'domain': 'str',
'error_id': 'int',
'input_ref_ids': 'list[str]',
'long_message': 'str',
'message': 'str',
'output_ref_ids': 'list[str]',
'parameters': 'list[ErrorParameter]',
'subdomain': 'str'
}
attribute_map = {
'category': 'category',
'domain': 'domain',
'error_id': 'errorId',
'input_ref_ids': 'inputRefIds',
'long_message': 'longMessage',
'message': 'message',
'output_ref_ids': 'outputRefIds',
'parameters': 'parameters',
'subdomain': 'subdomain'
}
def __init__(self, category=None, domain=None, error_id=None, input_ref_ids=None, long_message=None, message=None, output_ref_ids=None, parameters=None, subdomain=None): # noqa: E501
"""Error - a model defined in Swagger""" # noqa: E501
self._category = None
self._domain = None
self._error_id = None
self._input_ref_ids = None
self._long_message = None
self._message = None
self._output_ref_ids = None
self._parameters = None
self._subdomain = None
self.discriminator = None
if category is not None:
self.category = category
if domain is not None:
self.domain = domain
if error_id is not None:
self.error_id = error_id
if input_ref_ids is not None:
self.input_ref_ids = input_ref_ids
if long_message is not None:
self.long_message = long_message
if message is not None:
self.message = message
if output_ref_ids is not None:
self.output_ref_ids = output_ref_ids
if parameters is not None:
self.parameters = parameters
if subdomain is not None:
self.subdomain = subdomain
@property
def category(self):
"""Gets the category of this Error. # noqa: E501
Identifies the type of error. # noqa: E501
:return: The category of this Error. # noqa: E501
:rtype: str
"""
return self._category
@category.setter
def category(self, category):
"""Sets the category of this Error.
Identifies the type of error. # noqa: E501
:param category: The category of this Error. # noqa: E501
:type: str
"""
self._category = category
@property
def domain(self):
"""Gets the domain of this Error. # noqa: E501
Name for the primary system where the error occurred. This is relevant for application errors. # noqa: E501
:return: The domain of this Error. # noqa: E501
:rtype: str
"""
return self._domain
@domain.setter
def domain(self, domain):
"""Sets the domain of this Error.
Name for the primary system where the error occurred. This is relevant for application errors. # noqa: E501
:param domain: The domain of this Error. # noqa: E501
:type: str
"""
self._domain = domain
@property
def error_id(self):
"""Gets the error_id of this Error. # noqa: E501
A unique number to identify the error. # noqa: E501
:return: The error_id of this Error. # noqa: E501
:rtype: int
"""
return self._error_id
@error_id.setter
def error_id(self, error_id):
"""Sets the error_id of this Error.
A unique number to identify the error. # noqa: E501
:param error_id: The error_id of this Error. # noqa: E501
:type: int
"""
self._error_id = error_id
@property
def input_ref_ids(self):
"""Gets the input_ref_ids of this Error. # noqa: E501
An array of request elements most closely associated with the error. # noqa: E501
:return: The input_ref_ids of this Error. # noqa: E501
:rtype: list[str]
"""
return self._input_ref_ids
@input_ref_ids.setter
def input_ref_ids(self, input_ref_ids):
"""Sets the input_ref_ids of this Error.
An array of request elements most closely associated with the error. # noqa: E501
:param input_ref_ids: The input_ref_ids of this Error. # noqa: E501
:type: list[str]
"""
self._input_ref_ids = input_ref_ids
@property
def long_message(self):
"""Gets the long_message of this Error. # noqa: E501
A more detailed explanation of the error. # noqa: E501
:return: The long_message of this Error. # noqa: E501
:rtype: str
"""
return self._long_message
@long_message.setter
def long_message(self, long_message):
"""Sets the long_message of this Error.
A more detailed explanation of the error. # noqa: E501
:param long_message: The long_message of this Error. # noqa: E501
:type: str
"""
self._long_message = long_message
@property
def message(self):
"""Gets the message of this Error. # noqa: E501
Information on how to correct the problem, in the end user's terms and language where applicable. # noqa: E501
:return: The message of this Error. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this Error.
Information on how to correct the problem, in the end user's terms and language where applicable. # noqa: E501
:param message: The message of this Error. # noqa: E501
:type: str
"""
self._message = message
@property
def output_ref_ids(self):
"""Gets the output_ref_ids of this Error. # noqa: E501
An array of request elements most closely associated with the error. # noqa: E501
:return: The output_ref_ids of this Error. # noqa: E501
:rtype: list[str]
"""
return self._output_ref_ids
@output_ref_ids.setter
def output_ref_ids(self, output_ref_ids):
"""Sets the output_ref_ids of this Error.
An array of request elements most closely associated with the error. # noqa: E501
:param output_ref_ids: The output_ref_ids of this Error. # noqa: E501
:type: list[str]
"""
self._output_ref_ids = output_ref_ids
@property
def parameters(self):
"""Gets the parameters of this Error. # noqa: E501
An array of name/value pairs that describe the details of the error condition. These are useful when multiple errors are returned. # noqa: E501
:return: The parameters of this Error. # noqa: E501
:rtype: list[ErrorParameter]
"""
return self._parameters
@parameters.setter
def parameters(self, parameters):
"""Sets the parameters of this Error.
An array of name/value pairs that describe the details of the error condition. These are useful when multiple errors are returned. # noqa: E501
:param parameters: The parameters of this Error. # noqa: E501
:type: list[ErrorParameter]
"""
self._parameters = parameters
@property
def subdomain(self):
"""Gets the subdomain of this Error. # noqa: E501
Further helps indicate which subsystem the error is coming from. System subcategories include: Initialization, Serialization, Security, Monitoring, Rate Limiting, etc. # noqa: E501
:return: The subdomain of this Error. # noqa: E501
:rtype: str
"""
return self._subdomain
@subdomain.setter
def subdomain(self, subdomain):
"""Sets the subdomain of this Error.
Further helps indicate which subsystem the error is coming from. System subcategories include: Initialization, Serialization, Security, Monitoring, Rate Limiting, etc. # noqa: E501
:param subdomain: The subdomain of this Error. # noqa: E501
:type: str
"""
self._subdomain = subdomain
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Error, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Error):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
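# A minimal usage sketch (hypothetical field values; in practice Error objects are
# deserialized by the generated API client rather than constructed by hand):
#
#   err = Error(category="REQUEST", error_id=195000, message="Invalid topic id.")
#   print(err.to_dict())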
|
py | b402a33ed117c7433007adbb132ef7761e394bef | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from operator import itemgetter
import time
from openerp.osv import fields, osv
from openerp import api
class account_fiscal_position(osv.osv):
_name = 'account.fiscal.position'
_description = 'Fiscal Position'
_order = 'sequence'
_columns = {
'sequence': fields.integer('Sequence'),
'name': fields.char('Fiscal Position', required=True),
'active': fields.boolean('Active', help="By unchecking the active field, you may hide a fiscal position without deleting it."),
'company_id': fields.many2one('res.company', 'Company'),
'account_ids': fields.one2many('account.fiscal.position.account', 'position_id', 'Account Mapping', copy=True),
'tax_ids': fields.one2many('account.fiscal.position.tax', 'position_id', 'Tax Mapping', copy=True),
'note': fields.text('Notes'),
'auto_apply': fields.boolean('Automatic', help="Automatically apply this fiscal position."),
'vat_required': fields.boolean('VAT required', help="Apply only if partner has a VAT number."),
'country_id': fields.many2one('res.country', 'Countries', help="Apply only if the delivery or invoicing country matches."),
'country_group_id': fields.many2one('res.country.group', 'Country Group', help="Apply only if the delivery or invoicing country matches the group."),
}
_defaults = {
'active': True,
}
def _check_country(self, cr, uid, ids, context=None):
obj = self.browse(cr, uid, ids[0], context=context)
if obj.country_id and obj.country_group_id:
return False
return True
_constraints = [
(_check_country, 'You cannot select both a country and a group of countries', ['country_id', 'country_group_id']),
]
@api.v7
def map_tax(self, cr, uid, fposition_id, taxes, context=None):
if not taxes:
return []
if not fposition_id:
return map(lambda x: x.id, taxes)
result = set()
for t in taxes:
ok = False
for tax in fposition_id.tax_ids:
if tax.tax_src_id.id == t.id:
if tax.tax_dest_id:
result.add(tax.tax_dest_id.id)
ok=True
if not ok:
result.add(t.id)
return list(result)
@api.v8 # noqa
def map_tax(self, taxes):
result = self.env['account.tax'].browse()
for tax in taxes:
for t in self.tax_ids:
if t.tax_src_id == tax:
if t.tax_dest_id:
result |= t.tax_dest_id
break
else:
result |= tax
return result
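# Usage sketch for the new-style (v8) map_tax above, with hypothetical recordsets shown
# only to illustrate the mapping; the v7 map_tax takes a fiscal position browse record
# and a list of taxes instead:
#
#   mapped_taxes = fiscal_position.map_tax(order_line.tax_id)  # account.tax recordset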
@api.v7
def map_account(self, cr, uid, fposition_id, account_id, context=None):
if not fposition_id:
return account_id
for pos in fposition_id.account_ids:
if pos.account_src_id.id == account_id:
account_id = pos.account_dest_id.id
break
return account_id
@api.v8
def map_account(self, account):
for pos in self.account_ids:
if pos.account_src_id == account:
return pos.account_dest_id
return account
def get_fiscal_position(self, cr, uid, company_id, partner_id, delivery_id=None, context=None):
if not partner_id:
return False
# This can be easily overridden to apply more complex fiscal rules
part_obj = self.pool['res.partner']
partner = part_obj.browse(cr, uid, partner_id, context=context)
# A fiscal position set manually on the partner always wins
if partner.property_account_position:
return partner.property_account_position.id
# if no delivery address is set, fall back to the invoicing partner
if delivery_id:
delivery = part_obj.browse(cr, uid, delivery_id, context=context)
else:
delivery = partner
domain = [
('auto_apply', '=', True),
'|', ('vat_required', '=', False), ('vat_required', '=', partner.vat_subjected),
]
fiscal_position_ids = self.search(cr, uid, domain + [('country_id', '=', delivery.country_id.id)], context=context, limit=1)
if fiscal_position_ids:
return fiscal_position_ids[0]
fiscal_position_ids = self.search(cr, uid, domain + [('country_group_id.country_ids', '=', delivery.country_id.id)], context=context, limit=1)
if fiscal_position_ids:
return fiscal_position_ids[0]
fiscal_position_ids = self.search(cr, uid, domain + [('country_id', '=', None), ('country_group_id', '=', None)], context=context, limit=1)
if fiscal_position_ids:
return fiscal_position_ids[0]
return False
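# Resolution order implemented above: a fiscal position set manually on the partner
# always wins; otherwise the first auto-apply position matching the delivery country
# is used, then one matching a country group, then one with no country restriction.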
class account_fiscal_position_tax(osv.osv):
_name = 'account.fiscal.position.tax'
_description = 'Taxes Fiscal Position'
_rec_name = 'position_id'
_columns = {
'position_id': fields.many2one('account.fiscal.position', 'Fiscal Position', required=True, ondelete='cascade'),
'tax_src_id': fields.many2one('account.tax', 'Tax Source', required=True),
'tax_dest_id': fields.many2one('account.tax', 'Replacement Tax')
}
_sql_constraints = [
('tax_src_dest_uniq',
'unique (position_id,tax_src_id,tax_dest_id)',
'A tax fiscal position can only be defined once for the same taxes.')
]
class account_fiscal_position_account(osv.osv):
_name = 'account.fiscal.position.account'
_description = 'Accounts Fiscal Position'
_rec_name = 'position_id'
_columns = {
'position_id': fields.many2one('account.fiscal.position', 'Fiscal Position', required=True, ondelete='cascade'),
'account_src_id': fields.many2one('account.account', 'Account Source', domain=[('type','<>','view')], required=True),
'account_dest_id': fields.many2one('account.account', 'Account Destination', domain=[('type','<>','view')], required=True)
}
_sql_constraints = [
('account_src_dest_uniq',
'unique (position_id,account_src_id,account_dest_id)',
'An account fiscal position can only be defined once for the same accounts.')
]
class res_partner(osv.osv):
_name = 'res.partner'
_inherit = 'res.partner'
_description = 'Partner'
def _credit_debit_get(self, cr, uid, ids, field_names, arg, context=None):
ctx = context.copy()
ctx['all_fiscalyear'] = True
query = self.pool.get('account.move.line')._query_get(cr, uid, context=ctx)
cr.execute("""SELECT l.partner_id, a.type, SUM(l.debit-l.credit)
FROM account_move_line l
LEFT JOIN account_account a ON (l.account_id=a.id)
WHERE a.type IN ('receivable','payable')
AND l.partner_id IN %s
AND l.reconcile_id IS NULL
AND """ + query + """
GROUP BY l.partner_id, a.type
""",
(tuple(ids),))
maps = {'receivable':'credit', 'payable':'debit' }
res = {}
for id in ids:
res[id] = {}.fromkeys(field_names, 0)
for pid,type,val in cr.fetchall():
if val is None: val=0
res[pid][maps[type]] = (type=='receivable') and val or -val
return res
def _asset_difference_search(self, cr, uid, obj, name, type, args, context=None):
if not args:
return []
having_values = tuple(map(itemgetter(2), args))
where = ' AND '.join(
map(lambda x: '(SUM(bal2) %(operator)s %%s)' % {
'operator':x[1]},args))
query = self.pool.get('account.move.line')._query_get(cr, uid, context=context)
cr.execute(('SELECT pid AS partner_id, SUM(bal2) FROM ' \
'(SELECT CASE WHEN bal IS NOT NULL THEN bal ' \
'ELSE 0.0 END AS bal2, p.id as pid FROM ' \
'(SELECT (debit-credit) AS bal, partner_id ' \
'FROM account_move_line l ' \
'WHERE account_id IN ' \
'(SELECT id FROM account_account '\
'WHERE type=%s AND active) ' \
'AND reconcile_id IS NULL ' \
'AND '+query+') AS l ' \
'RIGHT JOIN res_partner p ' \
'ON p.id = partner_id ) AS pl ' \
'GROUP BY pid HAVING ' + where),
(type,) + having_values)
res = cr.fetchall()
if not res:
return [('id','=','0')]
return [('id','in',map(itemgetter(0), res))]
def _credit_search(self, cr, uid, obj, name, args, context=None):
return self._asset_difference_search(cr, uid, obj, name, 'receivable', args, context=context)
def _debit_search(self, cr, uid, obj, name, args, context=None):
return self._asset_difference_search(cr, uid, obj, name, 'payable', args, context=context)
def _invoice_total(self, cr, uid, ids, field_name, arg, context=None):
result = {}
account_invoice_report = self.pool.get('account.invoice.report')
for partner in self.browse(cr, uid, ids, context=context):
domain = [('partner_id', 'child_of', partner.id)]
invoice_ids = account_invoice_report.search(cr, uid, domain, context=context)
invoices = account_invoice_report.browse(cr, uid, invoice_ids, context=context)
result[partner.id] = sum(inv.user_currency_price_total for inv in invoices)
return result
def _journal_item_count(self, cr, uid, ids, field_name, arg, context=None):
MoveLine = self.pool('account.move.line')
AnalyticAccount = self.pool('account.analytic.account')
return {
partner_id: {
'journal_item_count': MoveLine.search_count(cr, uid, [('partner_id', '=', partner_id)], context=context),
'contracts_count': AnalyticAccount.search_count(cr,uid, [('partner_id', '=', partner_id)], context=context)
}
for partner_id in ids
}
def has_something_to_reconcile(self, cr, uid, partner_id, context=None):
'''
at least one debit, one credit and a line newer than the partner's last reconciliation date
'''
cr.execute('''
SELECT l.partner_id, SUM(l.debit) AS debit, SUM(l.credit) AS credit
FROM account_move_line l
RIGHT JOIN account_account a ON (a.id = l.account_id)
RIGHT JOIN res_partner p ON (l.partner_id = p.id)
WHERE a.reconcile IS TRUE
AND p.id = %s
AND l.reconcile_id IS NULL
AND (p.last_reconciliation_date IS NULL OR l.date > p.last_reconciliation_date)
AND l.state <> 'draft'
GROUP BY l.partner_id''', (partner_id,))
res = cr.dictfetchone()
if res:
return bool(res['debit'] and res['credit'])
return False
def mark_as_reconciled(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'last_reconciliation_date': time.strftime('%Y-%m-%d %H:%M:%S')}, context=context)
_columns = {
'vat_subjected': fields.boolean('VAT Legal Statement', help="Check this box if the partner is subjected to the VAT. It will be used for the VAT legal statement."),
'credit': fields.function(_credit_debit_get,
fnct_search=_credit_search, string='Total Receivable', multi='dc', help="Total amount this customer owes you."),
'debit': fields.function(_credit_debit_get, fnct_search=_debit_search, string='Total Payable', multi='dc', help="Total amount you have to pay to this supplier."),
'debit_limit': fields.float('Payable Limit'),
'total_invoiced': fields.function(_invoice_total, string="Total Invoiced", type='float', groups='account.group_account_invoice'),
'contracts_count': fields.function(_journal_item_count, string="Contracts", type='integer', multi="invoice_journal"),
'journal_item_count': fields.function(_journal_item_count, string="Journal Items", type="integer", multi="invoice_journal"),
'property_account_payable': fields.property(
type='many2one',
relation='account.account',
string="Account Payable",
domain="[('type', '=', 'payable')]",
help="This account will be used instead of the default one as the payable account for the current partner",
required=True),
'property_account_receivable': fields.property(
type='many2one',
relation='account.account',
string="Account Receivable",
domain="[('type', '=', 'receivable')]",
help="This account will be used instead of the default one as the receivable account for the current partner",
required=True),
'property_account_position': fields.property(
type='many2one',
relation='account.fiscal.position',
string="Fiscal Position",
help="The fiscal position will determine taxes and accounts used for the partner.",
),
'property_payment_term': fields.property(
type='many2one',
relation='account.payment.term',
string ='Customer Payment Term',
help="This payment term will be used instead of the default one for sale orders and customer invoices"),
'property_supplier_payment_term': fields.property(
type='many2one',
relation='account.payment.term',
string ='Supplier Payment Term',
help="This payment term will be used instead of the default one for purchase orders and supplier invoices"),
'ref_companies': fields.one2many('res.company', 'partner_id',
'Companies that refers to partner'),
'last_reconciliation_date': fields.datetime(
'Latest Full Reconciliation Date', copy=False,
help='Date on which the partner accounting entries were fully reconciled last time. '
'It differs from the last date on which a reconciliation was made for this partner: '
'here we mean that nothing more was left to reconcile at this date. '
'This can be achieved in 2 different ways: either the last unreconciled debit/credit '
'entry of this partner was reconciled, or the user pressed the button '
'"Nothing more to reconcile" during the manual reconciliation process.')
}
def _commercial_fields(self, cr, uid, context=None):
return super(res_partner, self)._commercial_fields(cr, uid, context=context) + \
['debit_limit', 'property_account_payable', 'property_account_receivable', 'property_account_position',
'property_payment_term', 'property_supplier_payment_term', 'last_reconciliation_date']
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
py | b402a3cc89102dbb83c7f86534897bc5e5645510 | from __future__ import division, print_function, absolute_import
import tflearn
from tflearn.layers.core import dropout,activation,input_data,utils,fully_connected
from tflearn.layers.estimator import regression
import json
import numpy as np
def predict(data):
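# Note (added documentation): the network is rebuilt and the checkpoint reloaded on
# every call, which is simple but slow for repeated predictions. `data` is expected to
# be a JSON string mapping 20 feature names to numeric values, and the checkpoint path
# below is hard-coded to the original author's machine. `sgd` and `top_k` are created
# but not wired into the regression layer.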
input_layer = tflearn.input_data(shape=[None, 20])
dense1 = tflearn.fully_connected(input_layer, 128, activation='relu')
dropout1 = tflearn.dropout(dense1, 0.8)
dense2 = tflearn.fully_connected(dropout1, 32, activation='relu')
dropout2 = tflearn.dropout(dense2, 0.8)
softmax = tflearn.fully_connected(dropout2, 2, activation='softmax')
sgd = tflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=1000)
top_k = tflearn.metrics.Top_k(3)
net = tflearn.regression(softmax, optimizer='adam',loss='categorical_crossentropy')
model = tflearn.DNN(net)
model.load("/home/vasu/HINT/backend/src/job_hell/utlis/tflearn_nn.model")
x = json.loads(data)
temp = []
for ix in x:
temp.append(float(x[ix]))
dum = np.array(temp)
dum = dum.reshape(1,20)
y = model.predict_label(dum)
return np.argmax(y)
|
py | b402a42f4abce46e3f7bf55ee9b1dd47e4f1c90e | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import packaging.version
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.dialogflow_v2.services.fulfillments import FulfillmentsAsyncClient
from google.cloud.dialogflow_v2.services.fulfillments import FulfillmentsClient
from google.cloud.dialogflow_v2.services.fulfillments import transports
from google.cloud.dialogflow_v2.services.fulfillments.transports.base import (
_GOOGLE_AUTH_VERSION,
)
from google.cloud.dialogflow_v2.types import fulfillment
from google.cloud.dialogflow_v2.types import fulfillment as gcd_fulfillment
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
import google.auth
# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively
# through google-api-core:
# - Delete the auth "less than" test cases
# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
reason="This test requires google-auth < 1.25.0",
)
requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
reason="This test requires google-auth >= 1.25.0",
)
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert FulfillmentsClient._get_default_mtls_endpoint(None) is None
assert (
FulfillmentsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
)
assert (
FulfillmentsClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
FulfillmentsClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
FulfillmentsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert FulfillmentsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [FulfillmentsClient, FulfillmentsAsyncClient,])
def test_fulfillments_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "dialogflow.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.FulfillmentsGrpcTransport, "grpc"),
(transports.FulfillmentsGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_fulfillments_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [FulfillmentsClient, FulfillmentsAsyncClient,])
def test_fulfillments_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "dialogflow.googleapis.com:443"
def test_fulfillments_client_get_transport_class():
transport = FulfillmentsClient.get_transport_class()
available_transports = [
transports.FulfillmentsGrpcTransport,
]
assert transport in available_transports
transport = FulfillmentsClient.get_transport_class("grpc")
assert transport == transports.FulfillmentsGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(FulfillmentsClient, transports.FulfillmentsGrpcTransport, "grpc"),
(
FulfillmentsAsyncClient,
transports.FulfillmentsGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
FulfillmentsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FulfillmentsClient)
)
@mock.patch.object(
FulfillmentsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(FulfillmentsAsyncClient),
)
def test_fulfillments_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(FulfillmentsClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(FulfillmentsClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(FulfillmentsClient, transports.FulfillmentsGrpcTransport, "grpc", "true"),
(
FulfillmentsAsyncClient,
transports.FulfillmentsGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(FulfillmentsClient, transports.FulfillmentsGrpcTransport, "grpc", "false"),
(
FulfillmentsAsyncClient,
transports.FulfillmentsGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
FulfillmentsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FulfillmentsClient)
)
@mock.patch.object(
FulfillmentsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(FulfillmentsAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_fulfillments_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(FulfillmentsClient, transports.FulfillmentsGrpcTransport, "grpc"),
(
FulfillmentsAsyncClient,
transports.FulfillmentsGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_fulfillments_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(FulfillmentsClient, transports.FulfillmentsGrpcTransport, "grpc"),
(
FulfillmentsAsyncClient,
transports.FulfillmentsGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_fulfillments_client_client_options_credentials_file(
client_class, transport_class, transport_name
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_fulfillments_client_client_options_from_dict():
with mock.patch(
"google.cloud.dialogflow_v2.services.fulfillments.transports.FulfillmentsGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = FulfillmentsClient(client_options={"api_endpoint": "squid.clam.whelk"})
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_get_fulfillment(
transport: str = "grpc", request_type=fulfillment.GetFulfillmentRequest
):
client = FulfillmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_fulfillment), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = fulfillment.Fulfillment(
name="name_value",
display_name="display_name_value",
enabled=True,
generic_web_service=fulfillment.Fulfillment.GenericWebService(
uri="uri_value"
),
)
response = client.get_fulfillment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == fulfillment.GetFulfillmentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, fulfillment.Fulfillment)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.enabled is True
def test_get_fulfillment_from_dict():
test_get_fulfillment(request_type=dict)
def test_get_fulfillment_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = FulfillmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_fulfillment), "__call__") as call:
client.get_fulfillment()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == fulfillment.GetFulfillmentRequest()
@pytest.mark.asyncio
async def test_get_fulfillment_async(
transport: str = "grpc_asyncio", request_type=fulfillment.GetFulfillmentRequest
):
client = FulfillmentsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_fulfillment), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
fulfillment.Fulfillment(
name="name_value", display_name="display_name_value", enabled=True,
)
)
response = await client.get_fulfillment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == fulfillment.GetFulfillmentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, fulfillment.Fulfillment)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.enabled is True
@pytest.mark.asyncio
async def test_get_fulfillment_async_from_dict():
await test_get_fulfillment_async(request_type=dict)
def test_get_fulfillment_field_headers():
client = FulfillmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = fulfillment.GetFulfillmentRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_fulfillment), "__call__") as call:
call.return_value = fulfillment.Fulfillment()
client.get_fulfillment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_fulfillment_field_headers_async():
client = FulfillmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = fulfillment.GetFulfillmentRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_fulfillment), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
fulfillment.Fulfillment()
)
await client.get_fulfillment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_fulfillment_flattened():
client = FulfillmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_fulfillment), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = fulfillment.Fulfillment()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_fulfillment(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
def test_get_fulfillment_flattened_error():
client = FulfillmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_fulfillment(
fulfillment.GetFulfillmentRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_fulfillment_flattened_async():
client = FulfillmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_fulfillment), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = fulfillment.Fulfillment()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
fulfillment.Fulfillment()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_fulfillment(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_fulfillment_flattened_error_async():
client = FulfillmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_fulfillment(
fulfillment.GetFulfillmentRequest(), name="name_value",
)
def test_update_fulfillment(
transport: str = "grpc", request_type=gcd_fulfillment.UpdateFulfillmentRequest
):
client = FulfillmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_fulfillment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_fulfillment.Fulfillment(
name="name_value",
display_name="display_name_value",
enabled=True,
generic_web_service=gcd_fulfillment.Fulfillment.GenericWebService(
uri="uri_value"
),
)
response = client.update_fulfillment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_fulfillment.UpdateFulfillmentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_fulfillment.Fulfillment)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.enabled is True
def test_update_fulfillment_from_dict():
test_update_fulfillment(request_type=dict)
def test_update_fulfillment_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = FulfillmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_fulfillment), "__call__"
) as call:
client.update_fulfillment()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_fulfillment.UpdateFulfillmentRequest()
@pytest.mark.asyncio
async def test_update_fulfillment_async(
transport: str = "grpc_asyncio",
request_type=gcd_fulfillment.UpdateFulfillmentRequest,
):
client = FulfillmentsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_fulfillment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcd_fulfillment.Fulfillment(
name="name_value", display_name="display_name_value", enabled=True,
)
)
response = await client.update_fulfillment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_fulfillment.UpdateFulfillmentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcd_fulfillment.Fulfillment)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.enabled is True
@pytest.mark.asyncio
async def test_update_fulfillment_async_from_dict():
await test_update_fulfillment_async(request_type=dict)
def test_update_fulfillment_field_headers():
client = FulfillmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_fulfillment.UpdateFulfillmentRequest()
request.fulfillment.name = "fulfillment.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_fulfillment), "__call__"
) as call:
call.return_value = gcd_fulfillment.Fulfillment()
client.update_fulfillment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "fulfillment.name=fulfillment.name/value",) in kw[
"metadata"
]
@pytest.mark.asyncio
async def test_update_fulfillment_field_headers_async():
client = FulfillmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_fulfillment.UpdateFulfillmentRequest()
request.fulfillment.name = "fulfillment.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_fulfillment), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcd_fulfillment.Fulfillment()
)
await client.update_fulfillment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "fulfillment.name=fulfillment.name/value",) in kw[
"metadata"
]
def test_update_fulfillment_flattened():
client = FulfillmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_fulfillment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_fulfillment.Fulfillment()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_fulfillment(
fulfillment=gcd_fulfillment.Fulfillment(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].fulfillment == gcd_fulfillment.Fulfillment(name="name_value")
assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
def test_update_fulfillment_flattened_error():
client = FulfillmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_fulfillment(
gcd_fulfillment.UpdateFulfillmentRequest(),
fulfillment=gcd_fulfillment.Fulfillment(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_fulfillment_flattened_async():
client = FulfillmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_fulfillment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcd_fulfillment.Fulfillment()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcd_fulfillment.Fulfillment()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_fulfillment(
fulfillment=gcd_fulfillment.Fulfillment(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].fulfillment == gcd_fulfillment.Fulfillment(name="name_value")
assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
@pytest.mark.asyncio
async def test_update_fulfillment_flattened_error_async():
client = FulfillmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_fulfillment(
gcd_fulfillment.UpdateFulfillmentRequest(),
fulfillment=gcd_fulfillment.Fulfillment(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.FulfillmentsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = FulfillmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.FulfillmentsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = FulfillmentsClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.FulfillmentsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = FulfillmentsClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.FulfillmentsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = FulfillmentsClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.FulfillmentsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.FulfillmentsGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.FulfillmentsGrpcTransport,
transports.FulfillmentsGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = FulfillmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.FulfillmentsGrpcTransport,)
def test_fulfillments_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.FulfillmentsTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_fulfillments_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.dialogflow_v2.services.fulfillments.transports.FulfillmentsTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.FulfillmentsTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"get_fulfillment",
"update_fulfillment",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
@requires_google_auth_gte_1_25_0
def test_fulfillments_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.dialogflow_v2.services.fulfillments.transports.FulfillmentsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.FulfillmentsTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
@requires_google_auth_lt_1_25_0
def test_fulfillments_base_transport_with_credentials_file_old_google_auth():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.dialogflow_v2.services.fulfillments.transports.FulfillmentsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.FulfillmentsTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
def test_fulfillments_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.dialogflow_v2.services.fulfillments.transports.FulfillmentsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.FulfillmentsTransport()
adc.assert_called_once()
@requires_google_auth_gte_1_25_0
def test_fulfillments_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
FulfillmentsClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id=None,
)
@requires_google_auth_lt_1_25_0
def test_fulfillments_auth_adc_old_google_auth():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
FulfillmentsClient()
adc.assert_called_once_with(
scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.FulfillmentsGrpcTransport,
transports.FulfillmentsGrpcAsyncIOTransport,
],
)
@requires_google_auth_gte_1_25_0
def test_fulfillments_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class",
[
transports.FulfillmentsGrpcTransport,
transports.FulfillmentsGrpcAsyncIOTransport,
],
)
@requires_google_auth_lt_1_25_0
def test_fulfillments_transport_auth_adc_old_google_auth(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus")
adc.assert_called_once_with(
scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.FulfillmentsGrpcTransport, grpc_helpers),
(transports.FulfillmentsGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_fulfillments_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"dialogflow.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
scopes=["1", "2"],
default_host="dialogflow.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[transports.FulfillmentsGrpcTransport, transports.FulfillmentsGrpcAsyncIOTransport],
)
def test_fulfillments_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_fulfillments_host_no_port():
client = FulfillmentsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="dialogflow.googleapis.com"
),
)
assert client.transport._host == "dialogflow.googleapis.com:443"
def test_fulfillments_host_with_port():
client = FulfillmentsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="dialogflow.googleapis.com:8000"
),
)
assert client.transport._host == "dialogflow.googleapis.com:8000"
def test_fulfillments_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.FulfillmentsGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
assert transport._ssl_channel_credentials is None
def test_fulfillments_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.FulfillmentsGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.FulfillmentsGrpcTransport, transports.FulfillmentsGrpcAsyncIOTransport],
)
def test_fulfillments_transport_channel_mtls_with_client_cert_source(transport_class):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.FulfillmentsGrpcTransport, transports.FulfillmentsGrpcAsyncIOTransport],
)
def test_fulfillments_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_fulfillment_path():
project = "squid"
expected = "projects/{project}/agent/fulfillment".format(project=project,)
actual = FulfillmentsClient.fulfillment_path(project)
assert expected == actual
def test_parse_fulfillment_path():
expected = {
"project": "clam",
}
path = FulfillmentsClient.fulfillment_path(**expected)
# Check that the path construction is reversible.
actual = FulfillmentsClient.parse_fulfillment_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "whelk"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = FulfillmentsClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "octopus",
}
path = FulfillmentsClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = FulfillmentsClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "oyster"
expected = "folders/{folder}".format(folder=folder,)
actual = FulfillmentsClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "nudibranch",
}
path = FulfillmentsClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = FulfillmentsClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "cuttlefish"
expected = "organizations/{organization}".format(organization=organization,)
actual = FulfillmentsClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "mussel",
}
path = FulfillmentsClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = FulfillmentsClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "winkle"
expected = "projects/{project}".format(project=project,)
actual = FulfillmentsClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "nautilus",
}
path = FulfillmentsClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = FulfillmentsClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "scallop"
location = "abalone"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = FulfillmentsClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "squid",
"location": "clam",
}
path = FulfillmentsClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = FulfillmentsClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.FulfillmentsTransport, "_prep_wrapped_messages"
) as prep:
client = FulfillmentsClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.FulfillmentsTransport, "_prep_wrapped_messages"
) as prep:
transport_class = FulfillmentsClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = FulfillmentsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = FulfillmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = FulfillmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
|
py | b402a64499fd09f5328cce0ed16c7f47a5999d60 | #
# Copyright (c) 2011-2014 Exxeleron GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import struct
try:
from cStringIO import BytesIO
except ImportError:
from io import BytesIO
from qpython import MetaData, CONVERSION_OPTIONS
from qpython.qtype import * # @UnusedWildImport
from qpython.qcollection import qlist, QList, QTemporalList, QDictionary, QTable, QKeyedTable, get_list_qtype
from qpython.qtemporal import QTemporal, to_raw_qtemporal, array_to_raw_qtemporal
class QWriterException(Exception):
'''
Indicates an error raised during data serialization.
'''
pass
ENDIANNESS = '\1' if sys.byteorder == 'little' else '\0'
class QWriter(object):
'''
Provides serialization to q IPC protocol.
:Parameters:
- `stream` (`socket` or `None`) - stream for data serialization
- `protocol_version` (`integer`) - version of the IPC protocol
- `encoding` (`string`) - encoding used for character serialization
:Attributes:
- `_writer_map` - stores mapping between Python types and functions
responsible for serializing into IPC representation
'''
_writer_map = {}
serialize = Mapper(_writer_map)
def __init__(self, stream, protocol_version, encoding = 'latin-1'):
self._stream = stream
self._protocol_version = protocol_version
self._encoding = encoding
def write(self, data, msg_type, **options):
'''Serializes and pushes single data object to a wrapped stream.
:Parameters:
- `data` - data to be serialized
- `msg_type` (one of the constants defined in :class:`.MessageType`) -
type of the message
:Options:
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:returns: if the wrapped stream is ``None``, the serialized data;
otherwise ``None``
'''
self._buffer = BytesIO()
self._options = MetaData(**CONVERSION_OPTIONS.union_dict(**options))
# header and placeholder for message size
self._buffer.write(('%s%s\0\0\0\0\0\0' % (ENDIANNESS, chr(msg_type))).encode(self._encoding))
self._write(data)
# update message size
data_size = self._buffer.tell()
self._buffer.seek(4)
self._buffer.write(struct.pack('i', data_size))
# write data to socket
if self._stream:
self._stream.sendall(self._buffer.getvalue())
else:
return self._buffer.getvalue()
def _write(self, data):
if data is None:
self._write_null()
else:
if isinstance(data, Exception) or (type(data) == type and issubclass(data, Exception)):
data_type = Exception
else:
data_type = type(data)
writer = self._get_writer(data_type)
if writer:
writer(self, data)
else:
qtype = Q_TYPE.get(type(data), None)
if qtype:
self._write_atom(data, qtype)
else:
raise QWriterException('Unable to serialize type: %s' % (data.__class__ if isinstance(data, object) else type(data)))
def _get_writer(self, data_type):
return self._writer_map.get(data_type, None)
def _write_null(self):
self._buffer.write(struct.pack('=bx', QNULL))
@serialize(Exception)
def _write_error(self, data):
self._buffer.write(struct.pack('b', QERROR))
if isinstance(data, Exception):
msg = data.__class__.__name__
if data.args:
msg = data.args[0]
else:
msg = data.__name__
self._buffer.write(msg.encode(self._encoding))
self._buffer.write(b'\0')
def _write_atom(self, data, qtype):
try:
self._buffer.write(struct.pack('b', qtype))
fmt = STRUCT_MAP[qtype]
if type(data) == numpy.bool_:
self._buffer.write(struct.pack(fmt, int(data)))
else:
self._buffer.write(struct.pack(fmt, data))
except KeyError:
raise QWriterException('Unable to serialize type: %s' % (data.__class__ if isinstance(data, object) else type(data)))
@serialize(tuple, list)
def _write_generic_list(self, data):
self._buffer.write(struct.pack('=bxi', QGENERAL_LIST, len(data)))
for element in data:
self._write(element)
@serialize(str, bytes)
def _write_string(self, data):
if not self._options.single_char_strings and len(data) == 1:
self._write_atom(ord(data), QCHAR)
else:
self._buffer.write(struct.pack('=bxi', QSTRING, len(data)))
if isinstance(data, str):
self._buffer.write(data.encode(self._encoding))
else:
self._buffer.write(data)
@serialize(numpy.string_)
def _write_symbol(self, data):
self._buffer.write(struct.pack('=b', QSYMBOL))
if data:
self._buffer.write(data)
self._buffer.write(b'\0')
@serialize(uuid.UUID)
def _write_guid(self, data):
if self._protocol_version < 3:
raise QWriterException('kdb+ protocol version violation: Guid not supported pre kdb+ v3.0')
self._buffer.write(struct.pack('=b', QGUID))
self._buffer.write(data.bytes)
@serialize(QTemporal)
def _write_temporal(self, data):
try:
if self._protocol_version < 1 and (data.meta.qtype == QTIMESPAN or data.meta.qtype == QTIMESTAMP):
raise QWriterException('kdb+ protocol version violation: data type %s not supported pre kdb+ v2.6' % hex(data.meta.qtype))
self._buffer.write(struct.pack('=b', data.meta.qtype))
fmt = STRUCT_MAP[data.meta.qtype]
self._buffer.write(struct.pack(fmt, to_raw_qtemporal(data.raw, data.meta.qtype)))
except KeyError:
raise QWriterException('Unable to serialize type: %s' % type(data))
@serialize(numpy.datetime64, numpy.timedelta64)
def _write_numpy_temporal(self, data):
try:
qtype = TEMPORAL_PY_TYPE[str(data.dtype)]
if self._protocol_version < 1 and (qtype == QTIMESPAN or qtype == QTIMESTAMP):
raise QWriterException('kdb+ protocol version violation: data type %s not supported pre kdb+ v2.6' % hex(qtype))
self._buffer.write(struct.pack('=b', qtype))
fmt = STRUCT_MAP[qtype]
self._buffer.write(struct.pack(fmt, to_raw_qtemporal(data, qtype)))
except KeyError:
raise QWriterException('Unable to serialize type: %s' % data.dtype)
@serialize(QLambda)
def _write_lambda(self, data):
self._buffer.write(struct.pack('=b', QLAMBDA))
self._buffer.write(b'\0')
self._write_string(data.expression)
@serialize(QProjection)
def _write_projection(self, data):
self._buffer.write(struct.pack('=bi', QPROJECTION, len(data.parameters)))
for parameter in data.parameters:
self._write(parameter)
@serialize(QDictionary, QKeyedTable)
def _write_dictionary(self, data):
self._buffer.write(struct.pack('=b', QDICTIONARY))
self._write(data.keys)
self._write(data.values)
@serialize(QTable)
def _write_table(self, data):
self._buffer.write(struct.pack('=bxb', QTABLE, QDICTIONARY))
self._write(qlist(numpy.array(data.dtype.names), qtype = QSYMBOL_LIST))
self._buffer.write(struct.pack('=bxi', QGENERAL_LIST, len(data.dtype)))
for column in data.dtype.names:
self._write_list(data[column], data.meta[column])
@serialize(numpy.ndarray, QList, QTemporalList)
def _write_list(self, data, qtype = None):
if qtype is not None:
qtype = -abs(qtype)
if qtype is None:
qtype = get_list_qtype(data)
if self._protocol_version < 1 and (abs(qtype) == QTIMESPAN_LIST or abs(qtype) == QTIMESTAMP_LIST):
raise QWriterException('kdb+ protocol version violation: data type %s not supported pre kdb+ v2.6' % hex(data.meta.qtype))
if qtype == QGENERAL_LIST:
self._write_generic_list(data)
elif qtype == QCHAR:
self._write_string(data.tobytes())
else:
self._buffer.write(struct.pack('=bxi', -qtype, len(data)))
if data.dtype.type in (numpy.datetime64, numpy.timedelta64):
# convert numpy temporal to raw q temporal
data = array_to_raw_qtemporal(data, qtype = qtype)
if qtype == QSYMBOL:
for symbol in data:
if symbol:
self._buffer.write(symbol)
self._buffer.write(b'\0')
elif qtype == QGUID:
if self._protocol_version < 3:
raise QWriterException('kdb+ protocol version violation: Guid not supported pre kdb+ v3.0')
for guid in data:
self._buffer.write(guid.bytes)
else:
self._buffer.write(data.tobytes())
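# Illustrative usage sketch added for this write-up (not part of the original
# qpython module). It assumes the qpython package is installed and that
# MessageType is importable from qpython.qconnection; with stream=None,
# write() returns the serialized bytes instead of sending them to a socket.
if __name__ == '__main__':
    from qpython.qconnection import MessageType
    writer = QWriter(stream=None, protocol_version=3)
    payload = writer.write([1, 2, 3], MessageType.SYNC)
    print('serialized %d bytes' % len(payload))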
|
py | b402a79b5f61462debcc94db0d17b102bf4d8b9f | from .basemodule import BaseModule |
py | b402a7f5ebd59ada5ae94c3f419377a281bf5dc3 | from __future__ import absolute_import
from .base import Struct
import os.path as op
import six
##
# 16.06.2005, c
class Reader( Struct ):
"""
Reads and executes a Python file as a script with execfile(), storing its
locals. Then sets the __dict__ of a new instance of obj_class to the stored
locals.
Example:
>>> class A:
>>> pass
>>> read = Reader( '.' )
>>> instance_of_a = read( A, 'file.py' )
It is equivalent to:
>>> mod = __import__( 'file' )
>>> instance_of_a = A()
>>> instance_of_a.__dict__.update( mod.__dict__ )
The first way does not create the 'file.pyc'...
"""
##
# 16.06.2005, c
def __init__( self, directory ):
self.directory = directory
##
# 16.06.2005, c
# 17.10.2005
# 09.02.2006
def __call__( self, obj_class, name ):
filename = op.join( self.directory, name + '.py' )
aux = {}
# execfile() is unavailable in Python 3; execute the compiled source instead.
with open( filename ) as fle:
    exec( compile( fle.read(), filename, 'exec' ), {}, aux )
obj = obj_class()
for key, val in six.iteritems(aux):
obj.__dict__[key] = val
return obj
|
py | b402a834c10ffb54e5a871d8c019b2bad7056cb3 | from allennlp_models.rc.qanet.drop_reader import DropReader
from allennlp_models.rc.qanet.naqanet_model import NumericallyAugmentedQaNet
from allennlp_models.rc.qanet.qanet_model import QaNet
from allennlp_models.rc.qanet.qanet_encoder import QaNetEncoder
from allennlp_models.rc.qanet.qanet_encoder import QaNetEncoderBlock
|
py | b402a8c6e8c36d29c95f7bc35f7774e8e74bfd34 | import graphene
from graphene import relay
from ...page import models
from ..core.connection import CountableDjangoObjectType
from ..translations.enums import LanguageCodeEnum
from ..translations.resolvers import resolve_translation
from ..translations.types import PageTranslation
class Page(CountableDjangoObjectType):
available_on = graphene.Date(
deprecation_reason=("availableOn is deprecated, use publicationDate instead")
)
is_visible = graphene.Boolean(
deprecation_reason=("isVisible is deprecated, use isPublished instead")
)
translation = graphene.Field(
PageTranslation,
language_code=graphene.Argument(
LanguageCodeEnum,
description="A language code to return the translation for.",
required=True,
),
description=("Returns translated Page fields for the given language code."),
resolver=resolve_translation,
)
class Meta:
description = """A static page that can be manually added by a shop
operator through the dashboard."""
only_fields = [
"content",
"content_json",
"created",
"id",
"is_published",
"publication_date",
"seo_description",
"seo_title",
"slug",
"title",
]
interfaces = [relay.Node]
model = models.Page
@staticmethod
def resolve_available_on(root: models.Page, _info):
return root.publication_date
@staticmethod
def resolve_is_visible(root: models.Page, _info):
return root.is_published
|
py | b402a926326ced8406323b0807d32ffae6c7c999 |
# Customize the packaging of the code for the jpackage here if required; this will also generate the buildnr.
def main(j,jp):
recipe=jp.getCodeMgmtRecipe()
recipe.package(jp)
|
py | b402aa96f2a22ead078183f7e18951713d32caa2 | # Tencent is pleased to support the open source community by making ncnn available.
#
# Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.act_0 = nn.LeakyReLU()
self.act_1 = nn.LeakyReLU(negative_slope=-0.24)
def forward(self, x, y, z, w):
x = self.act_0(x)
y = self.act_0(y)
z = self.act_1(z)
w = self.act_1(w)
return x, y, z, w
def test():
net = Model()
net.eval()
torch.manual_seed(0)
x = torch.rand(1, 12)
y = torch.rand(1, 12, 64)
z = torch.rand(1, 12, 24, 64)
w = torch.rand(1, 12, 24, 32, 64)
a = net(x, y, z, w)
# export torchscript
mod = torch.jit.trace(net, (x, y, z, w))
mod.save("test_nn_LeakyReLU.pt")
# torchscript to pnnx
import os
os.system("../../src/pnnx test_nn_LeakyReLU.pt inputshape=[1,12],[1,12,64],[1,12,24,64],[1,12,24,32,64]")
# ncnn inference
import test_nn_LeakyReLU_ncnn
b = test_nn_LeakyReLU_ncnn.test_inference()
for a0, b0 in zip(a, b):
if not torch.allclose(a0, b0, 1e-4, 1e-4):
return False
return True
if __name__ == "__main__":
if test():
exit(0)
else:
exit(1)
|
py | b402aaa78538f93c4064f6a4eb93127ad8c8ee8f | import numpy as np
def softmax(x):
"""Compute softmax values for each sets of scores in x.
https://stackoverflow.com/questions/34968722/how-to-implement-the-softmax-function-in-python
"""
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum() |
py | b402ac017903b32265bca6ccf5b14bfb232a412c | from adaptivefiltering.paths import load_schema
import collections
import ipywidgets
import ipywidgets_jsonschema
import jsonschema
import re
class WidgetFormWithLabels(ipywidgets_jsonschema.Form):
def __init__(self, *args, **kwargs):
"""A widget form that creates a label selection widget for arrays of strings
All other functionality is inherited from :code:`ipywidgets_jsonschema.Form`.
"""
super().__init__(*args, **kwargs)
def _construct_array(self, schema, label=None, root=False):
if "items" not in schema:
raise ipywidgets_jsonschema.form.FormError(
"Expecting 'items' key for 'array' type"
)
# Assert a number of conditions that must be true for us
# to create a label widget instead of the regular array
if (
"type" not in schema["items"]
or schema["items"]["type"] != "string"
or "maxItems" in schema["items"]
or "minItems" in schema["items"]
):
return super()._construct_array(schema, label=label, root=root)
# List of widgets for later use in VBox
widgets = []
if "title" in schema:
widgets.append(ipywidgets.Label(schema["title"]))
# Create the relevant widget
widget = ipywidgets.TagsInput(
value=[], allow_duplicates=False, tooltip=schema.get("description", None)
)
widgets.append(widget)
# Function to check a potential given pattern
def _change_checker(change):
if "pattern" in schema["items"]:
for val in change["new"]:
if not re.fullmatch(schema["items"]["pattern"], val):
widget.value = [i for i in widget.value if i != val]
widget.observe(_change_checker, names="value")
def _register_observer(h, n, t):
widget.observe(h, names=n, type=t)
def _setter(_d):
widget.value = _d
def _resetter():
if "default" in schema:
widget.value = schema["default"]
else:
widget.value = widget.trait_defaults()["value"]
_resetter()
return self.construct_element(
getter=lambda: widget.value,
setter=_setter,
resetter=_resetter,
widgets=[ipywidgets.VBox(widgets)],
register_observer=_register_observer,
)
BatchDataWidgetFormElement = collections.namedtuple(
"BatchDataWidgetFormElement",
[
"getter",
"setter",
"resetter",
"widgets",
"subelements",
"batchdata_getter",
"batchdata_setter",
"register_observer",
],
)
class BatchDataWidgetForm(WidgetFormWithLabels):
def __init__(self, *args, nobatch_keys=[], **kwargs):
"""A widget form that wraps additional batch controls around widgets
The additional controls affect all scalar fields (strings, integers, numbers).
"""
self.nobatch_keys = nobatch_keys
self.disable_batching = False
super().__init__(*args, **kwargs)
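# Usage sketch (illustrative comment, not part of the original module).
# It assumes `schema` is a JSON schema dict accepted by
# ipywidgets_jsonschema.Form; the form then renders the usual widgets plus
# the batch/variability toggles, and the collected batch specification can
# be read back through the `batchdata` property defined below:
#
#     form = BatchDataWidgetForm(schema, nobatch_keys=["type"])
#     spec = form.batchdata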
def construct_element(
self,
getter=lambda: None,
setter=lambda _: None,
resetter=lambda: None,
widgets=[],
subelements=[],
batchdata_getter=lambda: [],
batchdata_setter=lambda _: None,
register_observer=lambda h, n, t: None,
):
return BatchDataWidgetFormElement(
getter=getter,
setter=setter,
resetter=resetter,
widgets=widgets,
subelements=subelements,
batchdata_getter=batchdata_getter,
batchdata_setter=batchdata_setter,
register_observer=register_observer,
)
@property
def batchdata(self):
bdata = self._form_element.batchdata_getter()
schema = load_schema("variability.json")
jsonschema.validate(bdata, schema=schema)
return bdata
@batchdata.setter
def batchdata(self, _data):
self._form_element.batchdata_setter(_data)
def _construct_simple(self, schema, widget, label=None, root=False):
# Call the original implementation to get the basic widget
original = super()._construct_simple(schema, widget, label=label, root=root)
# If we blacklisted this part of the schema, we skip it now
if self.disable_batching:
return original
# If this is something that for some reason did not produce an input
# widget, we skip all the variablity part.
if len(original.widgets) == 0:
return original
# Create additional controls for batch processing and variability
# Two buttons that allow to create the additional input
b1 = ipywidgets.ToggleButton(
icon="layer-group", tooltip="Use a parameter batch for this parameter"
)
b2 = ipywidgets.ToggleButton(
icon="sitemap", tooltip="Add a variability to this parameter"
)
# The widget where the variablility input is specified
var = ipywidgets.Text(
tooltip="Use comma separation to specify a discrete set of parameters or dashes to define a parameter range"
)
# For persisitent variability, we also need some additional information
name = ipywidgets.Text(
tooltip="The parameter name to use for this variability. Will be displayed to the end user."
)
descr = ipywidgets.Text(
tooltip="The description of this parameter that will be displayed to the end user when hovering over the parameter."
)
# A container widget that allows us to easily make the input widget vanish
box = ipywidgets.VBox()
# The handler that unfolds the input widget if necessary
def handler(change):
# Make sure that the two toggle buttons are mutually exclusive
if b1.value and b2.value:
for b in [b1, b2]:
if b is not change.owner:
b.value = False
return
# Make sure that if either button is pressed, we display the input widget
if b1.value:
box.children = (ipywidgets.VBox([ipywidgets.Label("Values:"), var]),)
elif b2.value:
box.children = (
ipywidgets.VBox([ipywidgets.Label("Values:"), var]),
ipywidgets.VBox([ipywidgets.Label("Name:"), name]),
ipywidgets.VBox([ipywidgets.Label("Description:"), descr]),
)
else:
box.children = ()
b1.observe(handler, names="value")
b2.observe(handler, names="value")
# Modify the original widgets to also include our modifications
original.widgets[0].children[-1].layout = ipywidgets.Layout(width="70%")
b1.layout = ipywidgets.Layout(width="15%")
b2.layout = ipywidgets.Layout(width="15%")
original.widgets[0].children = original.widgets[0].children[:-1] + (
ipywidgets.HBox([original.widgets[0].children[-1], b1, b2]),
)
original.widgets[0].children = original.widgets[0].children + (box,)
# Lazy evaluation of the batch data
def _getter():
ret = []
# Only record a variation if one of our buttons is pressed
if b1.value or b2.value:
ret.append(
{
"values": var.value,
"persist": b2.value,
"path": [],
"name": name.value,
"description": descr.value,
"type": schema["type"],
}
)
return ret
def _setter(_data):
assert len(_data) == 1
var.value = _data[0]["values"]
name.value = _data[0]["name"]
descr.value = _data[0]["description"]
if _data[0].get("persist", False):
b2.value = True
else:
b1.value = True
def _register_observer(h, n, t):
original.register_observer(h, n, t)
b1.observe(h, names=n, type=t)
b2.observe(h, names=n, type=t)
var.observe(h, names=n, type=t)
name.observe(h, names=n, type=t)
descr.observe(h, names=n, type=t)
def _resetter():
original.resetter()
b1.value = False
b2.value = False
var.value = ""
# Wrap the result in our new form element
return self.construct_element(
getter=original.getter,
setter=original.setter,
resetter=_resetter,
widgets=original.widgets,
batchdata_getter=_getter,
batchdata_setter=_setter,
register_observer=_register_observer,
)
def _construct_object(self, schema, label=None, root=False):
if label in self.nobatch_keys:
self.disable_batching = True
original = super()._construct_object(schema, label=label, root=root)
self.disable_batching = False
def _getter():
ret = []
# Iterate over the subelements and update their path
for key, subel in original.subelements.items():
data = subel.batchdata_getter()
for d in data:
d["path"].append({"key": key})
ret.extend(data)
return ret
def _setter(_data):
for _d in _data:
key = _d["path"][0]["key"]
_d["path"] = _d["path"][1:]
original.subelements[key].batchdata_setter([_d])
return self.construct_element(
getter=original.getter,
setter=original.setter,
resetter=original.resetter,
widgets=original.widgets,
subelements=original.subelements,
batchdata_getter=_getter,
batchdata_setter=_setter,
register_observer=original.register_observer,
)
def _construct_array(self, schema, label=None, root=False):
original = super()._construct_array(schema, label=label, root=root)
def _getter():
ret = []
for i, subel in enumerate(original.subelements[: len(original.getter())]):
data = subel.batchdata_getter()
for d in data:
d["path"].append({"index": i})
ret.extend(data)
return ret
def _setter(_data):
for _d in _data:
index = _d["path"][0]["index"]
_d["path"] = _d["path"][1:]
original.subelements[index].batchdata_setter([_d])
return self.construct_element(
getter=original.getter,
setter=original.setter,
resetter=original.resetter,
widgets=original.widgets,
subelements=original.subelements,
batchdata_getter=_getter,
batchdata_setter=_setter,
register_observer=original.register_observer,
)
def _construct_anyof(self, schema, label=None, key="anyOf"):
original = super()._construct_anyof(schema, label, key)
selector = original.widgets[0].children[-1].children[0]
def _setter(_data):
for subel in original.subelements:
try:
subel.batchdata_setter(_data)
return
except (KeyError, IndexError):
pass
raise ipywidgets_jsonschema.form.FormError(
"Cannot restore batchdata in anyOf schema"
)
return self.construct_element(
getter=original.getter,
setter=original.setter,
resetter=original.resetter,
widgets=original.widgets,
subelements=original.subelements,
batchdata_getter=lambda: original.subelements[
selector.index
].batchdata_getter(),
batchdata_setter=_setter,
register_observer=original.register_observer,
)
|
py | b402ac31aae008407b42c4036c28cc8c3f79236d | from collections import defaultdict, namedtuple
from enum import Enum
from itertools import cycle
import math
import os.path
import sys
from symspellpy.editdistance import DistanceAlgorithm, EditDistance
import symspellpy.helpers as helpers
class Verbosity(Enum):
"""Controls the closeness/quantity of returned spelling suggestions."""
# Top suggestion with the highest term frequency of the suggestions of
# smallest edit distance found.
TOP = 0
# All suggestions of smallest edit distance found, suggestions ordered by
# term frequency.
CLOSEST = 1
# All suggestions within maxEditDistance, suggestions ordered by edit
# distance, then by term frequency (slower, no early termination).
ALL = 2
class SymSpell(object):
def __init__(self, initial_capacity=16, max_dictionary_edit_distance=2,
prefix_length=7, count_threshold=1, compact_level=5):
"""Create a new instance of SymSpell.
Specifying an accurate initial_capacity is not essential, but it can
help speed up processing by aleviating the need for data
restructuring as the size grows.
Keyword arguments:
initial_capacity -- The expected number of words in
dictionary. (default 16)
max_dictionary_edit_distance -- Maximum edit distance for doing
lookups. (default 2)
prefix_length -- The length of word prefixes used for spell
checking. (default 7)
count_threshold -- The minimum frequency count for dictionary words
to be considered correct spellings. (default 1)
compact_level -- Degree of favoring lower memory use over speed
(0 = fastest, most memory; 16 = slowest, least memory). (default 5)
"""
if initial_capacity < 0:
raise ValueError("initial_capacity cannot be negative")
if max_dictionary_edit_distance < 0:
raise ValueError("max_dictionary_edit_distance cannot be negative")
if prefix_length < 1 or prefix_length <= max_dictionary_edit_distance:
raise ValueError("prefix_length cannot be less than 1 or "
"smaller than max_dictionary_edit_distance")
if count_threshold < 0:
raise ValueError("count_threshold cannot be negative")
if compact_level < 0 or compact_level > 16:
raise ValueError("compact_level must be between 0 and 16")
self._initial_capacity = initial_capacity
self._words = dict()
self._below_threshold_words = dict()
self._deletes = defaultdict(list)
self._max_dictionary_edit_distance = max_dictionary_edit_distance
self._prefix_length = prefix_length
self._count_threshold = count_threshold
self._compact_mask = (0xFFFFFFFF >> (3 + min(compact_level, 16))) << 2
self._distance_algorithm = DistanceAlgorithm.DAMERUAUOSA
self._max_length = 0
self._replaced_words = dict()
def create_dictionary_entry(self, key, count):
"""Create/Update an entry in the dictionary.
For every word there are deletes with an edit distance of
1..max_edit_distance created and added to the dictionary. Every delete
entry has a suggestions list, which points to the original term(s) it
was created from. The dictionary may be dynamically updated (word
frequency and new words) at any time by calling
create_dictionary_entry
Keyword arguments:
key -- The word to add to dictionary.
count -- The frequency count for word.
Return:
True if the word was added as a new correctly spelled word, or
False if the word is added as a below threshold word, or updates an
existing correctly spelled word.
"""
if count <= 0:
# no point doing anything if count is zero, as it can't change
# anything
if self._count_threshold > 0:
return False
count = 0
# look first in below threshold words, update count, and allow
# promotion to correct spelling word if count reaches threshold
# threshold must be >1 for there to be the possibility of low threshold
# words
if self._count_threshold > 1 and key in self._below_threshold_words:
count_previous = self._below_threshold_words[key]
# calculate new count for below threshold word
count = (count_previous + count
if sys.maxsize - count_previous > count
else sys.maxsize)
# has reached threshold - remove from below threshold collection
# (it will be added to correct words below)
if count >= self._count_threshold:
self._below_threshold_words.pop(key)
else:
self._below_threshold_words[key] = count
return False
elif key in self._words:
count_previous = self._words[key]
# just update count if it's an already added above threshold word
count = (count_previous + count
if sys.maxsize - count_previous > count
else sys.maxsize)
self._words[key] = count
return False
elif count < self._count_threshold:
# new or existing below threshold word
self._below_threshold_words[key] = count
return False
# what we have at this point is a new, above threshold word
self._words[key] = count
# edits/suggestions are created only once, no matter how often word
# occurs. edits/suggestions are created as soon as the word occurs
# in the corpus, even if the same term existed before in the
# dictionary as an edit from another word
if len(key) > self._max_length:
self._max_length = len(key)
# create deletes
edits = self._edits_prefix(key)
for delete in edits:
delete_hash = self._get_str_hash(delete)
self._deletes[delete_hash].append(key)
return True
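# Example sketch (illustrative comment, not part of the original symspellpy
# source): repeated calls merge counts for the same key, and words whose
# cumulative count is still below count_threshold stay in the
# below-threshold collection until they reach it.
#
#     sym_spell = SymSpell(max_dictionary_edit_distance=2, prefix_length=7)
#     sym_spell.create_dictionary_entry("hello", 5)
#     sym_spell.create_dictionary_entry("hello", 3)  # stored count becomes 8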
def load_dictionary(self, corpus, term_index, count_index, encoding=None):
"""Load multiple dictionary entries from a file of word/frequency
count pairs. Merges with any dictionary data already loaded.
Keyword arguments:
corpus -- The path+filename of the file.
term_index -- The column position of the word.
count_index -- The column position of the frequency count.
encoding -- Text encoding of the dictionary file
Return:
True if file loaded, or False if file not found.
"""
if not os.path.exists(corpus):
return False
with open(corpus, "r", encoding=encoding) as infile:
for line in infile:
line_parts = line.rstrip().split(" ")
if len(line_parts) >= 2:
key = line_parts[term_index]
count = helpers.try_parse_int64(line_parts[count_index])
if count is not None:
self.create_dictionary_entry(key, count)
return True
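# Example sketch (illustrative comment; the file name below is hypothetical):
# a frequency dictionary with lines such as "the 23135851162" can be loaded
# with term_index=0 and count_index=1 and then queried via lookup():
#
#     if sym_spell.load_dictionary("frequency_dictionary_en.txt",
#                                  term_index=0, count_index=1):
#         suggestions = sym_spell.lookup("helo", Verbosity.CLOSEST,
#                                        max_edit_distance=2)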
def lookup(self, phrase, verbosity, max_edit_distance=None,
include_unknown=False):
"""Find suggested spellings for a given phrase word.
Keyword arguments:
phrase -- The word being spell checked.
verbosity -- The value controlling the quantity/closeness of the
returned suggestions.
max_edit_distance -- The maximum edit distance between phrase and
suggested words.
include_unknown -- Include phrase word in suggestions, if no words
within edit distance found.
Return:
A list of SuggestItem object representing suggested correct spellings
for the phrase word, sorted by edit distance, and secondarily by count
frequency.
"""
if max_edit_distance is None:
max_edit_distance = self._max_dictionary_edit_distance
if max_edit_distance > self._max_dictionary_edit_distance:
raise ValueError("Distance too large")
suggestions = list()
phrase_len = len(phrase)
def early_exit():
if include_unknown and not suggestions:
suggestions.append(SuggestItem(phrase, max_edit_distance + 1,
0))
return suggestions
# early exit - word is too big to possibly match any words
if phrase_len - max_edit_distance > self._max_length:
return early_exit()
# quick look for exact match
suggestion_count = 0
if phrase in self._words:
suggestion_count = self._words[phrase]
suggestions.append(SuggestItem(phrase, 0, suggestion_count))
# early exit - return exact match, unless caller wants all matches
if verbosity != Verbosity.ALL:
return early_exit()
# early termination, if we only want to check if word in dictionary or
# get its frequency e.g. for word segmentation
if max_edit_distance == 0:
return early_exit()
considered_deletes = set()
considered_suggestions = set()
# we considered the phrase already in the 'phrase in self._words' above
considered_suggestions.add(phrase)
max_edit_distance_2 = max_edit_distance
candidate_pointer = 0
candidates = list()
# add original prefix
phrase_prefix_len = phrase_len
if phrase_prefix_len > self._prefix_length:
phrase_prefix_len = self._prefix_length
candidates.append(phrase[: phrase_prefix_len])
else:
candidates.append(phrase)
distance_comparer = EditDistance(self._distance_algorithm)
while candidate_pointer < len(candidates):
candidate = candidates[candidate_pointer]
candidate_pointer += 1
candidate_len = len(candidate)
len_diff = phrase_prefix_len - candidate_len
# early termination: if candidate distance is already higher than
# suggestion distance, then there are no better suggestions to be
# expected
if len_diff > max_edit_distance_2:
# skip to next candidate if Verbosity.ALL, look no
# further if Verbosity.TOP or CLOSEST (candidates are
# ordered by delete distance, so none are closer than current)
if verbosity == Verbosity.ALL:
continue
break
if self._get_str_hash(candidate) in self._deletes:
dict_suggestions = self._deletes[self._get_str_hash(candidate)]
for suggestion in dict_suggestions:
if suggestion == phrase:
continue
suggestion_len = len(suggestion)
# phrase and suggestion lengths diff > allowed/current best
# distance
if (abs(suggestion_len - phrase_len) > max_edit_distance_2
# suggestion must be for a different delete string,
# in same bin only because of hash collision
or suggestion_len < candidate_len
# if suggestion len = delete len, then it either
# equals delete or is in same bin only because of
# hash collision
or (suggestion_len == candidate_len
and suggestion != candidate)):
continue
suggestion_prefix_len = min(suggestion_len,
self._prefix_length)
if (suggestion_prefix_len > phrase_prefix_len
and suggestion_prefix_len - candidate_len > max_edit_distance_2):
continue
# True Damerau-Levenshtein Edit Distance: adjust distance,
# if both distances>0
# We allow simultaneous edits (deletes) of max_edit_distance
# on both the dictionary and the phrase term.
# For replaces and adjacent transposes the resulting edit
# distance stays <= max_edit_distance.
# For inserts and deletes the resulting edit distance might
# exceed max_edit_distance.
# To prevent suggestions of a higher edit distance, we need
# to calculate the resulting edit distance, if there are
# simultaneous edits on both sides.
# Example: (bank==bnak and bank==bink, but bank!=kanb and
# bank!=xban and bank!=baxn for max_edit_distance=1)
# Two deletes on each side of a pair makes them all equal,
# but the first two pairs have edit distance=1, the others
# edit distance=2.
distance = 0
min_distance = 0
if candidate_len == 0:
# suggestions which have no common chars with phrase
# (phrase_len<=max_edit_distance &&
# suggestion_len<=max_edit_distance)
distance = max(phrase_len, suggestion_len)
if (distance > max_edit_distance_2
or suggestion in considered_suggestions):
continue
elif suggestion_len == 1:
distance = (phrase_len
if phrase.find(suggestion[0]) < 0
else phrase_len - 1)
if (distance > max_edit_distance_2
or suggestion in considered_suggestions):
continue
# number of edits in prefix == max_edit_distance AND no
# identical suffix, then edit distance > max_edit_distance and
# no need for Levenshtein calculation
# (phraseLen >= prefixLength) &&
# (suggestionLen >= prefixLength)
else:
# handles the short-circuit of the min_distance assignment
# when the first boolean expression evaluates to False
if self._prefix_length - max_edit_distance == candidate_len:
min_distance = (min(phrase_len, suggestion_len) -
self._prefix_length)
else:
min_distance = 0
# pylint: disable=C0301,R0916
if (self._prefix_length - max_edit_distance == candidate_len
and (min_distance > 1
and phrase[phrase_len + 1 - min_distance :] != suggestion[suggestion_len + 1 - min_distance :])
or (min_distance > 0
and phrase[phrase_len - min_distance] != suggestion[suggestion_len - min_distance]
and (phrase[phrase_len - min_distance - 1] != suggestion[suggestion_len - min_distance]
or phrase[phrase_len - min_distance] != suggestion[suggestion_len - min_distance - 1]))):
continue
else:
# delete_in_suggestion_prefix is somewhat expensive,
# and only pays off when verbosity is TOP or CLOSEST
if ((verbosity != Verbosity.ALL
and not self._delete_in_suggestion_prefix(
candidate, candidate_len, suggestion,
suggestion_len))
or suggestion in considered_suggestions):
continue
considered_suggestions.add(suggestion)
distance = distance_comparer.compare(
phrase, suggestion, max_edit_distance_2)
if distance < 0:
continue
# do not process higher distances than those already found,
# if verbosity<ALL (note: max_edit_distance_2 will always
# equal max_edit_distance when Verbosity.ALL)
if distance <= max_edit_distance_2:
suggestion_count = self._words[suggestion]
si = SuggestItem(suggestion, distance, suggestion_count)
if suggestions:
if verbosity == Verbosity.CLOSEST:
# we will calculate DamLev distance only to the
# smallest found distance so far
if distance < max_edit_distance_2:
suggestions = list()
elif verbosity == Verbosity.TOP:
if (distance < max_edit_distance_2
or suggestion_count > suggestions[0].count):
max_edit_distance_2 = distance
suggestions[0] = si
continue
if verbosity != Verbosity.ALL:
max_edit_distance_2 = distance
suggestions.append(si)
# add edits: derive edits (deletes) from candidate (phrase) and
# add them to candidates list. this is a recursive process until
# the maximum edit distance has been reached
if (len_diff < max_edit_distance
and candidate_len <= self._prefix_length):
# do not create edits with edit distance smaller than
# suggestions already found
if (verbosity != Verbosity.ALL
and len_diff >= max_edit_distance_2):
continue
for i in range(candidate_len):
delete = candidate[: i] + candidate[i + 1 :]
if delete not in considered_deletes:
considered_deletes.add(delete)
candidates.append(delete)
if len(suggestions) > 1:
suggestions.sort()
return suggestions
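# Example (illustrative sketch; `sym_spell` is a hypothetical instance with a
# loaded dictionary):
#     sym_spell.lookup("memebers", Verbosity.CLOSEST, max_edit_distance=2)
# returns SuggestItem objects such as "members" with distance 1, sorted by
# edit distance and then by count. Verbosity.TOP keeps only the single best
# suggestion, Verbosity.ALL keeps every suggestion within max_edit_distance.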
def lookup_compound(self, phrase, max_edit_distance,
ignore_non_words=False):
"""lookup_compound supports compound aware automatic spelling
correction of multi-word input strings with three cases:
1. mistakenly inserted space into a correct word led to two incorrect
terms
2. mistakenly omitted space between two correct words led to one
incorrect combined term
3. multiple independent input terms with/without spelling errors
Find suggested spellings for a multi-word input string (supports word
splitting/merging).
Keyword arguments:
phrase -- The string being spell checked.
max_edit_distance -- The maximum edit distance between input and
suggested words.
ignore_non_words -- A flag determining whether numbers and acronyms are
left unchanged.
Return:
A list of SuggestItem objects representing suggested correct spellings
for the input string.
"""
# Parse input string into single terms
term_list_1 = helpers.parse_words(phrase)
# Second list of single terms with preserved cases so we can ignore
# acronyms (all cap words)
if ignore_non_words:
term_list_2 = helpers.parse_words(phrase, True)
suggestions = list()
suggestion_parts = list()
distance_comparer = EditDistance(self._distance_algorithm)
# translate every item to its best suggestion, otherwise it remains
# unchanged
is_last_combi = False
for i, __ in enumerate(term_list_1):
if ignore_non_words:
if helpers.try_parse_int64(term_list_1[i]) is not None:
suggestion_parts.append(SuggestItem(term_list_1[i], 0, 0))
continue
# if re.match(r"\b[A-Z]{2,}\b", term_list_2[i]):
if helpers.is_acronym(term_list_2[i]):
suggestion_parts.append(SuggestItem(term_list_2[i], 0, 0))
continue
suggestions = self.lookup(term_list_1[i], Verbosity.TOP,
max_edit_distance)
# combi check, always before split
if i > 0 and not is_last_combi:
suggestions_combi = self.lookup(
term_list_1[i - 1] + term_list_1[i], Verbosity.TOP,
max_edit_distance)
if suggestions_combi:
best_1 = suggestion_parts[-1]
if suggestions:
best_2 = suggestions[0]
else:
best_2 = SuggestItem(term_list_1[i],
max_edit_distance + 1, 0)
# make sure we're comparing with the lowercase form of the
# previous word
distance_1 = distance_comparer.compare(
term_list_1[i - 1] + " " + term_list_1[i],
best_1.term.lower() + " " + best_2.term,
max_edit_distance)
if (distance_1 >= 0
and suggestions_combi[0].distance + 1 < distance_1):
suggestions_combi[0].distance += 1
suggestion_parts[-1] = suggestions_combi[0]
is_last_combi = True
continue
is_last_combi = False
# always split terms without suggestion / never split terms with
# suggestion ed=0 / never split single char terms
if (suggestions and (suggestions[0].distance == 0
or len(term_list_1[i]) == 1)):
# choose best suggestion
suggestion_parts.append(suggestions[0])
else:
# if no perfect suggestion, split word into pairs
suggestions_split = list()
# add original term
if suggestions:
suggestions_split.append(suggestions[0])
if len(term_list_1[i]) > 1:
for j in range(1, len(term_list_1[i])):
part_1 = term_list_1[i][: j]
part_2 = term_list_1[i][j :]
suggestions_1 = self.lookup(part_1, Verbosity.TOP,
max_edit_distance)
if suggestions_1:
# if split correction1 == single-word correction
if (suggestions
and suggestions[0].term == suggestions_1[0].term):
break
suggestions_2 = self.lookup(part_2, Verbosity.TOP,
max_edit_distance)
if suggestions_2:
# if split correction2 == single-word correction
if (suggestions
and suggestions[0].term == suggestions_2[0].term):
break
# select best suggestion for split pair
tmp_term = (suggestions_1[0].term + " " +
suggestions_2[0].term)
tmp_distance = distance_comparer.compare(
term_list_1[i], tmp_term,
max_edit_distance)
if tmp_distance < 0:
tmp_distance = max_edit_distance + 1
tmp_count = min(suggestions_1[0].count,
suggestions_2[0].count)
suggestion_split = SuggestItem(
tmp_term, tmp_distance, tmp_count)
suggestions_split.append(suggestion_split)
# early termination of split
if suggestion_split.distance == 1:
break
if suggestions_split:
# select best suggestion for split pair
suggestions_split.sort()
suggestion_parts.append(suggestions_split[0])
self._replaced_words[term_list_1[i]] = suggestions_split[0]._term
else:
si = SuggestItem(term_list_1[i],
max_edit_distance + 1, 0)
suggestion_parts.append(si)
self._replaced_words[term_list_1[i]] = si._term
else:
si = SuggestItem(term_list_1[i], max_edit_distance + 1, 0)
suggestion_parts.append(si)
self._replaced_words[term_list_1[i]] = si._term
joined_term = ""
joined_count = sys.maxsize
for si in suggestion_parts:
joined_term += si.term + " "
joined_count = min(joined_count, si.count)
suggestion = SuggestItem(joined_term.rstrip(),
distance_comparer.compare(
phrase, joined_term, 2 ** 31 - 1),
joined_count)
suggestions_line = list()
suggestions_line.append(suggestion)
return suggestions_line
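# Example (illustrative sketch; `sym_spell` is a hypothetical instance):
#     sym_spell.lookup_compound("whereis th elove", max_edit_distance=2)
# returns a single-element list whose SuggestItem term is the corrected
# phrase (e.g. "where is the love"), with the edit distance measured against
# the whole input and the count taken as the minimum count of the parts.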
def word_segmentation(self, phrase, max_edit_distance=None,
max_segmentation_word_length=None):
"""word_egmentation divides a string into words by inserting missing
spaces at the appropriate positions misspelled words are corrected
and do not affect segmentation existing spaces are allowed and
considered for optimum segmentation
word_segmentation uses a novel approach *without* recursion.
https://medium.com/@wolfgarbe/fast-word-segmentation-for-noisy-text-2c2c41f9e8da
While each string of length n can be segmented in 2^n−1 possible
compositions https://en.wikipedia.org/wiki/Composition_(combinatorics)
word_segmentation has a linear runtime O(n) to find the optimum
composition
Find suggested spellings for a multi-word input string (supports word
splitting/merging).
Keyword arguments:
phrase -- The string being spell checked.
max_segmentation_word_length -- The maximum word length that should
be considered.
max_edit_distance -- The maximum edit distance between input and
corrected words (0=no correction/segmentation only).
Return:
The word segmented string, the word segmented and spelling corrected
string, the edit distance sum between input string and corrected
string, and the sum of word occurrence probabilities in log scale (a
measure of how common and probable the corrected segmentation is).
"""
# number of all words in the corpus used to generate the frequency
# dictionary. This is used to calculate the word occurrence
# probability p from word counts c : p=c/N. N equals the sum of all
# counts c in the dictionary only if the dictionary is complete, but
# not if the dictionary is truncated or filtered
N = 1024908267229
if max_edit_distance is None:
max_edit_distance = self._max_dictionary_edit_distance
if max_segmentation_word_length is None:
max_segmentation_word_length = self._max_length
array_size = min(max_segmentation_word_length, len(phrase))
compositions = [Composition()] * array_size
circular_index = cycle(range(array_size))
idx = -1
# outer loop (column): all possible part start positions
for j in range(len(phrase)):
# inner loop (row): all possible part lengths (from start
# position): part can't be bigger than longest word in dictionary
# (other than long unknown word)
imax = min(len(phrase) - j, max_segmentation_word_length)
for i in range(1, imax + 1):
# get top spelling correction/ed for part
part = phrase[j : j + i]
separator_len = 0
top_ed = 0
top_log_prob = 0.0
top_result = ""
if part[0].isspace():
# remove space for Levenshtein calculation
part = part[1 :]
else:
# add ed+1: space did not exist, had to be inserted
separator_len = 1
# remove space from part1, add number of removed spaces to top_ed
top_ed += len(part)
# remove space.
# add number of removed spaces to ed
part = part.replace(" ", "")
top_ed -= len(part)
results = self.lookup(part, Verbosity.TOP, max_edit_distance)
if results:
top_result = results[0].term
top_ed += results[0].distance
# Naive Bayes Rule. We assume the word probabilities of
# two words to be independent. Therefore the resulting
# probability of the word combination is the product of
# the two word probabilities
# Instead of computing the product of probabilities we
# are computing the sum of the logarithm of probabilities
# because the probabilities of words are about 10^-10;
# the product of many such small numbers could underflow
# the floating-point range and become zero
# log(ab)=log(a)+log(b)
top_log_prob = math.log10(float(results[0].count) /
float(N))
else:
top_result = part
# default, if word not found. Otherwise long input text
# would win as long unknown word (with ed=edmax+1),
# although there should be many spaces inserted
top_ed += len(part)
top_log_prob = math.log10(10.0 / N /
math.pow(10.0, len(part)))
dest = (i + idx) % array_size
# set values in first loop
if j == 0:
compositions[dest] = Composition(part, top_result, top_ed,
top_log_prob)
# pylint: disable=C0301,R0916
elif (i == max_segmentation_word_length
# replace values if better probabilityLogSum, if same
# edit distance OR one space difference
or ((compositions[idx].distance_sum + top_ed == compositions[dest].distance_sum
or compositions[idx].distance_sum + separator_len + top_ed == compositions[dest].distance_sum)
and compositions[dest].log_prob_sum < compositions[idx].log_prob_sum + top_log_prob)
# replace values if smaller edit distance
or compositions[idx].distance_sum + separator_len + top_ed < compositions[dest].distance_sum):
compositions[dest] = Composition(
compositions[idx].segmented_string + " " + part,
compositions[idx].corrected_string + " " + top_result,
compositions[idx].distance_sum + separator_len + top_ed,
compositions[idx].log_prob_sum + top_log_prob)
idx = next(circular_index)
return compositions[idx]
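# Example (illustrative sketch; `sym_spell` is a hypothetical instance):
#     result = sym_spell.word_segmentation("thequickbrownfox")
#     result.segmented_string   # e.g. "the quick brown fox"
#     result.corrected_string   # segmented and spell-corrected variant
#     result.distance_sum       # inserted spaces + correction edits
#     result.log_prob_sum       # sum of log10 word probabilities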
def _delete_in_suggestion_prefix(self, delete, delete_len, suggestion,
suggestion_len):
"""check whether all delete chars are present in the suggestion
prefix in correct order, otherwise this is just a hash collision
"""
if delete_len == 0:
return True
if self._prefix_length < suggestion_len:
suggestion_len = self._prefix_length
j = 0
for i in range(delete_len):
del_char = delete[i]
while j < suggestion_len and del_char != suggestion[j]:
j += 1
if j == suggestion_len:
return False
return True
def _edits(self, word, edit_distance, delete_words):
"""inexpensive and language independent: only deletes,
no transposes + replaces + inserts replaces and inserts are expensive
and language dependent
"""
edit_distance += 1
if len(word) > 1:
for i in range(len(word)):
delete = word[: i] + word[i + 1 :]
if delete not in delete_words:
delete_words.add(delete)
# recursion, if maximum edit distance not yet reached
if edit_distance < self._max_dictionary_edit_distance:
self._edits(delete, edit_distance, delete_words)
return delete_words
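# Worked example: for word "abc" with _max_dictionary_edit_distance >= 2,
# the first round of deletes yields {"ab", "ac", "bc"} and the recursion
# adds {"a", "b", "c"}, i.e. every string reachable by deleting up to
# max_edit_distance characters from the word.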
def _edits_prefix(self, key):
hash_set = set()
if len(key) <= self._max_dictionary_edit_distance:
hash_set.add("")
if len(key) > self._max_dictionary_edit_distance:
key = key[: self._prefix_length]
hash_set.add(key)
return self._edits(key, 0, hash_set)
def _get_str_hash(self, s):
s_len = len(s)
mask_len = min(s_len, 3)
hash_s = 2166136261
for i in range(s_len):
hash_s ^= ord(s[i])
hash_s *= 16777619
hash_s &= self._compact_mask
hash_s |= mask_len
return hash_s
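# Note: this is an FNV-1a style rolling hash truncated with _compact_mask;
# min(len(s), 3) is OR-ed into the low bits, presumably so that very short
# strings of different lengths tend to fall into different buckets.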
@property
def deletes(self):
return self._deletes
@property
def words(self):
return self._words
@property
def word_count(self):
return len(self._words)
class SuggestItem(object):
"""Spelling suggestion returned from Lookup."""
def __init__(self, term, distance, count):
"""Create a new instance of SuggestItem.
Keyword arguments:
term -- The suggested word.
distance -- Edit distance from search word.
count -- Frequency of suggestion in dictionary.
"""
self._term = term
self._distance = distance
self._count = count
def __eq__(self, other):
"""order by distance ascending, then by frequency count
descending
"""
if self._distance == other.distance:
return self._count == other.count
else:
return self._distance == other.distance
def __lt__(self, other):
if self._distance == other.distance:
return self._count > other.count
else:
return self._distance < other.distance
def __str__(self):
return "{}, {}, {}".format(self._term, self._distance, self._count)
@property
def term(self):
return self._term
@term.setter
def term(self, term):
self._term = term
@property
def distance(self):
return self._distance
@distance.setter
def distance(self, distance):
self._distance = distance
@property
def count(self):
return self._count
@count.setter
def count(self, count):
self._count = count
Composition = namedtuple("Composition", ["segmented_string", "corrected_string",
"distance_sum", "log_prob_sum"])
Composition.__new__.__defaults__ = (None,) * len(Composition._fields)
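# Minimal end-to-end sketch (illustrative only). Assumptions: the spell-checker
# class defined above is named SymSpell (as in upstream symspellpy), it can be
# constructed with default arguments, the dictionary loader shown earlier is
# exposed as load_dictionary, and "frequency_dictionary_en_82_765.txt" is a
# hypothetical path to a space-separated term/count file (term in column 0,
# count in column 1).
if __name__ == "__main__":
    sym_spell = SymSpell()
    if sym_spell.load_dictionary("frequency_dictionary_en_82_765.txt", 0, 1):
        # single-word lookup: closest suggestions within edit distance 2
        for item in sym_spell.lookup("memebers", Verbosity.CLOSEST,
                                     max_edit_distance=2):
            print(item)
        # compound correction of a multi-word phrase
        print(sym_spell.lookup_compound("whereis th elove", 2)[0])
        # segmentation of text with missing spaces
        print(sym_spell.word_segmentation("thequickbrownfox").corrected_string)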
|
py | b402ac3cf76f36f8400057781b081d6af87c10d7 | #!/usr/bin/env python
#
# Copyright 2007 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Higher-level, semantic data types for the datastore. These types
are expected to be set as attributes of Entities. See "Supported Data Types"
in the API Guide.
Most of these types are based on XML elements from Atom and GData elements
from the atom and gd namespaces. For more information, see:
http://www.atomenabled.org/developers/syndication/
https://developers.google.com/gdata/docs/1.0/elements
The namespace schemas are:
http://www.w3.org/2005/Atom
http://schemas.google.com/g/2005
"""
import array
import base64
import binascii
import calendar
import datetime
import os
import re
import struct
import time
from xml.sax import saxutils
from google.appengine.api import cmp_compat
from google.appengine.api import datastore_errors
from google.appengine.api import full_app_id
from google.appengine.api import namespace_manager
from google.appengine.api import users
from google.appengine.datastore import datastore_pbs
from google.appengine.datastore import entity_v4_pb2
from google.appengine.datastore import sortable_pb_encoder
import six
from six.moves import range
from six.moves import urllib
from six.moves import zip
from google.appengine.datastore import entity_bytes_pb2 as entity_pb2
if datastore_pbs._CLOUD_DATASTORE_ENABLED:
from google.appengine.datastore.datastore_pbs import googledatastore
_MAX_STRING_LENGTH = 1500
_MAX_LINK_PROPERTY_LENGTH = 2083
_MAX_RAW_PROPERTY_BYTES = 1048487
RESERVED_PROPERTY_NAME = re.compile('^__.*__$')
KEY_SPECIAL_PROPERTY = '__key__'
_KEY_SPECIAL_PROPERTY = KEY_SPECIAL_PROPERTY
_UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY = '__unapplied_log_timestamp_us__'
SCATTER_SPECIAL_PROPERTY = '__scatter__'
_SPECIAL_PROPERTIES = frozenset(
[KEY_SPECIAL_PROPERTY,
_UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY,
SCATTER_SPECIAL_PROPERTY])
_NAMESPACE_SEPARATOR = '!'
_EMPTY_NAMESPACE_ID = 1
_EPOCH = datetime.datetime.utcfromtimestamp(0)
if six.PY2:
_PREFERRED_NUM_TYPE = long
else:
_PREFERRED_NUM_TYPE = int
class UtcTzinfo(datetime.tzinfo):
def utcoffset(self, dt): return datetime.timedelta(0)
def dst(self, dt): return datetime.timedelta(0)
def tzname(self, dt): return 'UTC'
def __repr__(self): return 'datastore_types.UTC'
UTC = UtcTzinfo()
def typename(obj):
"""Returns the type of obj as a string."""
if hasattr(obj, '__class__'):
return getattr(obj, '__class__').__name__
else:
return type(obj).__name__
def ValidateString(value,
name='unused',
exception=datastore_errors.BadValueError,
max_len=_MAX_STRING_LENGTH,
empty_ok=False):
"""Raises an exception if value is not a valid string or a subclass thereof.
A string is valid if it's not empty, no more than _MAX_STRING_LENGTH bytes,
and not a Blob. The exception type can be specified with the exception
argument; it defaults to BadValueError.
Args:
value: the value to validate.
name: the name of this value; used in the exception message.
exception: the type of exception to raise.
max_len: the maximum allowed length, in bytes.
empty_ok: allow empty value.
"""
if value is None and empty_ok:
return
if (
not isinstance(value, (six.text_type, six.binary_type)) or
isinstance(value, Blob)):
raise exception('%s should be a string; received %s (a %s):' %
(name, value, typename(value)))
if not value and not empty_ok:
raise exception('%s must not be empty.' % name)
conversion_kwargs = {}
if six.PY3:
conversion_kwargs = dict(errors='surrogatepass')
if isinstance(value, six.text_type) and len(value.encode('utf-8', **conversion_kwargs)) > max_len:
raise exception('%s must be under %d bytes.' % (name, max_len))
if isinstance(value, str) and len(value) > max_len:
raise exception('%s must be under %d bytes.' % (name, max_len))
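# Example (illustrative): a typical guard call is
#     ValidateString(kind, 'kind', datastore_errors.BadArgumentError)
# which raises BadArgumentError when `kind` is empty, is not a str/unicode
# value (or is a Blob), or exceeds _MAX_STRING_LENGTH bytes once UTF-8 encoded.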
def ValidateInteger(value,
name='unused',
exception=datastore_errors.BadValueError,
empty_ok=False,
zero_ok=False,
negative_ok=False):
"""Raises an exception if value is not a valid integer.
An integer is valid if it's not negative or empty and is an integer
(either int or long). The exception type raised can be specified
with the exception argument; it defaults to BadValueError.
Args:
value: the value to validate.
name: the name of this value; used in the exception message.
exception: the type of exception to raise.
empty_ok: allow None value.
zero_ok: allow zero value.
negative_ok: allow negative value.
"""
if value is None and empty_ok:
return
if not isinstance(value, six.integer_types):
raise exception('%s should be an integer; received %s (a %s).' %
(name, value, typename(value)))
if not value and not zero_ok:
raise exception('%s must not be 0 (zero)' % name)
if value < 0 and not negative_ok:
raise exception('%s must not be negative.' % name)
def ResolveAppId(app):
"""Validate app id, providing a default.
Args:
app: The app id argument value to be validated.
Returns:
The value of app, or the substituted default. Always a non-empty string.
Raises:
BadArgumentError if the value is empty or not a string.
"""
if app is None:
app = full_app_id.get()
ValidateString(app, 'app', datastore_errors.BadArgumentError)
return app
def ResolveNamespace(namespace):
"""Validate app namespace, providing a default.
If the argument is None, namespace_manager.get_namespace() is substituted.
Args:
namespace: The namespace argument value to be validated.
Returns:
The value of namespace, or the substituted default. The empty string is used
to denote the empty namespace.
Raises:
BadArgumentError if the value is not a string.
"""
if namespace is None:
namespace = namespace_manager.get_namespace()
else:
namespace_manager.validate_namespace(
namespace, datastore_errors.BadArgumentError)
return namespace
def EncodeAppIdNamespace(app_id, namespace):
"""Concatenates app id and namespace into a single string.
This method is needed for xml and datastore_file_stub.
Args:
app_id: The application id to encode
namespace: The namespace to encode
Returns:
The string encoding for the app_id, namespace pair.
"""
if not namespace:
return app_id
else:
return app_id + _NAMESPACE_SEPARATOR + namespace
def DecodeAppIdNamespace(app_namespace_str):
"""Decodes app_namespace_str into an (app_id, namespace) pair.
This method is the reverse of EncodeAppIdNamespace and is needed for
datastore_file_stub.
Args:
app_namespace_str: An encoded app_id, namespace pair created by
EncodeAppIdNamespace
Returns:
(app_id, namespace) pair encoded in app_namespace_str
"""
sep = app_namespace_str.find(_NAMESPACE_SEPARATOR)
if sep < 0:
return (app_namespace_str, '')
else:
return (app_namespace_str[0:sep], app_namespace_str[sep + 1:])
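# Example (illustrative, hypothetical app id and namespace):
#     EncodeAppIdNamespace('my-app', 'tenant1')  -> 'my-app!tenant1'
#     DecodeAppIdNamespace('my-app!tenant1')     -> ('my-app', 'tenant1')
#     DecodeAppIdNamespace('my-app')             -> ('my-app', '')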
def SetNamespace(proto, namespace):
"""Sets the namespace for a protocol buffer or clears the field.
Args:
proto: An entity_pb2.Reference to update
namespace: the new namespace (None or an empty string will clear out the
field).
"""
if not namespace:
proto.ClearField('name_space')
else:
proto.name_space = namespace
def PartitionString(value, separator):
"""Equivalent to python2.5 str.partition()
TODO use str.partition() when python 2.5 is adopted.
Args:
value: String to be partitioned
separator: Separator string
"""
index = value.find(separator)
if index == -1:
return (value, '', value[0:0])
else:
return (value[0:index], separator, value[index+len(separator):len(value)])
@cmp_compat.total_ordering_from_cmp
class Key(object):
"""The primary key for a datastore entity.
A datastore GUID. A Key instance uniquely identifies an entity across all
apps, and includes all information necessary to fetch the entity from the
datastore with Get().
Key implements __hash__, and key instances are immutable, so Keys may be
used in sets and as dictionary keys.
"""
__reference = None
def __init__(self, encoded=None):
"""Constructor. Creates a Key from a string.
Args:
# a base64-encoded primary key, generated by Key.__str__
encoded: str
"""
self._bytes = None
if encoded is not None:
if isinstance(encoded, bytes):
pass
elif isinstance(encoded, six.text_type):
encoded = encoded.encode('utf-8')
else:
try:
repr_encoded = repr(encoded)
except:
repr_encoded = "<couldn't encode>"
raise datastore_errors.BadArgumentError(
'Key() expects a string; received %s (a %s).' %
(repr_encoded, typename(encoded)))
try:
modulo = len(encoded) % 4
if modulo != 0:
encoded += (b'=' * (4 - modulo))
self._bytes = encoded
encoded_pb = base64.urlsafe_b64decode(self._bytes)
self.__reference = entity_pb2.Reference.FromString(encoded_pb)
assert self.__reference.IsInitialized()
self._bytes = self._bytes.rstrip(b'=')
except (AssertionError, TypeError, binascii.Error) as e:
raise datastore_errors.BadKeyError(
'Invalid string key %s. Details: %s' % (encoded, e))
except Exception as e:
if (e.__class__.__name__ == 'ProtocolBufferDecodeError' or
e.__class__.__name__ == 'DecodeError'):
raise datastore_errors.BadKeyError('Invalid string key %s.' % encoded)
else:
raise
else:
self.__reference = entity_pb2.Reference()
def to_path(self, _default_id=None, _decode=True, _fail=True):
"""Construct the "path" of this key as a list.
Returns:
A list [kind_1, id_or_name_1, ..., kind_n, id_or_name_n] of the key path.
Raises:
datastore_errors.BadKeyError if this key does not have a valid path.
"""
path = []
for path_element in self.__reference.path.element:
path.append(path_element.type)
if path_element.HasField('name'):
path.append(path_element.name)
elif path_element.HasField('id'):
path.append(path_element.id)
elif _default_id is not None:
path.append(_default_id)
else:
raise datastore_errors.BadKeyError('Incomplete key found in to_path')
return path
@staticmethod
def from_path(*args, **kwds):
"""Static method to construct a Key out of a "path" (kind, id or name, ...).
This is useful when an application wants to use just the id or name portion
of a key in e.g. a URL, where the rest of the URL provides enough context to
fill in the rest, i.e. the app id (always implicit), the entity kind, and
possibly an ancestor key. Since ids and names are usually small, they're
more attractive for use in end-user-visible URLs than the full string
representation of a key.
Args:
kind: the entity kind (a str or unicode instance)
id_or_name: the id (an int or long) or name (a str or unicode instance)
parent: optional parent Key; default None.
namespace: optional namespace to use otherwise namespace_manager's
default namespace is used.
Returns:
A new Key instance whose .kind() and .id() or .name() methods return
the *last* kind and id or name positional arguments passed.
Raises:
BadArgumentError for invalid arguments.
BadKeyError if the parent key is incomplete.
"""
parent = kwds.pop('parent', None)
app_id = ResolveAppId(kwds.pop('_app', None))
namespace = kwds.pop('namespace', None)
if kwds:
raise datastore_errors.BadArgumentError(
'Excess keyword arguments ' + repr(kwds))
if not args or len(args) % 2:
raise datastore_errors.BadArgumentError(
'A non-zero even number of positional arguments is required '
'(kind, id or name, kind, id or name, ...); received %s' % repr(args))
if parent is not None:
if not isinstance(parent, Key):
raise datastore_errors.BadArgumentError(
'Expected None or a Key as parent; received %r (a %s).' %
(parent, typename(parent)))
if namespace is None:
namespace = parent.namespace()
if not parent.has_id_or_name():
raise datastore_errors.BadKeyError(
'The parent Key is incomplete.')
if app_id != parent.app() or namespace != parent.namespace():
raise datastore_errors.BadArgumentError(
'The app/namespace arguments (%s/%s) should match '
'parent.app/namespace() (%s/%s)' %
(app_id, namespace, parent.app(), parent.namespace()))
namespace = ResolveNamespace(namespace)
key = Key()
ref = key.__reference
if parent is not None:
ref.CopyFrom(parent.__reference)
else:
ref.app = app_id
SetNamespace(ref, namespace)
path = ref.path
for i in range(0, len(args), 2):
kind, id_or_name = args[i:i+2]
if isinstance(kind, six.string_types):
kind = kind.encode('utf-8')
else:
raise datastore_errors.BadArgumentError(
'Expected a string kind as argument %d; received %r (a %s).' %
(i + 1, kind, typename(kind)))
elem = path.element.add()
elem.type = kind
if isinstance(id_or_name, six.integer_types):
elem.id = id_or_name
elif isinstance(id_or_name, six.string_types):
ValidateString(id_or_name, 'name')
elem.name = id_or_name.encode('utf-8')
else:
raise datastore_errors.BadArgumentError(
'Expected an integer id or string name as argument %d; '
'received %r (a %s).' % (i + 2, id_or_name, typename(id_or_name)))
assert ref.IsInitialized()
return key
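# Example (illustrative, hypothetical kinds/names/app id):
#     k = Key.from_path('Parent', 'alice', 'Child', 42, _app='my-app')
# builds a two-level key whose last element has kind 'Child' and id 42;
# k.parent() is the Key for the ('Parent', 'alice') element, and str(k)
# produces the websafe base64 string accepted by the Key(encoded) constructor.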
def app(self):
"""Returns this entity's app id, a string."""
if self.__reference.app:
return self.__reference.app
else:
return None
def namespace(self):
"""Returns this entity's namespace, a string."""
if self.__reference.HasField('name_space'):
return self.__reference.name_space
else:
return ''
def kind(self):
"""Returns this entity's kind, as a string."""
if self.__reference.path.element:
return self.__reference.path.element[-1].type
else:
return None
def id(self):
"""Returns this entity's id, or None if it doesn't have one."""
elems = self.__reference.path.element
if elems and elems[-1].HasField('id') and elems[-1].id:
return elems[-1].id
else:
return None
def name(self):
"""Returns this entity's name, or None if it doesn't have one."""
elems = self.__reference.path.element
if elems and elems[-1].HasField('name') and elems[-1].name:
return elems[-1].name
else:
return None
def id_or_name(self):
"""Returns this entity's id or name, whichever it has, or None."""
if self.id() is not None:
return self.id()
else:
return self.name()
def has_id_or_name(self):
"""Returns True if this entity has an id or name, False otherwise.
"""
elems = self.__reference.path.element
if elems:
e = elems[-1]
return bool(e.name or e.id)
else:
return False
def parent(self):
"""Returns this entity's parent, as a Key. If this entity has no parent,
returns None."""
if len(self.__reference.path.element) > 1:
parent = Key()
parent.__reference.CopyFrom(self.__reference)
del parent.__reference.path.element[-1]
return parent
else:
return None
def ToTagUri(self):
"""Returns a tag: URI for this entity for use in XML output.
Foreign keys for entities may be represented in XML output as tag URIs.
RFC 4151 describes the tag URI scheme. From http://taguri.org/:
The tag algorithm lets people mint - create - identifiers that no one
else using the same algorithm could ever mint. It is simple enough to do
in your head, and the resulting identifiers can be easy to read, write,
and remember. The identifiers conform to the URI (URL) Syntax.
Tag URIs for entities use the app's auth domain and the date that the URI
is generated. The namespace-specific part is <kind>[<key>].
For example, here is the tag URI for a Kitten with the key "Fluffy" in the
catsinsinks app:
tag:catsinsinks.googleapps.com,2006-08-29:Kitten[Fluffy]
Raises a BadKeyError if this entity's key is incomplete.
"""
if not self.has_id_or_name():
raise datastore_errors.BadKeyError(
'ToTagUri() called for an entity with an incomplete key.')
return u'tag:%s.%s,%s:%s[%s]' % (
saxutils.escape(EncodeAppIdNamespace(self.app(), self.namespace())),
os.environ['AUTH_DOMAIN'],
datetime.date.today().isoformat(),
saxutils.escape(self.kind()),
saxutils.escape(str(self)))
ToXml = ToTagUri
def entity_group(self):
"""Returns this key's entity group as a Key.
Note that the returned Key will be incomplete if this Key is for a root
entity and it is incomplete.
"""
group = Key._FromPb(self.__reference)
del group.__reference.path.element[1:]
return group
@staticmethod
def _FromPb(pb):
"""Static factory method.
Creates a Key from an entity_pb2.Reference.
Not intended to be used by application developers. Enforced by hiding the
entity_pb2 classes.
Args:
pb: entity_pb2.Reference
Returns:
A datastore_types.Key.
"""
if not isinstance(pb, entity_pb2.Reference):
raise datastore_errors.BadArgumentError(
'Key constructor takes an entity_pb2.Reference; received %s (a %s).' %
(pb, typename(pb)))
key = Key()
key.__reference = entity_pb2.Reference()
key.__reference.CopyFrom(pb)
return key
def _ToPb(self):
"""Converts this Key to its protocol buffer representation.
Not intended to be used by application developers. Enforced by hiding the
entity_pb2 classes.
Returns:
# the Reference PB representation of this Key
entity_pb2.Reference
"""
pb = entity_pb2.Reference()
pb.CopyFrom(self.__reference)
return pb
def __str__(self):
"""Encodes this Key as an opaque string.
Returns a string representation of this key, suitable for use in HTML,
URLs, and other similar use cases. If the entity's key is incomplete,
raises a BadKeyError.
Unfortunately, this string encoding isn't particularly compact, and its
length varies with the length of the path. If you want a shorter identifier
and you know the kind and parent (if any) ahead of time, consider using just
the entity's id or name.
Returns:
string
"""
try:
if self._bytes is not None:
return self._bytes.decode('utf-8')
except AttributeError:
pass
if (self.has_id_or_name()):
encoded = base64.urlsafe_b64encode(self.__reference.SerializeToString())
self._bytes = encoded.replace(b'=', b'')
else:
raise datastore_errors.BadKeyError(
'Cannot string encode an incomplete key!\n%s' % self.__reference)
return self._bytes.decode('utf-8')
def __repr__(self):
"""Returns an eval()able string representation of this key.
Returns a Python string of the form 'datastore_types.Key.from_path(...)'
that can be used to recreate this key.
Returns:
string
"""
args = []
for elem in self.__reference.path.element:
args.append(six.text_type(repr(elem.type)))
if elem.HasField('name'):
args.append(six.text_type(repr(elem.name)))
else:
args.append(repr(elem.id))
args.append('_app=%r' % self.__reference.app)
if self.__reference.HasField('name_space'):
args.append('namespace=%r' % six.ensure_text(self.__reference.name_space))
return u'datastore_types.Key.from_path(%s)' % ', '.join(args)
def __cmp__(self, other):
"""Returns negative, zero, or positive when comparing two keys.
TODO: for API v2, we should change this to make incomplete keys, ie
keys without an id or name, not equal to any other keys.
Args:
other: Key to compare to.
Returns:
Negative if self is less than "other"
Zero if "other" is equal to self
Positive if self is greater than "other"
"""
if not isinstance(other, Key):
return -2
self_args = [self.__reference.app, self.__reference.name_space]
self_args += self.to_path(_default_id=0, _decode=False)
other_args = [other.__reference.app, other.__reference.name_space]
other_args += other.to_path(_default_id=0, _decode=False)
for self_component, other_component in zip(self_args, other_args):
comparison = cmp_compat.cmp(self_component, other_component)
if comparison != 0:
return comparison
return cmp_compat.cmp(len(self_args), len(other_args))
def __hash__(self):
"""Returns an integer hash of this key.
Implements Python's hash protocol so that Keys may be used in sets and as
dictionary keys.
Returns:
int
"""
args = self.to_path(_default_id=0, _fail=False)
args.append(self.__reference.app)
return hash(type(args)) ^ hash(tuple(args))
class _OverflowDateTime(_PREFERRED_NUM_TYPE):
"""Container for GD_WHEN values that don't fit into a datetime.datetime.
This class only exists to safely round-trip GD_WHEN values that are too large
to fit in a datetime.datetime instance e.g. that were created by Java
applications. It should not be created directly.
"""
pass
def _EmptyList(val):
if val is not None:
raise datastore_errors.BadValueError('value should be None.')
return []
def _When(val):
"""Coverts a GD_WHEN value to the appropriate type."""
try:
return _EPOCH + datetime.timedelta(microseconds=val)
except OverflowError:
return _OverflowDateTime(val)
class Category(six.text_type):
"""A tag, ie a descriptive word or phrase. Entities may be tagged by users,
and later returned by a queries for that tag. Tags can also be used for
ranking results (frequency), photo captions, clustering, activity, etc.
Here's a more in-depth description: http://www.zeldman.com/daily/0405d.shtml
This is the Atom "category" element. In XML output, the tag is provided as
the term attribute. See:
http://www.atomenabled.org/developers/syndication/#category
Raises BadValueError if tag is not a string or subtype.
"""
TERM = 'user-tag'
def __init__(self, tag):
super(Category, self).__init__()
ValidateString(tag, 'tag')
def ToXml(self):
return u'<category term="%s" label=%s />' % (Category.TERM,
saxutils.quoteattr(self))
class Link(six.text_type):
"""A fully qualified URL. Usually http: scheme, but may also be file:, ftp:,
news:, among others.
If you have email (mailto:) or instant messaging (aim:, xmpp:) links,
consider using the Email or IM classes instead.
This is the Atom "link" element. In XML output, the link is provided as the
href attribute. See:
http://www.atomenabled.org/developers/syndication/#link
Raises BadValueError if link is not a fully qualified, well-formed URL.
"""
def __init__(self, link):
super(Link, self).__init__()
ValidateString(link, 'link', max_len=_MAX_LINK_PROPERTY_LENGTH)
scheme, domain, path, _, _, _ = urllib.parse.urlparse(link)
if (not scheme or (scheme != 'file' and not domain) or
(scheme == 'file' and not path)):
raise datastore_errors.BadValueError('Invalid URL: %s' % link)
def ToXml(self):
return u'<link href=%s />' % saxutils.quoteattr(self)
class Email(six.text_type):
"""An RFC2822 email address. Makes no attempt at validation; apart from
checking MX records, email address validation is a rathole.
This is the gd:email element. In XML output, the email address is provided as
the address attribute. See:
https://developers.google.com/gdata/docs/1.0/elements#gdEmail
Raises BadValueError if email is not a valid email address.
"""
def __init__(self, email):
super(Email, self).__init__()
ValidateString(email, 'email')
def ToXml(self):
return u'<gd:email address=%s />' % saxutils.quoteattr(self)
@cmp_compat.total_ordering_from_cmp
class GeoPt(object):
"""A geographical point, specified by floating-point latitude and longitude
coordinates. Often used to integrate with mapping sites like Google Maps.
May also be used as ICBM coordinates.
This is the georss:point element. In XML output, the coordinates are
provided as the lat and lon attributes. See: http://georss.org/
Serializes to '<lat>,<lon>'. Raises BadValueError if it's passed an invalid
serialized string, or if lat and lon are not valid floating points in the
ranges [-90, 90] and [-180, 180], respectively.
"""
lat = None
lon = None
def __init__(self, lat, lon=None):
if lon is None:
try:
split = lat.split(',')
lat, lon = split
except (AttributeError, ValueError):
raise datastore_errors.BadValueError(
'Expected a "lat,long" formatted string; received %s (a %s).' %
(lat, typename(lat)))
try:
lat = float(lat)
lon = float(lon)
if abs(lat) > 90:
raise datastore_errors.BadValueError(
'Latitude must be between -90 and 90; received %f' % lat)
if abs(lon) > 180:
raise datastore_errors.BadValueError(
'Longitude must be between -180 and 180; received %f' % lon)
except (TypeError, ValueError):
raise datastore_errors.BadValueError(
'Expected floats for lat and long; received %s (a %s) and %s (a %s).' %
(lat, typename(lat), lon, typename(lon)))
self.lat = lat
self.lon = lon
def __cmp__(self, other):
"""Returns negative, zero, or positive when comparing two GeoPts."""
if not isinstance(other, GeoPt):
try:
other = GeoPt(other)
except datastore_errors.BadValueError:
return NotImplemented
lat_cmp = cmp_compat.cmp(self.lat, other.lat)
if lat_cmp != 0:
return lat_cmp
else:
return cmp_compat.cmp(self.lon, other.lon)
def __hash__(self):
"""Returns an integer hash of this point.
Implements Python's hash protocol so that GeoPts may be used in sets and
as dictionary keys.
Returns:
int
"""
return hash((self.lat, self.lon))
def __repr__(self):
"""Returns an eval()able string representation of this GeoPt.
The returned string is of the form 'datastore_types.GeoPt([lat], [lon])'.
Returns:
string
"""
return 'datastore_types.GeoPt(%r, %r)' % (self.lat, self.lon)
def __unicode__(self):
return u'%s,%s' % (six.text_type(self.lat),
six.text_type(self.lon))
__str__ = __unicode__
def ToXml(self):
return u'<georss:point>%s %s</georss:point>' % (six.text_type(
self.lat), six.text_type(self.lon))
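# Example (illustrative coordinates): GeoPt(37.4, -122.1) and GeoPt("37.4,-122.1")
# construct the same point; str() of either gives "37.4,-122.1", and values
# outside [-90, 90] / [-180, 180] raise datastore_errors.BadValueError.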
@cmp_compat.total_ordering_from_cmp
class IM(object):
"""An instant messaging handle. Includes both an address and its protocol.
The protocol value is either a standard IM scheme or a URL identifying the
IM network for the protocol. Possible values include:
Value Description
sip SIP/SIMPLE
unknown Unknown or unspecified
xmpp XMPP/Jabber
http://aim.com/ AIM
http://icq.com/ ICQ
http://talk.google.com/ Google Talk
http://messenger.msn.com/ MSN Messenger
http://messenger.yahoo.com/ Yahoo Messenger
http://sametime.com/ Lotus Sametime
http://gadu-gadu.pl/ Gadu-Gadu
This is the gd:im element. In XML output, the address and protocol are
provided as the address and protocol attributes, respectively. See:
https://developers.google.com/gdata/docs/1.0/elements#gdIm
Serializes to '<protocol> <address>'. Raises BadValueError if tag is not a
standard IM scheme or a URL.
"""
PROTOCOLS = [ 'sip', 'unknown', 'xmpp' ]
protocol = None
address = None
def __init__(self, protocol, address=None):
if address is None:
try:
split = protocol.split(' ', 1)
protocol, address = split
except (AttributeError, ValueError):
raise datastore_errors.BadValueError(
'Expected string of format "protocol address"; received %s' %
(protocol,))
ValidateString(address, 'address')
if protocol not in self.PROTOCOLS:
Link(protocol)
self.address = address
self.protocol = protocol
def __cmp__(self, other):
"""Returns negative, zero, or positive when comparing two IMs."""
if not isinstance(other, IM):
try:
other = IM(other)
except datastore_errors.BadValueError:
return NotImplemented
return cmp_compat.cmp((self.address, self.protocol),
(other.address, other.protocol))
def __repr__(self):
"""Returns an eval()able string representation of this IM.
The returned string is of the form:
datastore_types.IM('address', 'protocol')
Returns:
string
"""
return 'datastore_types.IM(%r, %r)' % (self.protocol, self.address)
def __unicode__(self):
return u'%s %s' % (self.protocol, self.address)
__str__ = __unicode__
def ToXml(self):
return (u'<gd:im protocol=%s address=%s />' %
(saxutils.quoteattr(self.protocol),
saxutils.quoteattr(self.address)))
def __len__(self):
return len(six.text_type(self))
class PhoneNumber(six.text_type):
"""A human-readable phone number or address.
No validation is performed. Phone numbers have many different formats -
local, long distance, domestic, international, internal extension, TTY,
VOIP, SMS, and alternative networks like Skype, XFire and Roger Wilco. They
all have their own numbering and addressing formats.
This is the gd:phoneNumber element. In XML output, the phone number is
provided as the text of the element. See:
https://developers.google.com/gdata/docs/1.0/elements#gdPhoneNumber
Raises BadValueError if phone is not a string or subtype.
"""
def __init__(self, phone):
super(PhoneNumber, self).__init__()
ValidateString(phone, 'phone')
def ToXml(self):
return u'<gd:phoneNumber>%s</gd:phoneNumber>' % saxutils.escape(self)
class PostalAddress(six.text_type):
"""A human-readable mailing address. Again, mailing address formats vary
widely, so no validation is performed.
This is the gd:postalAddress element. In XML output, the address is provided
as the text of the element. See:
https://developers.google.com/gdata/docs/1.0/elements#gdPostalAddress
Raises BadValueError if address is not a string or subtype.
"""
def __init__(self, address):
super(PostalAddress, self).__init__()
ValidateString(address, 'address')
def ToXml(self):
return u'<gd:postalAddress>%s</gd:postalAddress>' % saxutils.escape(self)
class Rating(_PREFERRED_NUM_TYPE):
"""A user-provided integer rating for a piece of content. Normalized to a
0-100 scale.
This is the gd:rating element. In XML output, the address is provided
as the text of the element. See:
https://developers.google.com/gdata/docs/1.0/elements#gdRating
Serializes to the decimal string representation of the rating. Raises
BadValueError if the rating is not an integer in the range [0, 100].
"""
MIN = 0
MAX = 100
def __init__(self, rating):
super(Rating, self).__init__()
if isinstance(rating, float) or isinstance(rating, complex):
raise datastore_errors.BadValueError(
'Expected int or long; received %s (a %s).' %
(rating, typename(rating)))
try:
if (_PREFERRED_NUM_TYPE(rating) < Rating.MIN
or _PREFERRED_NUM_TYPE(rating) > Rating.MAX):
raise datastore_errors.BadValueError()
except ValueError:
raise datastore_errors.BadValueError(
'Expected int or long; received %s (a %s).' %
(rating, typename(rating)))
def ToXml(self):
return (u'<gd:rating value="%d" min="%d" max="%d" />' %
(self, Rating.MIN, Rating.MAX))
class Text(six.text_type):
"""A long string type.
Strings of any length can be stored in the datastore using this
type. It behaves identically to the Python unicode type, except for
the constructor, which only accepts str and unicode arguments.
"""
def __new__(cls, arg=None, encoding=None):
"""Constructor.
We only accept unicode and str instances, the latter with encoding.
Args:
arg: optional unicode or str instance; default u''
encoding: optional encoding; disallowed when isinstance(arg, unicode),
defaults to 'ascii' when isinstance(arg, str);
"""
if arg is None:
arg = u''
if isinstance(arg, six.text_type):
if encoding is not None:
raise TypeError('Text() with a unicode argument '
'should not specify an encoding')
return super(Text, cls).__new__(cls, arg)
if isinstance(arg, bytes):
if encoding is None:
encoding = 'ascii'
return super(Text, cls).__new__(cls, arg, encoding)
raise TypeError('Text() argument should be str or unicode, not %s' %
type(arg).__name__)
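# Example (illustrative): Text accepts unicode directly, or bytes plus an
# encoding:
#     Text(u'already unicode')
#     Text(b'caf\xc3\xa9', encoding='utf-8')   # decodes to u'café'
# Supplying an encoding together with a unicode argument raises TypeError.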
class _BaseByteType(bytes):
"""A base class for datastore types that are encoded as bytes.
This behaves identically to the Python bytes type, except for the
constructor, which only accepts bytes arguments.
"""
def __new__(cls, arg=None):
"""Constructor.
We only accept bytes instances.
Args:
arg: optional bytes instance (default b'')
"""
if arg is None:
arg = b''
if isinstance(arg, bytes):
return super(_BaseByteType, cls).__new__(cls, arg)
raise TypeError('%s() argument should be bytes instance, not %s' %
(cls.__name__, type(arg).__name__))
def ToXml(self):
"""Output bytes as XML.
Returns:
Base64 encoded version of itself for safe insertion into an XML document.
"""
encoded = base64.urlsafe_b64encode(self).decode('utf-8')
return saxutils.escape(encoded)
if six.PY3:
def __str__(self):
return self.decode('utf-8')
class Blob(_BaseByteType):
"""A blob type, appropriate for storing binary data of any length.
This behaves identically to the Python bytes type, except for the
constructor, which only accepts bytes arguments.
"""
def __new__(cls, *args, **kwargs):
self = super(Blob, cls).__new__(cls, *args, **kwargs)
self._meaning_uri = None
return self
@property
def meaning_uri(self):
return self._meaning_uri
@meaning_uri.setter
def meaning_uri(self, value):
self._meaning_uri = value
class EmbeddedEntity(_BaseByteType):
"""A proto encoded EntityProto.
This behaves identically to Blob, except for the
constructor, which accepts a bytes or EntityProto argument.
Can be decoded using datastore.Entity.FromProto(), db.model_from_protobuf() or
ndb.LocalStructuredProperty.
"""
def __new__(cls, arg=None):
"""Constructor.
Args:
arg: optional bytes or EntityProto instance (default b'')
"""
if isinstance(arg, entity_pb2.EntityProto):
arg = arg.SerializePartialToString()
return super(EmbeddedEntity, cls).__new__(cls, arg)
class ByteString(_BaseByteType):
"""A byte-string type, appropriate for storing short amounts of indexed data.
This behaves identically to Blob, except it's used only for short, indexed
byte strings.
"""
pass
@cmp_compat.total_ordering_from_cmp
class BlobKey(object):
"""Key used to identify a blob in Blobstore.
This object wraps a string that gets used internally by the Blobstore API
to identify application blobs. The BlobKey corresponds to the entity name
of the underlying BlobReference entity.
This class is exposed in the API in both google.appengine.ext.db and
google.appengine.ext.blobstore.
"""
def __init__(self, blob_key):
"""Constructor.
Used to convert a string to a BlobKey. Normally used internally by
Blobstore API.
Args:
blob_key: Key name of BlobReference that this key belongs to.
"""
ValidateString(blob_key, 'blob-key', empty_ok=True)
self.__blob_key = blob_key
def __str__(self):
"""Convert to string."""
return six.ensure_str(self.__blob_key)
def __repr__(self):
"""Returns an eval()able string representation of this key.
Returns a Python string of the form 'datastore_types.BlobKey(...)'
that can be used to recreate this key.
Returns:
string
"""
return 'datastore_types.%s(%r)' % (type(self).__name__, self.__blob_key)
def __cmp__(self, other):
if type(other) is type(self):
return cmp_compat.cmp(str(self), str(other))
elif isinstance(other, six.string_types):
return cmp_compat.cmp(self.__blob_key, other)
else:
return NotImplemented
def __hash__(self):
return hash(self.__blob_key)
def ToXml(self):
return str(self)
_PROPERTY_MEANINGS = {
Blob: entity_pb2.Property.BLOB,
EmbeddedEntity: entity_pb2.Property.ENTITY_PROTO,
ByteString: entity_pb2.Property.BYTESTRING,
Text: entity_pb2.Property.TEXT,
datetime.datetime: entity_pb2.Property.GD_WHEN,
datetime.date: entity_pb2.Property.GD_WHEN,
datetime.time: entity_pb2.Property.GD_WHEN,
_OverflowDateTime: entity_pb2.Property.GD_WHEN,
Category: entity_pb2.Property.ATOM_CATEGORY,
Link: entity_pb2.Property.ATOM_LINK,
Email: entity_pb2.Property.GD_EMAIL,
GeoPt: entity_pb2.Property.GEORSS_POINT,
IM: entity_pb2.Property.GD_IM,
PhoneNumber: entity_pb2.Property.GD_PHONENUMBER,
PostalAddress: entity_pb2.Property.GD_POSTALADDRESS,
Rating: entity_pb2.Property.GD_RATING,
BlobKey: entity_pb2.Property.BLOBKEY,
}
_PROPERTY_TYPES = frozenset([
Blob,
EmbeddedEntity,
ByteString,
bool,
Category,
datetime.datetime,
_OverflowDateTime,
Email,
float,
GeoPt,
IM,
int,
Key,
Link,
_PREFERRED_NUM_TYPE,
PhoneNumber,
PostalAddress,
Rating,
str,
Text,
type(None),
six.text_type,
users.User,
BlobKey,
bytes,
])
_RAW_PROPERTY_TYPES = (Blob, Text, EmbeddedEntity)
_RAW_PROPERTY_MEANINGS = (entity_pb2.Property.BLOB, entity_pb2.Property.TEXT,
entity_pb2.Property.ENTITY_PROTO)
def ValidatePropertyInteger(name, value):
"""Raises an exception if the supplied integer is invalid.
Args:
name: Name of the property this is for.
value: Integer value.
Raises:
OverflowError if the value does not fit within a signed int64.
"""
if not (-0x8000000000000000 <= value <= 0x7fffffffffffffff):
raise OverflowError('%d is out of bounds for int64' % value)
def ValidateStringLength(name, value, max_len):
"""Raises an exception if the supplied string is too long.
Args:
name: Name of the property this is for.
value: String value.
max_len: Maximum length the string may be.
Raises:
datastore_errors.BadValueError if the value is longer than the maximum length.
"""
if isinstance(value, six.text_type):
value = value.encode('utf-8')
if len(value) > max_len:
raise datastore_errors.BadValueError(
'Property %s is %d bytes long; it must be %d or less. '
'Consider Text instead, which can store strings of any length.' %
(name, len(value), max_len))
def ValidatePropertyString(name, value):
"""Validates the length of an indexed string property.
Args:
name: Name of the property this is for.
value: String value.
"""
ValidateStringLength(name, value, max_len=_MAX_STRING_LENGTH)
def ValidatePropertyLink(name, value):
"""Validates the length of an indexed Link property.
Args:
name: Name of the property this is for.
value: String value.
"""
ValidateStringLength(name, value, max_len=_MAX_LINK_PROPERTY_LENGTH)
def ValidatePropertyNothing(name, value):
"""No-op validation function.
Args:
name: Name of the property this is for.
value: Not used.
"""
pass
def ValidatePropertyKey(name, value):
"""Raises an exception if the supplied datastore.Key instance is invalid.
Args:
name: Name of the property this is for.
value: A datastore.Key instance.
Raises:
datastore_errors.BadValueError if the value is invalid.
"""
if not value.has_id_or_name():
raise datastore_errors.BadValueError(
'Incomplete key found for reference property %s.' % name)
_VALIDATE_PROPERTY_VALUES = {
Blob: ValidatePropertyNothing,
EmbeddedEntity: ValidatePropertyNothing,
ByteString: ValidatePropertyNothing,
bool: ValidatePropertyNothing,
Category: ValidatePropertyNothing,
datetime.datetime: ValidatePropertyNothing,
_OverflowDateTime: ValidatePropertyInteger,
Email: ValidatePropertyNothing,
float: ValidatePropertyNothing,
GeoPt: ValidatePropertyNothing,
IM: ValidatePropertyNothing,
int: ValidatePropertyInteger,
Key: ValidatePropertyKey,
Link: ValidatePropertyNothing,
_PREFERRED_NUM_TYPE: ValidatePropertyInteger,
PhoneNumber: ValidatePropertyNothing,
PostalAddress: ValidatePropertyNothing,
Rating: ValidatePropertyInteger,
str: ValidatePropertyNothing,
Text: ValidatePropertyNothing,
type(None): ValidatePropertyNothing,
six.text_type: ValidatePropertyNothing,
bytes: ValidatePropertyNothing,
users.User: ValidatePropertyNothing,
BlobKey: ValidatePropertyNothing,
}
_PROPERTY_TYPE_TO_INDEX_VALUE_TYPE = {
six.text_type: bytes,
Blob: bytes,
EmbeddedEntity: bytes,
ByteString: bytes,
bool: bool,
Category: bytes,
datetime.datetime: _PREFERRED_NUM_TYPE,
datetime.date: _PREFERRED_NUM_TYPE,
datetime.time: _PREFERRED_NUM_TYPE,
_OverflowDateTime: _PREFERRED_NUM_TYPE,
Email: six.binary_type,
float: float,
GeoPt: GeoPt,
IM: six.binary_type,
int: _PREFERRED_NUM_TYPE,
Key: Key,
Link: six.binary_type,
_PREFERRED_NUM_TYPE: _PREFERRED_NUM_TYPE,
PhoneNumber: six.binary_type,
PostalAddress: six.binary_type,
Rating: _PREFERRED_NUM_TYPE,
bytes: bytes,
Text: bytes,
type(None): type(None),
users.User: users.User,
BlobKey: bytes,
}
if six.PY2:
_PROPERTY_TYPE_TO_INDEX_VALUE_TYPE[basestring] = bytes
assert set(_VALIDATE_PROPERTY_VALUES.keys()) == _PROPERTY_TYPES
def ValidateProperty(name, values, read_only=False):
"""Helper function for validating property values.
Args:
name: Name of the property this is for.
values: Value or list of values for the property, as Python native types.
read_only: deprecated
Raises:
BadPropertyError if the property name is invalid. BadValueError if the
property did not validate correctly or the value was an empty list. Other
exception types (like OverflowError) if the property value does not meet
type-specific criteria.
"""
ValidateString(name, 'property name', datastore_errors.BadPropertyError)
values_type = type(values)
if values_type is tuple:
raise datastore_errors.BadValueError(
'May not use tuple property value; property %s is %s.' %
(name, repr(values)))
if values_type is not list:
values = [values]
try:
for v in values:
prop_validator = _VALIDATE_PROPERTY_VALUES.get(v.__class__)
if prop_validator is None:
raise datastore_errors.BadValueError(
'Unsupported type for property %s: %s' % (name, v.__class__))
prop_validator(name, v)
except (KeyError, ValueError, TypeError, IndexError, AttributeError) as msg:
raise datastore_errors.BadValueError(
'Error type checking values for property %s: %s' % (name, msg))
ValidateReadProperty = ValidateProperty
def PackBlob(name, value, pbvalue):
"""Packs a Blob property into a entity_pb2.PropertyValue.
Args:
name: The name of the property as a string.
value: A Blob instance.
pbvalue: The entity_pb2.PropertyValue to pack this value into.
"""
pbvalue.stringValue = value
def PackString(name, value, pbvalue):
"""Packs a string-typed property into a entity_pb2.PropertyValue.
Args:
name: The name of the property as a string.
value: A string, unicode, or string-like value instance.
pbvalue: The entity_pb2.PropertyValue to pack this value into.
"""
if isinstance(value, bytes):
value.decode('ascii')
pbvalue.stringValue = value
else:
pbvalue.stringValue = six.text_type(value).encode('utf-8')
def PackDatetime(name, value, pbvalue):
"""Packs a datetime-typed property into a entity_pb2.PropertyValue.
Args:
name: The name of the property as a string.
value: A datetime.datetime instance.
pbvalue: The entity_pb2.PropertyValue to pack this value into.
"""
pbvalue.int64Value = DatetimeToTimestamp(value)
def DatetimeToTimestamp(value):
"""Converts a datetime.datetime to microseconds since the epoch, as a float.
Args:
value: datetime.datetime
Returns: value as a long
"""
if value.tzinfo:
value = value.astimezone(UTC)
return _PREFERRED_NUM_TYPE(
calendar.timegm(value.timetuple()) * 1000000) + value.microsecond
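# Illustrative sketch (hypothetical helper, never called): naive datetimes are
# treated as UTC, so midnight of Jan 2 1970 maps to exactly one day's worth of
# microseconds since the epoch.
def _DatetimeToTimestampExample():
  return DatetimeToTimestamp(datetime.datetime(1970, 1, 2))  # 86400000000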
def PackGeoPt(name, value, pbvalue):
"""Packs a GeoPt property into a entity_pb2.PropertyValue.
Args:
name: The name of the property as a string.
value: A GeoPt instance.
pbvalue: The entity_pb2.PropertyValue to pack this value into.
"""
pbvalue.pointvalue.x = value.lat
pbvalue.pointvalue.y = value.lon
def PackUser(name, value, pbvalue):
"""Packs a User property into a entity_pb2.PropertyValue.
Args:
name: The name of the property as a string.
value: A users.User instance.
pbvalue: The entity_pb2.PropertyValue to pack this value into.
"""
pbvalue.uservalue.email = value.email().encode('utf-8')
pbvalue.uservalue.auth_domain = value.auth_domain().encode('utf-8')
pbvalue.uservalue.gaiaid = 0
if value.user_id() is not None:
pbvalue.uservalue.obfuscated_gaiaid = value.user_id().encode('utf-8')
if value.federated_identity() is not None:
pbvalue.uservalue.federated_identity = value.federated_identity().encode(
'utf-8')
if value.federated_provider() is not None:
pbvalue.uservalue.federated_provider = value.federated_provider().encode(
'utf-8')
def PackKey(name, value, pbvalue):
"""Packs a reference property into a entity_pb2.PropertyValue.
Args:
name: The name of the property as a string.
value: A Key instance.
pbvalue: The entity_pb2.PropertyValue to pack this value into.
"""
ref = value._Key__reference
pbvalue.referencevalue.app = ref.app
SetNamespace(pbvalue.referencevalue, ref.name_space)
for elem in ref.path.element:
elementCopy = pbvalue.referencevalue.pathelement.add()
datastore_pbs.copy_path_element(elem, elementCopy)
def PackBool(name, value, pbvalue):
"""Packs a boolean property into a entity_pb2.PropertyValue.
Args:
name: The name of the property as a string.
value: A boolean instance.
pbvalue: The entity_pb2.PropertyValue to pack this value into.
"""
pbvalue.booleanValue = value
def PackInteger(name, value, pbvalue):
"""Packs an integer property into a entity_pb2.PropertyValue.
Args:
name: The name of the property as a string.
value: An int or long instance.
pbvalue: The entity_pb2.PropertyValue to pack this value into.
"""
pbvalue.int64Value = value
def PackFloat(name, value, pbvalue):
"""Packs a float property into a entity_pb2.PropertyValue.
Args:
name: The name of the property as a string.
value: A float instance.
pbvalue: The entity_pb2.PropertyValue to pack this value into.
"""
pbvalue.doubleValue = value
_PACK_PROPERTY_VALUES = {
Blob: PackBlob,
EmbeddedEntity: PackBlob,
ByteString: PackBlob,
bool: PackBool,
Category: PackString,
datetime.datetime: PackDatetime,
_OverflowDateTime: PackInteger,
Email: PackString,
float: PackFloat,
GeoPt: PackGeoPt,
IM: PackString,
int: PackInteger,
Key: PackKey,
Link: PackString,
_PREFERRED_NUM_TYPE: PackInteger,
PhoneNumber: PackString,
PostalAddress: PackString,
Rating: PackInteger,
str: PackString,
Text: PackString,
type(None): lambda name, value, pbvalue: pbvalue.ClearField('stringValue'),
six.text_type: PackString,
users.User: PackUser,
BlobKey: PackString,
bytes: PackString,
}
assert set(_PACK_PROPERTY_VALUES.keys()) == _PROPERTY_TYPES
def ToPropertyPb(name, values):
"""Creates type-specific entity_pb2.PropertyValues.
Determines the type and meaning of the PropertyValue based on the Python
type of the input value(s).
NOTE: This function does not validate anything!
Args:
name: string or unicode; the property name
values: The values for this property, either a single one or a list of them.
All values must be a supported type. Lists of values must all be of the
same type.
  Returns:
    A single entity_pb2.Property instance if a single value (or an empty list)
    was given, otherwise a list of entity_pb2.Property instances.
"""
encoded_name = six.ensure_str(name)
values_type = type(values)
if values_type is list and len(values) == 0:
pb = entity_pb2.Property()
pb.meaning = entity_pb2.Property.EMPTY_LIST
pb.name = encoded_name
pb.multiple = False
pb.value.ClearField('stringValue')
return pb
elif values_type is list:
multiple = True
else:
multiple = False
values = [values]
pbs = []
for v in values:
pb = entity_pb2.Property()
pb.name = encoded_name
pb.multiple = multiple
meaning = _PROPERTY_MEANINGS.get(v.__class__)
if meaning is not None:
pb.meaning = meaning
if hasattr(v, 'meaning_uri') and v.meaning_uri:
pb.meaning_uri = v.meaning_uri
pack_prop = _PACK_PROPERTY_VALUES[v.__class__]
pack_prop(name, v, pb.value)
pbs.append(pb)
if multiple:
return pbs
else:
return pbs[0]
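# Illustrative sketch (hypothetical helper, never called; the property names
# are made up): a scalar value yields a single entity_pb2.Property, while a
# list yields one Property per element with multiple=True.
def _ToPropertyPbExample():
  single = ToPropertyPb('title', u'hello')
  repeated = ToPropertyPb('tags', [u'news', u'tech'])
  return single, repeated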
def FromReferenceProperty(value):
"""Converts a reference PropertyValue to a Key.
Args:
value: entity_pb2.PropertyValue
Returns:
Key
Raises:
BadValueError if the value is not a PropertyValue.
"""
assert isinstance(value, entity_pb2.PropertyValue)
assert value.HasField('referencevalue')
ref = value.referencevalue
key = Key()
key_ref = key._Key__reference
key_ref.app = ref.app
SetNamespace(key_ref, ref.name_space)
for pathelem in ref.pathelement:
element = key_ref.path.element.add()
datastore_pbs.copy_path_element(pathelem, element)
return key
_PROPERTY_CONVERSIONS = {
entity_pb2.Property.GD_WHEN: _When,
entity_pb2.Property.ATOM_CATEGORY: Category,
entity_pb2.Property.ATOM_LINK: Link,
entity_pb2.Property.GD_EMAIL: Email,
entity_pb2.Property.GD_IM: IM,
entity_pb2.Property.GD_PHONENUMBER: PhoneNumber,
entity_pb2.Property.GD_POSTALADDRESS: PostalAddress,
entity_pb2.Property.GD_RATING: Rating,
entity_pb2.Property.BLOB: Blob,
entity_pb2.Property.ENTITY_PROTO: EmbeddedEntity,
entity_pb2.Property.BYTESTRING: ByteString,
entity_pb2.Property.TEXT: Text,
entity_pb2.Property.BLOBKEY: BlobKey,
entity_pb2.Property.EMPTY_LIST: _EmptyList,
}
_NON_UTF8_MEANINGS = frozenset(
(entity_pb2.Property.BLOB, entity_pb2.Property.ENTITY_PROTO,
entity_pb2.Property.BYTESTRING, entity_pb2.Property.INDEX_VALUE))
def FromPropertyPb(pb):
"""Converts a property PB to a python value.
Args:
pb: entity_pb2.Property
Returns:
# return type is determined by the type of the argument
string, int, bool, double, users.User, or one of the atom or gd types
"""
pbval = pb.value
meaning = pb.meaning
if pbval.HasField('stringValue'):
value = pbval.stringValue
if not pb.HasField('meaning') or meaning not in _NON_UTF8_MEANINGS:
value = value.decode('utf-8')
elif pbval.HasField('int64Value'):
value = _PREFERRED_NUM_TYPE(pbval.int64Value)
elif pbval.HasField('booleanValue'):
value = bool(pbval.booleanValue)
elif pbval.HasField('doubleValue'):
value = pbval.doubleValue
elif pbval.HasField('referencevalue'):
value = FromReferenceProperty(pbval)
elif pbval.HasField('pointvalue'):
value = GeoPt(pbval.pointvalue.x, pbval.pointvalue.y)
elif pbval.HasField('uservalue'):
email = pbval.uservalue.email
auth_domain = pbval.uservalue.auth_domain
obfuscated_gaiaid = pbval.uservalue.obfuscated_gaiaid
federated_identity = None
if pbval.uservalue.HasField('federated_identity'):
federated_identity = pbval.uservalue.federated_identity
value = users.User(email=email,
_auth_domain=auth_domain,
_user_id=obfuscated_gaiaid,
federated_identity=federated_identity,
_strict_mode=False)
else:
value = None
try:
if pb.HasField('meaning') and meaning in _PROPERTY_CONVERSIONS:
conversion = _PROPERTY_CONVERSIONS[meaning]
value = conversion(value)
if (meaning == entity_pb2.Property.BLOB and pb.HasField('meaning_uri')):
value.meaning_uri = pb.meaning_uri
except (KeyError, ValueError, IndexError, TypeError, AttributeError) as msg:
raise datastore_errors.BadValueError(
'Error converting pb: %s\nException was: %s' % (pb, msg))
return value
def RestoreFromIndexValue(index_value, data_type):
"""Restores a index value to the correct datastore type.
Projection queries return property values directly from a datastore index.
These values are the native datastore values, one of str, bool, long, float,
GeoPt, Key or User. This function restores the original value when the
original type is known.
This function returns the value type returned when decoding a normal entity,
not necessarily of type data_type. For example, data_type=int returns a
long instance.
Args:
index_value: The value returned by FromPropertyPb for the projected
property.
data_type: The type of the value originally given to ToPropertyPb
Returns:
The restored property value.
Raises:
datastore_errors.BadValueError if the value cannot be restored.
"""
raw_type = _PROPERTY_TYPE_TO_INDEX_VALUE_TYPE.get(data_type)
if raw_type is None:
raise datastore_errors.BadValueError(
'Unsupported data type (%r)' % data_type)
if index_value is None:
return index_value
  if not isinstance(index_value, raw_type):
    raise datastore_errors.BadValueError(
        'Unsupported conversion. Expected %r got %r' %
        (raw_type, type(index_value)))
meaning = _PROPERTY_MEANINGS.get(data_type)
if isinstance(index_value, bytes) and meaning not in _NON_UTF8_MEANINGS:
index_value = six.text_type(index_value, 'utf-8')
conv = _PROPERTY_CONVERSIONS.get(meaning)
if not conv:
return index_value
try:
value = conv(index_value)
except (KeyError, ValueError, IndexError, TypeError, AttributeError) as msg:
raise datastore_errors.BadValueError(
'Error converting value: %r\nException was: %s' % (index_value, msg))
return value
def PropertyTypeName(value):
"""Returns the name of the type of the given property value, as a string.
Raises BadValueError if the value is not a valid property type.
Args:
value: any valid property value
Returns:
string
"""
if value.__class__ in _PROPERTY_MEANINGS:
meaning = _PROPERTY_MEANINGS[value.__class__]
name = entity_pb2.Property.Meaning.DESCRIPTOR.values_by_number[meaning].name
return name.lower().replace('_', ':')
elif isinstance(value, six.string_types):
return 'string'
elif isinstance(value, users.User):
return 'user'
elif isinstance(value, bool):
return 'bool'
elif isinstance(value, _PREFERRED_NUM_TYPE):
return 'int'
elif value is None:
return 'null'
else:
return typename(value).lower()
_PROPERTY_TYPE_STRINGS = {
'string': six.text_type,
'bool': bool,
'int': _PREFERRED_NUM_TYPE,
'null': type(None),
'float': float,
'key': Key,
'blob': Blob,
'entity:proto': EmbeddedEntity,
'bytestring': ByteString,
'text': Text,
'user': users.User,
'atom:category': Category,
'atom:link': Link,
'gd:email': Email,
'gd:when': datetime.datetime,
'georss:point': GeoPt,
'gd:im': IM,
'gd:phonenumber': PhoneNumber,
'gd:postaladdress': PostalAddress,
'gd:rating': Rating,
'blobkey': BlobKey,
}
def FromPropertyTypeName(type_name):
"""Returns the python type given a type name.
Args:
type_name: A string representation of a datastore type name.
Returns:
A python type.
"""
return _PROPERTY_TYPE_STRINGS[type_name]
def PropertyValueFromString(type_,
value_string,
_auth_domain=None):
"""Returns an instance of a property value given a type and string value.
The reverse of this method is just str() and type() of the python value.
Note that this does *not* support non-UTC offsets in ISO 8601-formatted
datetime strings, e.g. the -08:00 suffix in '2002-12-25 00:00:00-08:00'.
It only supports -00:00 and +00:00 suffixes, which are UTC.
Args:
type_: A python class.
value_string: A string representation of the value of the property.
Returns:
An instance of 'type'.
Raises:
ValueError if type_ is datetime and value_string has a timezone offset.
"""
if type_ == datetime.datetime:
value_string = value_string.strip()
if value_string[-6] in ('+', '-'):
if value_string[-5:] == '00:00':
value_string = value_string[:-6]
else:
raise ValueError('Non-UTC offsets in datetimes are not supported.')
split = value_string.split('.')
iso_date = split[0]
microseconds = 0
if len(split) > 1:
microseconds = int(split[1])
time_struct = time.strptime(iso_date, '%Y-%m-%d %H:%M:%S')[0:6]
value = datetime.datetime(*(time_struct + (microseconds,)))
return value
elif type_ == Rating:
return Rating(int(value_string))
elif type_ == bool:
return value_string == 'True'
elif type_ == users.User:
return users.User(value_string, _auth_domain)
elif type_ == type(None):
return None
elif type_ in (Blob, EmbeddedEntity, ByteString):
return type_(value_string.encode('utf-8'))
return type_(value_string)
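# Illustrative sketch (hypothetical helper, never called): only UTC offsets
# (+00:00 / -00:00) are accepted for datetime strings, and Rating strings are
# parsed as integers.
def _PropertyValueFromStringExample():
  when = PropertyValueFromString(datetime.datetime,
                                 '2002-12-25 00:00:00+00:00')
  score = PropertyValueFromString(Rating, '42')
  return when, score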
def ReferenceToKeyValue(key, id_resolver=None):
"""Converts a key into a comparable hashable "key" value.
Args:
key: The entity_pb2.Reference or googledatastore.Key from which to construct
the key value.
id_resolver: An optional datastore_pbs.IdResolver. Only necessary for
googledatastore.Key values.
Returns:
A comparable and hashable representation of the given key that is
compatible with one derived from a key property value.
"""
if (datastore_pbs._CLOUD_DATASTORE_ENABLED
and isinstance(key, googledatastore.Key)):
v1_key = key
key = entity_pb2.Reference()
datastore_pbs.get_entity_converter(id_resolver).v1_to_v3_reference(v1_key,
key)
elif isinstance(key, entity_v4_pb2.Key):
v4_key = key
key = entity_pb2.Reference()
datastore_pbs.get_entity_converter().v4_to_v3_reference(v4_key, key)
if isinstance(key, entity_pb2.Reference):
element_list = key.path.element
elif isinstance(key, entity_pb2.PropertyValue.ReferenceValue):
element_list = key.pathelement
else:
raise datastore_errors.BadArgumentError(
'key arg expected to be entity_pb2.Reference or googledatastore.Key (%r)'
% (key,))
result = [
entity_pb2.PropertyValue.REFERENCEVALUE_FIELD_NUMBER, key.app,
key.name_space
]
for element in element_list:
result.append(element.type)
if element.HasField('name'):
result.append(element.name)
else:
result.append(element.id)
return tuple(result)
def _isFloatNegative(value, encoded):
if value == 0:
return encoded[0] == 128
return value < 0
def _encodeDoubleSortably(value):
"""Encode a double into a sortable byte buffer."""
encoded = array.array('B')
encoded.fromstring(struct.pack('>d', value))
if _isFloatNegative(value, encoded):
    for i in range(8):
      encoded[i] ^= 0xFF
else:
encoded[0] ^= 0x80
return encoded
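# Illustrative sketch (hypothetical helper, never called): the encoding is
# order preserving, so comparing the encoded byte arrays gives the same result
# as comparing the doubles themselves.
def _SortableDoubleExample():
  return _encodeDoubleSortably(-1.5) < _encodeDoubleSortably(2.25)  # True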
def PropertyValueToKeyValue(prop_value):
"""Converts a entity_pb2.PropertyValue into a comparable hashable "key" value.
The values produces by this function mimic the native ording of the datastore
and uniquely identify the given PropertyValue.
Args:
prop_value: The entity_pb2.PropertyValue from which to construct the key
value.
Returns:
A comparable and hashable representation of the given property value.
"""
if not isinstance(prop_value, entity_pb2.PropertyValue):
raise datastore_errors.BadArgumentError(
'prop_value arg expected to be entity_pb2.PropertyValue (%r)' %
(prop_value,))
if prop_value.HasField('stringValue'):
return (entity_pb2.PropertyValue.STRINGVALUE_FIELD_NUMBER,
prop_value.stringValue)
if prop_value.HasField('int64Value'):
return (entity_pb2.PropertyValue.INT64VALUE_FIELD_NUMBER,
prop_value.int64Value)
if prop_value.HasField('booleanValue'):
return (entity_pb2.PropertyValue.BOOLEANVALUE_FIELD_NUMBER,
prop_value.booleanValue)
if prop_value.HasField('doubleValue'):
return (entity_pb2.PropertyValue.DOUBLEVALUE_FIELD_NUMBER,
tuple(sortable_pb_encoder.EncodeDouble(prop_value.doubleValue)))
if prop_value.HasField('pointvalue'):
return (entity_pb2.PropertyValue.POINTVALUE_FIELD_NUMBER,
prop_value.pointvalue.x, prop_value.pointvalue.y)
if prop_value.HasField('referencevalue'):
return ReferenceToKeyValue(prop_value.referencevalue)
if prop_value.HasField('uservalue'):
result = []
uservalue = prop_value.uservalue
if uservalue.HasField('email'):
result.append((entity_pb2.PropertyValue.UserValue.EMAIL_FIELD_NUMBER,
uservalue.email))
if uservalue.HasField('auth_domain'):
result.append(
(entity_pb2.PropertyValue.UserValue.AUTH_DOMAIN_FIELD_NUMBER,
uservalue.auth_domain))
if uservalue.HasField('nickname'):
result.append((entity_pb2.PropertyValue.UserValue.NICKNAME_FIELD_NUMBER,
uservalue.nickname))
if uservalue.HasField('gaiaid'):
result.append((entity_pb2.PropertyValue.UserValue.GAIAID_FIELD_NUMBER,
uservalue.gaiaid))
if uservalue.HasField('obfuscated_gaiaid'):
result.append(
(entity_pb2.PropertyValue.UserValue.OBFUSCATED_GAIAID_FIELD_NUMBER,
uservalue.obfuscated_gaiaid))
if uservalue.HasField('federated_identity'):
result.append(
(entity_pb2.PropertyValue.UserValue.FEDERATED_IDENTITY_FIELD_NUMBER,
uservalue.federated_identity))
if uservalue.HasField('federated_provider'):
result.append(
(entity_pb2.PropertyValue.UserValue.FEDERATED_PROVIDER_FIELD_NUMBER,
uservalue.federated_provider))
result.sort()
return (entity_pb2.PropertyValue.USERVALUE_FIELD_NUMBER, tuple(result))
return ()
def GetPropertyValueTag(value_pb):
"""Returns the tag constant associated with the given entity_pb2.PropertyValue."""
if value_pb.HasField('booleanValue'):
return entity_pb2.PropertyValue.BOOLEANVALUE_FIELD_NUMBER
elif value_pb.HasField('doubleValue'):
return entity_pb2.PropertyValue.DOUBLEVALUE_FIELD_NUMBER
elif value_pb.HasField('int64Value'):
return entity_pb2.PropertyValue.INT64VALUE_FIELD_NUMBER
elif value_pb.HasField('pointvalue'):
return entity_pb2.PropertyValue.POINTVALUE_FIELD_NUMBER
elif value_pb.HasField('referencevalue'):
return entity_pb2.PropertyValue.REFERENCEVALUE_FIELD_NUMBER
elif value_pb.HasField('stringValue'):
return entity_pb2.PropertyValue.STRINGVALUE_FIELD_NUMBER
elif value_pb.HasField('uservalue'):
return entity_pb2.PropertyValue.USERVALUE_FIELD_NUMBER
else:
return 0
|
py | b402ac62e6203d079bd61007391dd77cb62e2b0a | import django_filters
from django_filters import DateFilter, CharFilter
from .models import *
class FoodFilter(django_filters.FilterSet):
# start_date = DateFilter(field_name="date_created", lookup_expr='gte')
# end_date = DateFilter(field_name="date_created", lookup_expr='lte')
# note = CharFilter(field_name='note', lookup_expr='icontains')
class Meta:
model = Food
fields = '__all__'
#exclude = ['customer', 'date_created'] |
py | b402ad3415f8de9e1402997dc46f8ccf82f0e13c | #!/usr/bin/env python
#
# Copyright (C) 2017 Andrew Chow
import util
import sys
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLineEdit, QMessageBox, QInputDialog, QGroupBox, QHBoxLayout, QVBoxLayout, QGridLayout, QLabel
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import pyqtSlot, Qt
class App(QWidget):
def __init__(self):
super().__init__()
self.title = 'Bitcoin Payment Protocol Interface'
self.left = 10
self.top = 10
self.width = 700
self.height = 500
self.initUI()
def initUI(self):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
self.uri_box = QLineEdit(self)
go_button = QPushButton('Go!', self)
go_button.clicked.connect(self.handle_entered_uri)
self.main_box = QGroupBox("Bitcoin Payment Protocol Interface")
main_layout = QGridLayout()
main_layout.addWidget(QLabel("Bitcoin URI:"), 0, 0)
main_layout.addWidget(self.uri_box, 0, 1)
main_layout.addWidget(go_button, 0, 2)
self.payment_data_box = QGroupBox()
main_layout.addWidget(self.payment_data_box, 1, 1)
self.main_box.setLayout(main_layout)
windowLayout = QVBoxLayout()
windowLayout.addWidget(self.main_box)
self.setLayout(windowLayout)
self.show()
def display_pr(self, pr):
        if pr.error:
            print(pr.error)
            sys.exit()
else:
pr.verify()
self.payment_data_box.setTitle("Payment Request Data")
pr_data_layout = QGridLayout()
pr_data_layout.addWidget(QLabel("Network:"), 0, 0)
network_lbl = QLabel(pr.details.network)
network_lbl.setTextInteractionFlags(Qt.TextSelectableByMouse)
pr_data_layout.addWidget(network_lbl, 0, 1)
pr_data_layout.addWidget(QLabel("Requestor:"), 1, 0)
requestor_lbl = QLabel(pr.get_requestor())
requestor_lbl.setTextInteractionFlags(Qt.TextSelectableByMouse)
pr_data_layout.addWidget(requestor_lbl, 1, 1)
pr_data_layout.addWidget(QLabel("Memo:"), 2, 0)
memo_lbl = QLabel(pr.get_memo())
memo_lbl.setTextInteractionFlags(Qt.TextSelectableByMouse)
pr_data_layout.addWidget(memo_lbl, 2, 1)
pr_data_layout.addWidget(QLabel("Expiration:"), 3, 0)
expire_lbl = QLabel(util.format_time(pr.get_expiration_date()))
expire_lbl.setTextInteractionFlags(Qt.TextSelectableByMouse)
pr_data_layout.addWidget(expire_lbl, 3, 1)
pr_data_layout.addWidget(QLabel("Creation Time:"), 4, 0)
creation_lbl = QLabel(util.format_time(pr.details.time))
creation_lbl.setTextInteractionFlags(Qt.TextSelectableByMouse)
pr_data_layout.addWidget(creation_lbl, 4, 1)
pr_data_layout.addWidget(QLabel("Verification status:"), 5, 0)
verification_lbl = QLabel(pr.get_verify_status())
verification_lbl.setTextInteractionFlags(Qt.TextSelectableByMouse)
pr_data_layout.addWidget(verification_lbl, 5, 1)
pr_data_layout.addWidget(QLabel("Merchant Data:"), 6, 0)
merch_lbl = QLabel(str(pr.details.merchant_data))
merch_lbl.setTextInteractionFlags(Qt.TextSelectableByMouse)
pr_data_layout.addWidget(merch_lbl, 6, 1)
pr_data_layout.addWidget(QLabel("Outputs:"), 7, 0)
i = 0
for out in pr.get_outputs():
type_lbl = QLabel()
if out[0] == util.TYPE_ADDRESS:
pr_data_layout.addWidget(QLabel(" Type:"), 8 + i, 0)
type_lbl.setText("Address")
pr_data_layout.addWidget(QLabel(" Address:"), 8 + i + 1, 0)
elif out[0] == util.TYPE_PUBKEY:
pr_data_layout.addWidget(QLabel(" Type:"), 8 + i, 0)
type_lbl.setText("Public Key")
pr_data_layout.addWidget(QLabel(" Public Key:"), 8 + i + 1, 0)
elif out[0] == util.TYPE_SCRIPT:
pr_data_layout.addWidget(QLabel(" Type:"), 8 + i, 0)
type_lbl.setText("Script")
pr_data_layout.addWidget(QLabel(" Script:"), 8 + i + 1, 0)
else:
pr_data_layout.addWidget(QLabel(" Type:"), 8 + i, 0)
type_lbl.setText("Unknown")
pr_data_layout.addWidget(QLabel(" Data:"), 8 + i + 1, 0)
type_lbl.setTextInteractionFlags(Qt.TextSelectableByMouse)
pr_data_layout.addWidget(type_lbl, 8 + i, 1)
data_lbl = QLabel(out[1])
data_lbl.setTextInteractionFlags(Qt.TextSelectableByMouse)
pr_data_layout.addWidget(data_lbl, 8 + i + 1, 1)
amt_lbl = QLabel(util.format_satoshis(out[2]) + " BTC")
amt_lbl.setTextInteractionFlags(Qt.TextSelectableByMouse)
pr_data_layout.addWidget(QLabel(" Amount:"), 8 + i + 2, 0)
pr_data_layout.addWidget(amt_lbl, 8 + i + 2, 1)
i += 3
next_button = QPushButton("Next")
next_button.clicked.connect(self.make_further_instructions(pr))
pr_data_layout.addWidget(next_button, 8 + i, 0)
self.payment_data_box.setLayout(pr_data_layout)
@pyqtSlot()
def handle_entered_uri(self):
uri = self.uri_box.text().strip()
util.parse_URI(uri, self.display_pr)
def make_further_instructions(self, pr):
def further_instructions():
response = QMessageBox.information(self, "Next Step", "To continue, send the necessary amounts of Bitcoin to the addresses specified in the 'Outputs' field above. Once broadcast, press Yes to Continue or Cancel to quit.", QMessageBox.Cancel | QMessageBox.Yes, QMessageBox.Cancel)
if response == QMessageBox.Cancel:
sys.exit()
elif response == QMessageBox.Yes:
if pr.details.payment_url:
raw_tx, okPressed1 = QInputDialog.getText(self, "Enter Raw Transaction","Enter the hex of the transaction that was just made:", QLineEdit.Normal, "")
if okPressed1 and raw_tx != '':
ref_addr, okPressed2 = QInputDialog.getText(self, "Enter Refund Address","Enter a refund address:", QLineEdit.Normal, "")
if okPressed2 and ref_addr != '':
try:
result = pr.send_ack(raw_tx.strip(), ref_addr.strip())
if result[0]:
QMessageBox.information(self, "Complete!", "Payment request successful: " + result[1] + "\n\nClick Ok to exit", QMessageBox.Ok, QMessageBox.Ok)
sys.exit()
else:
QMessageBox.error(self, "Error!", "Payment request was not successful: " + result[1] + "\n\nClick Ok to exit", QMessageBox.Ok, QMessageBox.Ok)
sys.exit()
except:
QMessageBox.error(self, "Error!", "There was an error parsing the raw transaction or address. Please restart and try again.\n\nClick Ok to exit", QMessageBox.Ok, QMessageBox.Ok)
sys.exit()
return further_instructions
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = App()
sys.exit(app.exec_())
|
py | b402ae63c7e270ae78f59bbb0957a27d3c8f000f | # encoding: utf-8
from __future__ import unicode_literals
import mimetypes
import os
import re
import sys
from tempfile import NamedTemporaryFile
from unicodedata import normalize
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
import requests
from twitter import TwitterError
import twitter
if sys.version_info < (3,):
range = xrange
if sys.version_info > (3,):
unicode = str
CHAR_RANGES = [
range(0, 4351),
range(8192, 8205),
range(8208, 8223),
range(8242, 8247)]
TLDS = [
"ac", "ad", "ae", "af", "ag", "ai", "al", "am", "an", "ao", "aq", "ar",
"as", "at", "au", "aw", "ax", "az", "ba", "bb", "bd", "be", "bf", "bg",
"bh", "bi", "bj", "bl", "bm", "bn", "bo", "bq", "br", "bs", "bt", "bv",
"bw", "by", "bz", "ca", "cc", "cd", "cf", "cg", "ch", "ci", "ck", "cl",
"cm", "cn", "co", "cr", "cu", "cv", "cw", "cx", "cy", "cz", "de", "dj",
"dk", "dm", "do", "dz", "ec", "ee", "eg", "eh", "er", "es", "et", "eu",
"fi", "fj", "fk", "fm", "fo", "fr", "ga", "gb", "gd", "ge", "gf", "gg",
"gh", "gi", "gl", "gm", "gn", "gp", "gq", "gr", "gs", "gt", "gu", "gw",
"gy", "hk", "hm", "hn", "hr", "ht", "hu", "id", "ie", "il", "im", "in",
"io", "iq", "ir", "is", "it", "je", "jm", "jo", "jp", "ke", "kg", "kh",
"ki", "km", "kn", "kp", "kr", "kw", "ky", "kz", "la", "lb", "lc", "li",
"lk", "lr", "ls", "lt", "lu", "lv", "ly", "ma", "mc", "md", "me", "mf",
"mg", "mh", "mk", "ml", "mm", "mn", "mo", "mp", "mq", "mr", "ms", "mt",
"mu", "mv", "mw", "mx", "my", "mz", "na", "nc", "ne", "nf", "ng", "ni",
"nl", "no", "np", "nr", "nu", "nz", "om", "pa", "pe", "pf", "pg", "ph",
"pk", "pl", "pm", "pn", "pr", "ps", "pt", "pw", "py", "qa", "re", "ro",
"rs", "ru", "rw", "sa", "sb", "sc", "sd", "se", "sg", "sh", "si", "sj",
"sk", "sl", "sm", "sn", "so", "sr", "ss", "st", "su", "sv", "sx", "sy",
"sz", "tc", "td", "tf", "tg", "th", "tj", "tk", "tl", "tm", "tn", "to",
"tp", "tr", "tt", "tv", "tw", "tz", "ua", "ug", "uk", "um", "us", "uy",
"uz", "va", "vc", "ve", "vg", "vi", "vn", "vu", "wf", "ws", "ye", "yt",
"za", "zm", "zw", "ελ", "бел", "мкд", "мон", "рф", "срб", "укр", "қаз",
"հայ", "الاردن", "الجزائر", "السعودية", "المغرب", "امارات", "ایران", "بھارت",
"تونس", "سودان", "سورية", "عراق", "عمان", "فلسطين", "قطر", "مصر",
"مليسيا", "پاکستان", "भारत", "বাংলা", "ভারত", "ਭਾਰਤ", "ભારત",
"இந்தியா", "இலங்கை", "சிங்கப்பூர்", "భారత్", "ලංකා", "ไทย",
"გე", "中国", "中國", "台湾", "台灣", "新加坡", "澳門", "香港", "한국", "neric:",
"abb", "abbott", "abogado", "academy", "accenture", "accountant",
"accountants", "aco", "active", "actor", "ads", "adult", "aeg", "aero",
"afl", "agency", "aig", "airforce", "airtel", "allfinanz", "alsace",
"amsterdam", "android", "apartments", "app", "aquarelle", "archi", "army",
"arpa", "asia", "associates", "attorney", "auction", "audio", "auto",
"autos", "axa", "azure", "band", "bank", "bar", "barcelona", "barclaycard",
"barclays", "bargains", "bauhaus", "bayern", "bbc", "bbva", "bcn", "beer",
"bentley", "berlin", "best", "bet", "bharti", "bible", "bid", "bike",
"bing", "bingo", "bio", "biz", "black", "blackfriday", "bloomberg", "blue",
"bmw", "bnl", "bnpparibas", "boats", "bond", "boo", "boots", "boutique",
"bradesco", "bridgestone", "broker", "brother", "brussels", "budapest",
"build", "builders", "business", "buzz", "bzh", "cab", "cafe", "cal",
"camera", "camp", "cancerresearch", "canon", "capetown", "capital",
"caravan", "cards", "care", "career", "careers", "cars", "cartier",
"casa", "cash", "casino", "cat", "catering", "cba", "cbn", "ceb", "center",
"ceo", "cern", "cfa", "cfd", "chanel", "channel", "chat", "cheap",
"chloe", "christmas", "chrome", "church", "cisco", "citic", "city",
"claims", "cleaning", "click", "clinic", "clothing", "cloud", "club",
"coach", "codes", "coffee", "college", "cologne", "com", "commbank",
"community", "company", "computer", "condos", "construction", "consulting",
"contractors", "cooking", "cool", "coop", "corsica", "country", "coupons",
"courses", "credit", "creditcard", "cricket", "crown", "crs", "cruises",
"cuisinella", "cymru", "cyou", "dabur", "dad", "dance", "date", "dating",
"datsun", "day", "dclk", "deals", "degree", "delivery", "delta",
"democrat", "dental", "dentist", "desi", "design", "dev", "diamonds",
"diet", "digital", "direct", "directory", "discount", "dnp", "docs",
"dog", "doha", "domains", "doosan", "download", "drive", "durban", "dvag",
"earth", "eat", "edu", "education", "email", "emerck", "energy",
"engineer", "engineering", "enterprises", "epson", "equipment", "erni",
"esq", "estate", "eurovision", "eus", "events", "everbank", "exchange",
"expert", "exposed", "express", "fage", "fail", "faith", "family", "fan",
"fans", "farm", "fashion", "feedback", "film", "finance", "financial",
"firmdale", "fish", "fishing", "fit", "fitness", "flights", "florist",
"flowers", "flsmidth", "fly", "foo", "football", "forex", "forsale",
"forum", "foundation", "frl", "frogans", "fund", "furniture", "futbol",
"fyi", "gal", "gallery", "game", "garden", "gbiz", "gdn", "gent",
"genting", "ggee", "gift", "gifts", "gives", "giving", "glass", "gle",
"global", "globo", "gmail", "gmo", "gmx", "gold", "goldpoint", "golf",
"goo", "goog", "google", "gop", "gov", "graphics", "gratis", "green",
"gripe", "group", "guge", "guide", "guitars", "guru", "hamburg", "hangout",
"haus", "healthcare", "help", "here", "hermes", "hiphop", "hitachi", "hiv",
"hockey", "holdings", "holiday", "homedepot", "homes", "honda", "horse",
"host", "hosting", "hoteles", "hotmail", "house", "how", "hsbc", "ibm",
"icbc", "ice", "icu", "ifm", "iinet", "immo", "immobilien", "industries",
"infiniti", "info", "ing", "ink", "institute", "insure", "int",
"international", "investments", "ipiranga", "irish", "ist", "istanbul",
"itau", "iwc", "java", "jcb", "jetzt", "jewelry", "jlc", "jll", "jobs",
"joburg", "jprs", "juegos", "kaufen", "kddi", "kim", "kitchen", "kiwi",
"koeln", "komatsu", "krd", "kred", "kyoto", "lacaixa", "lancaster", "land",
"lasalle", "lat", "latrobe", "law", "lawyer", "lds", "lease", "leclerc",
"legal", "lexus", "lgbt", "liaison", "lidl", "life", "lighting", "limited",
"limo", "link", "live", "lixil", "loan", "loans", "lol", "london", "lotte",
"lotto", "love", "ltda", "lupin", "luxe", "luxury", "madrid", "maif",
"maison", "man", "management", "mango", "market", "marketing", "markets",
"marriott", "mba", "media", "meet", "melbourne", "meme", "memorial", "men",
"menu", "miami", "microsoft", "mil", "mini", "mma", "mobi", "moda", "moe",
"mom", "monash", "money", "montblanc", "mormon", "mortgage", "moscow",
"motorcycles", "mov", "movie", "movistar", "mtn", "mtpc", "museum",
"nadex", "nagoya", "name", "navy", "nec", "net", "netbank", "network",
"neustar", "new", "news", "nexus", "ngo", "nhk", "nico", "ninja", "nissan",
"nokia", "nra", "nrw", "ntt", "nyc", "office", "okinawa", "omega", "one",
"ong", "onl", "online", "ooo", "oracle", "orange", "org", "organic",
"osaka", "otsuka", "ovh", "page", "panerai", "paris", "partners", "parts",
"party", "pet", "pharmacy", "philips", "photo", "photography", "photos",
"physio", "piaget", "pics", "pictet", "pictures", "pink", "pizza", "place",
"play", "plumbing", "plus", "pohl", "poker", "porn", "post", "praxi",
"press", "pro", "prod", "productions", "prof", "properties", "property",
"pub", "qpon", "quebec", "racing", "realtor", "realty", "recipes", "red",
"redstone", "rehab", "reise", "reisen", "reit", "ren", "rent", "rentals",
"repair", "report", "republican", "rest", "restaurant", "review",
"reviews", "rich", "ricoh", "rio", "rip", "rocks", "rodeo", "rsvp", "ruhr",
"run", "ryukyu", "saarland", "sakura", "sale", "samsung", "sandvik",
"sandvikcoromant", "sanofi", "sap", "sarl", "saxo", "sca", "scb",
"schmidt", "scholarships", "school", "schule", "schwarz", "science",
"scor", "scot", "seat", "seek", "sener", "services", "sew", "sex", "sexy",
"shiksha", "shoes", "show", "shriram", "singles", "site", "ski", "sky",
"skype", "sncf", "soccer", "social", "software", "sohu", "solar",
"solutions", "sony", "soy", "space", "spiegel", "spreadbetting", "srl",
"starhub", "statoil", "studio", "study", "style", "sucks", "supplies",
"supply", "support", "surf", "surgery", "suzuki", "swatch", "swiss",
"sydney", "systems", "taipei", "tatamotors", "tatar", "tattoo", "tax",
"taxi", "team", "tech", "technology", "tel", "telefonica", "temasek",
"tennis", "thd", "theater", "tickets", "tienda", "tips", "tires", "tirol",
"today", "tokyo", "tools", "top", "toray", "toshiba", "tours", "town",
"toyota", "toys", "trade", "trading", "training", "travel", "trust", "tui",
"ubs", "university", "uno", "uol", "vacations", "vegas", "ventures",
"vermögensberater", "vermögensberatung", "versicherung", "vet", "viajes",
"video", "villas", "vin", "vision", "vista", "vistaprint", "vlaanderen",
"vodka", "vote", "voting", "voto", "voyage", "wales", "walter", "wang",
"watch", "webcam", "website", "wed", "wedding", "weir", "whoswho", "wien",
"wiki", "williamhill", "win", "windows", "wine", "wme", "work", "works",
"world", "wtc", "wtf", "xbox", "xerox", "xin", "xperia", "xxx", "xyz",
"yachts", "yandex", "yodobashi", "yoga", "yokohama", "youtube", "zip",
"zone", "zuerich", "дети", "ком", "москва", "онлайн", "орг", "рус", "сайт",
"קום", "بازار", "شبكة", "كوم", "موقع", "कॉम", "नेट", "संगठन", "คอม",
"みんな", "グーグル", "コム", "世界", "中信", "中文网", "企业", "佛山", "信息",
"健康", "八卦", "公司", "公益", "商城", "商店", "商标", "在线", "大拿", "娱乐",
"工行", "广东", "慈善", "我爱你", "手机", "政务", "政府", "新闻", "时尚", "机构",
"淡马锡", "游戏", "点看", "移动", "组织机构", "网址", "网店", "网络", "谷歌", "集团",
"飞利浦", "餐厅", "닷넷", "닷컴", "삼성", "onion"]
URL_REGEXP = re.compile((
r'('
r'^(?!(https?://|www\.)?\.|ftps?://|([0-9]+\.){{1,3}}\d+)' # exclude urls that start with "."
r'(?:https?://|www\.)*^(?!.*@)(?:[\w+-_]+[.])' # beginning of url
r'(?:{0}\b' # all tlds
r'(?:[:0-9]))' # port numbers & close off TLDs
r'(?:[\w+\/]?[a-z0-9!\*\'\(\);:&=\+\$/%#\[\]\-_\.,~?])*' # path/query params
r')').format(r'\b|'.join(TLDS)), re.U | re.I | re.X)
def calc_expected_status_length(status, short_url_length=23):
""" Calculates the length of a tweet, taking into account Twitter's
replacement of URLs with https://t.co links.
Args:
status: text of the status message to be posted.
        short_url_length: the length currently published for https://t.co links.
Returns:
Expected length of the status message as an integer.
"""
status_length = 0
if isinstance(status, bytes):
status = unicode(status)
for word in re.split(r'\s', status):
if is_url(word):
status_length += short_url_length
else:
for character in word:
if any([ord(normalize("NFC", character)) in char_range for char_range in CHAR_RANGES]):
status_length += 1
else:
status_length += 2
status_length += len(re.findall(r'\s', status))
return status_length
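# Illustrative sketch (hypothetical helper, never called): any URL in the
# status counts as short_url_length (23 by default) characters, however long
# it actually is, while ordinary ASCII words count one per character.
def _calc_expected_status_length_example():
    status = "reading http://a-very-long-domain.example.com/some/path now"
    return calc_expected_status_length(status)  # 7 + 1 + 23 + 1 + 3 = 35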
def is_url(text):
""" Checks to see if a bit of text is a URL.
Args:
text: text to check.
Returns:
Boolean of whether the text should be treated as a URL or not.
"""
return bool(re.findall(URL_REGEXP, text))
def http_to_file(http):
data_file = NamedTemporaryFile()
req = requests.get(http, stream=True)
for chunk in req.iter_content(chunk_size=1024 * 1024):
data_file.write(chunk)
return data_file
def parse_media_file(passed_media, async_upload=False):
""" Parses a media file and attempts to return a file-like object and
information about the media file.
Args:
passed_media: media file which to parse.
async_upload: flag, for validation media file attributes.
Returns:
file-like object, the filename of the media file, the file size, and
the type of media.
"""
img_formats = ['image/jpeg',
'image/png',
'image/bmp',
'image/webp']
long_img_formats = [
'image/gif'
]
video_formats = ['video/mp4',
'video/quicktime']
# If passed_media is a string, check if it points to a URL, otherwise,
# it should point to local file. Create a reference to a file obj for
# each case such that data_file ends up with a read() method.
if not hasattr(passed_media, 'read'):
if passed_media.startswith('http'):
data_file = http_to_file(passed_media)
filename = os.path.basename(urlparse(passed_media).path)
else:
data_file = open(os.path.realpath(passed_media), 'rb')
filename = os.path.basename(passed_media)
# Otherwise, if a file object was passed in the first place,
# create the standard reference to media_file (i.e., rename it to fp).
else:
        if passed_media.mode not in ['rb', 'rb+', 'w+b']:
            raise TwitterError('File mode must be "rb", "rb+" or "w+b"')
filename = os.path.basename(passed_media.name)
data_file = passed_media
data_file.seek(0, 2)
file_size = data_file.tell()
try:
data_file.seek(0)
    except Exception:
        # Not every file-like object is seekable; ignore the failure and carry on.
        pass
media_type = mimetypes.guess_type(os.path.basename(filename))[0]
if media_type is not None:
if media_type in img_formats and file_size > 5 * 1048576:
raise TwitterError({'message': 'Images must be less than 5MB.'})
elif media_type in long_img_formats and file_size > 15 * 1048576:
raise TwitterError({'message': 'GIF Image must be less than 15MB.'})
elif media_type in video_formats and not async_upload and file_size > 15 * 1048576:
raise TwitterError({'message': 'Videos must be less than 15MB.'})
elif media_type in video_formats and async_upload and file_size > 512 * 1048576:
raise TwitterError({'message': 'Videos must be less than 512MB.'})
elif media_type not in img_formats and media_type not in video_formats and media_type not in long_img_formats:
raise TwitterError({'message': 'Media type could not be determined.'})
return data_file, filename, file_size, media_type
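# Illustrative sketch (hypothetical helper and filename, never called): a local
# path, an http(s) URL, or an already-open binary file object are all accepted.
def _parse_media_file_example():
    data_file, filename, file_size, media_type = parse_media_file('cat.jpg')
    return media_type  # 'image/jpeg' for a .jpg path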
def enf_type(field, _type, val):
""" Checks to see if a given val for a field (i.e., the name of the field)
is of the proper _type. If it is not, raises a TwitterError with a brief
explanation.
Args:
field:
Name of the field you are checking.
_type:
Type that the value should be returned as.
val:
Value to convert to _type.
Returns:
val converted to type _type.
"""
try:
return _type(val)
except ValueError:
raise TwitterError({
'message': '"{0}" must be type {1}'.format(field, _type.__name__)
})
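# Illustrative sketch (hypothetical helper and field names, never called):
# enf_type either coerces the value or raises a TwitterError naming the field.
def _enf_type_example():
    count = enf_type('count', int, '20')     # 20
    ratio = enf_type('ratio', float, '0.5')  # 0.5
    return count, ratio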
def parse_arg_list(args, attr):
out = []
if isinstance(args, (str, unicode)):
out.append(args)
elif isinstance(args, twitter.User):
out.append(getattr(args, attr))
elif isinstance(args, (list, tuple)):
for item in args:
if isinstance(item, (str, unicode)):
out.append(item)
elif isinstance(item, twitter.User):
out.append(getattr(item, attr))
return ",".join([str(item) for item in out])
|
py | b402af07b5bdb53c1165bf79f5596044fc6d2ad3 | # filename = 'test.txt'
# puzzleInput=[]
# filename = 'input.txt'
# puzzleInput=[]
# initialize input
# with open(filename) as file:
# for line in file:
# puzzleInput.append(line)
#puzzleInput = [1,3,2]
#puzzleInput = [1,20,8,12,0,14]
puzzleInput = [14,0,12,8,20,1]
lastNumber = puzzleInput[0]
previousNumbers = puzzleInput[:]
count = len(puzzleInput)
while True:
if lastNumber in previousNumbers[1:]:
for i in range(1, len(previousNumbers)):
if lastNumber == previousNumbers[i]:
previousNumbers.insert(0, i)
break
else:
previousNumbers.insert(0, 0)
count += 1
lastNumber = previousNumbers[0]
if count == 30000000:
print( count, lastNumber )
break
#print( count, lastNumber )
#print( previousNumbers )
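# The list-based scan above is quadratic: every turn searches (and grows) the
# whole history, which makes 30,000,000 turns impractically slow. A dictionary
# mapping each number to the last turn it was spoken keeps every turn O(1).
# Sketch of that variant (helper is not called by this script):
def memory_game(starting, turns):
    last_seen = {n: i + 1 for i, n in enumerate(starting[:-1])}
    current = starting[-1]
    for turn in range(len(starting), turns):
        previously = last_seen.get(current)
        last_seen[current] = turn
        current = 0 if previously is None else turn - previously
    return current
# e.g. memory_game([1, 20, 8, 12, 0, 14], 30000000) with the original ordering.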
|
py | b402af3fb95d3b9b0a4e8a2565804ac0407fe2a9 | #!/usr/bin/env python
####################
# Required Modules #
####################
# Generic/Built-in
from typing import Tuple, List, Dict, Union
# Libs
import syft as sy
from syft.workers.websocket_client import WebsocketClientWorker
# Custom
from .base import BaseAlgorithm
from synalgo.interfaces import Arguments, EarlyStopping, Model
##################
# Configurations #
##################
########################################
# Federated Algorithm Class - FedSplit #
########################################
class FedSplit(BaseAlgorithm):
"""
    Implements the FedSplit federated algorithm.
Attributes:
action (str): Type of ML operation to be executed. Supported options
are as follows:
1) 'regress': Orchestrates FL grid to perform regression
2) 'classify': Orchestrates FL grid to perform classification
3) 'cluster': TBA
4) 'associate': TBA
crypto_provider (VirtualWorker): Trusted Third Party coordinating FL
workers (list(WebsocketClientWorker)): All particiating CLIENT workers
arguments (Arguments): Arguments to be passed into each FL function
train_loader (sy.FederatedLoader): Training data in configured batches
eval_loader (sy.FederatedLoader): Validation data in configured batches
test_loader (sy.FederatedLoader): Testing data in configured batches
global_model (Model): Federatedly-trained Global model
local_models (dict(str, Models)): Most recent cache of local models
loss_history (dict): Local & global losses tracked throughout FL cycle
out_dir (str): Output directory for exporting models & metrics
checkpoints (dict): All checkpointed models & metrics accumulated
"""
def __init__(
self,
crypto_provider: sy.VirtualWorker,
workers: List[WebsocketClientWorker],
arguments: Arguments,
train_loader: sy.FederatedDataLoader,
eval_loader: sy.FederatedDataLoader,
test_loader: sy.FederatedDataLoader,
global_model: Model,
local_models: Dict[str, Model] = {},
out_dir: str = '.',
):
super().__init__(
crypto_provider=crypto_provider,
workers=workers,
arguments=arguments,
train_loader=train_loader,
eval_loader=eval_loader,
test_loader=test_loader,
global_model=global_model,
local_models=local_models,
out_dir=out_dir
)
##################
# Core functions #
##################
def analyse(self):
""" Calculates contributions of all workers towards the final global
model.
"""
raise NotImplementedError |
py | b402af75fe3b3c7f83e74d3954732e80e005a54c | import numpy
import theano
import theano.tensor as tt
from theano.gradient import disconnected_grad as stop_grad
x = tt.dscalar('x')
y = x ** 2
gy = tt.grad(y, x)
f = theano.function([x], gy)
f(4)
numpy.allclose(f(94.2), 188.4)
fy = theano.function([x], y)
fy(4)
def magicbox(x):
return tt.exp(x-stop_grad(x))
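# Note on the construction above: magicbox(z) = exp(z - stop_grad(z)) always
# *evaluates* to 1 because the exponent is numerically zero, but stop_grad
# blocks the gradient of the subtracted term, so d/dx magicbox(f(x)) = f'(x).
# That is why fy2(4) below returns 1.0 while f2(4) returns 8.0, the gradient
# of x**2 at x = 4. This is the "MagicBox" operator from the DiCE estimator.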
y2 = magicbox(x ** 2)
fy2 = theano.function([x], y2)
fy2(4)
gy2 = tt.grad(y2, x)
f2 = theano.function([x], gy2)
f2(4)
|
py | b402afa24912c65d2f66665b9cfa0eb624073fdd | from wafer.talks.models import Talk, TalkType
from wafer.tests.utils import create_user
def create_talk_type(name):
"""Create a talk type"""
return TalkType.objects.create(name=name)
def create_talk(title, status, username=None, user=None, talk_type=None):
if sum((user is None, username is None)) != 1:
raise ValueError('One of user OR username must be specified')
if username:
user = create_user(username)
talk = Talk.objects.create(
title=title, status=status, corresponding_author_id=user.id)
talk.authors.add(user)
talk.notes = "Some notes for talk %s" % title
talk.private_notes = "Some private notes for talk %s" % title
talk.save()
if talk_type:
talk.talk_type = talk_type
talk.save()
return talk
|
py | b402b02cba74aed287be35c2995de6df4b341aa1 | #!/usr/bin/env python3
"""
Generate i32x4 integer arithmetic operation cases.
"""
from simd_arithmetic import SimdArithmeticCase
class SimdI32x4ArithmeticCase(SimdArithmeticCase):
LANE_LEN = 4
LANE_TYPE = 'i32x4'
@property
def hex_binary_op_test_data(self):
return [
('0x3fffffff', '0x40000000'),
('0x40000000', '0x40000000'),
('-0x3fffffff', '-0x40000000'),
('-0x40000000', '-0x40000000'),
('-0x40000000', '-0x40000001'),
('0x7fffffff', '0x7fffffff'),
('0x7fffffff', '0x01'),
('0x80000000', '-0x01'),
('0x7fffffff', '0x80000000'),
('0x80000000', '0x80000000'),
('0xffffffff', '0x01'),
('0xffffffff', '0xffffffff')
]
@property
def hex_unary_op_test_data(self):
return ['0x01', '-0x01', '-0x80000000', '-0x7fffffff', '0x7fffffff', '0x80000000', '0xffffffff']
@property
def underscore_literal_test_data(self):
return {
'i32x4.add': [
[['01_234_567_890', '01_234_567_890'], '02_469_135_780', ['i32x4'] * 3],
[['0x0_1234_5678', '0x0_90AB_cdef'], '0x0_a2e0_2467', ['i32x4'] * 3]
],
'i32x4.sub': [
[['03_214_567_890 ', '01_234_567_890 '], '01_980_000_000', ['i32x4'] * 3],
[['0x0_90AB_cdef', '0x0_1234_5678'], '0x0_7e77_7777', ['i32x4'] * 3]
],
'i32x4.mul': [
[['0_123_456_789', '0_987_654_321'], '04_227_814_277', ['i32x4'] * 3],
[['0x0_1234_5678', '0x0_90AB_cdef'], '0x0_2a42_d208', ['i32x4'] * 3]
]
}
@property
def i32x4_i8x16_test_data(self):
return {
'i32x4.add': [
[['0x7fffffff', ['0', '0', '0', '0x80'] * 4], '-1', ['i32x4', 'i8x16', 'i32x4']],
[['1', '255'], '0', ['i32x4', 'i8x16', 'i32x4']]
],
'i32x4.sub': [
[['0x7fffffff', ['0', '0', '0', '0x80'] * 4], '-1', ['i32x4', 'i8x16', 'i32x4']],
[['1', '255'], '2', ['i32x4', 'i8x16', 'i32x4']]
],
'i32x4.mul': [
[['0x10000000', '0x10'], '0', ['i32x4', 'i8x16', 'i32x4']],
[['0xffffffff', '255'], '1', ['i32x4', 'i8x16', 'i32x4']]
]
}
@property
def i32x4_i16x8_test_data(self):
return {
'i32x4.add': [
[['0x7fffffff', ['0', '0x8000'] * 4], '-1', ['i32x4', 'i16x8', 'i32x4']],
[['1', '0xffff'], '0', ['i32x4', 'i16x8', 'i32x4']]
],
'i32x4.sub': [
[['0x7fffffff', ['0', '0x8000'] * 4], '-1', ['i32x4', 'i16x8', 'i32x4']],
[['1', '0xffff'], '0x02', ['i32x4', 'i16x8', 'i32x4']]
],
'i32x4.mul': [
[['0x80000000', ['0', '0x02'] * 4], '0', ['i32x4', 'i16x8', 'i32x4']],
[['0xffffffff', '0xffff'], '1', ['i32x4', 'i16x8', 'i32x4']]
]
}
@property
def i32x4_f32x4_test_data(self):
return {
'i32x4.add': [
[['0x80000000', '+0.0'], '0x80000000', ['i32x4', 'f32x4', 'i32x4']],
[['0x80000000', '-0.0'], '0', ['i32x4', 'f32x4', 'i32x4']],
[['0x80000000', '1.0'], '0xbf800000', ['i32x4', 'f32x4', 'i32x4']],
[['0x80000000', '-1.0'], '0x3f800000', ['i32x4', 'f32x4', 'i32x4']],
[['1', '+inf'], '0x7f800001', ['i32x4', 'f32x4', 'i32x4']],
[['1', '-inf'], '0xff800001', ['i32x4', 'f32x4', 'i32x4']],
[['1', 'nan'], '0x7fc00001', ['i32x4', 'f32x4', 'i32x4']]
],
'i32x4.sub': [
[['0x80000000', '+0.0'], '0x80000000', ['i32x4', 'f32x4', 'i32x4']],
[['0x80000000', '-0.0'], '0', ['i32x4', 'f32x4', 'i32x4']],
[['0x80000000', '1.0'], '0x40800000', ['i32x4', 'f32x4', 'i32x4']],
[['0x80000000', '-1.0'], '0xc0800000', ['i32x4', 'f32x4', 'i32x4']],
[['0x1', '+inf'], '0x80800001', ['i32x4', 'f32x4', 'i32x4']],
[['0x1', '-inf'], '0x00800001', ['i32x4', 'f32x4', 'i32x4']],
[['0x1', 'nan'], '0x80400001', ['i32x4', 'f32x4', 'i32x4']]
],
'i32x4.mul': [
[['0x8000', '+0.0'], '0', ['i32x4', 'f32x4', 'i32x4']],
[['0x8000', '-0.0'], '0', ['i32x4', 'f32x4', 'i32x4']],
[['0x8000', '1.0'], '0', ['i32x4', 'f32x4', 'i32x4']],
[['0x8000', '-1.0'], '0', ['i32x4', 'f32x4', 'i32x4']],
[['0x1', '+inf'], '0x7f800000', ['i32x4', 'f32x4', 'i32x4']],
[['0x1', '-inf'], '0xff800000', ['i32x4', 'f32x4', 'i32x4']],
[['0x1', 'nan'], '0x7fc00000', ['i32x4', 'f32x4', 'i32x4']]
]
}
@property
def combine_dec_hex_test_data(self):
return {
'i32x4.add': [
[[['0', '1', '2', '3'],
['0', '0xffffffff', '0xfffffffe', '0xfffffffd']],
['0'] * 16, ['i32x4'] * 3]
],
'i32x4.sub': [
[[['0', '1', '2', '3'],
['0', '0xffffffff', '0xfffffffe', '0xfffffffd']],
['0', '0x02', '0x04', '0x06'], ['i32x4'] * 3]
],
'i32x4.mul': [
[[['0', '1', '2', '3'],
['0', '0xffffffff', '0xfffffffe', '0xfffffffd']],
['0', '0xffffffff', '0xfffffffc', '0xfffffff7'],
['i32x4'] * 3]
]
}
@property
def range_test_data(self):
return {
'i32x4.add': [
[[[str(i) for i in range(4)], [str(i * 2) for i in range(4)]],
[str(i * 3) for i in range(4)], ['i32x4'] * 3]
],
'i32x4.sub': [
[[[str(i) for i in range(4)], [str(i * 2) for i in range(4)]],
[str(-i) for i in range(4)], ['i32x4'] * 3]
],
'i32x4.mul': [
[[[str(i) for i in range(4)], [str(i * 2) for i in range(4)]],
['0', '0x02', '0x08', '0x12'],
['i32x4'] * 3]
]
}
@property
def full_bin_test_data(self):
return [
self.i32x4_i8x16_test_data,
self.i32x4_i16x8_test_data,
self.i32x4_f32x4_test_data,
self.combine_dec_hex_test_data,
self.range_test_data,
self.underscore_literal_test_data
]
def gen_test_cases():
simd_i32x4_arith = SimdI32x4ArithmeticCase()
simd_i32x4_arith.gen_test_cases()
if __name__ == '__main__':
gen_test_cases() |
py | b402b040d9268729da0ea92771cd5f6b4e4b9b4d | import bz2
from collections import Counter
from contextlib import contextmanager
from datetime import datetime
from functools import wraps
import gzip
import operator
import os
from shutil import rmtree
import string
import tempfile
from typing import Any, Callable, ContextManager, List, Optional, Type, Union, cast
import warnings
import zipfile
import numpy as np
from numpy.random import rand, randn
from pandas._config.localization import ( # noqa:F401
can_set_locale,
get_locales,
set_locale,
)
from pandas._libs.lib import no_default
import pandas._libs.testing as _testing
from pandas._typing import Dtype, FilePathOrBuffer, FrameOrSeries
from pandas.compat import _get_lzma_file, _import_lzma
from pandas.core.dtypes.common import (
is_bool,
is_categorical_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_extension_array_dtype,
is_interval_dtype,
is_number,
is_numeric_dtype,
is_period_dtype,
is_sequence,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import array_equivalent
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
bdate_range,
)
from pandas.core.algorithms import take_1d
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
IntervalArray,
PeriodArray,
TimedeltaArray,
period_array,
)
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
from pandas.io.common import urlopen
from pandas.io.formats.printing import pprint_thing
lzma = _import_lzma()
_N = 30
_K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
UNSIGNED_INT_DTYPES: List[Dtype] = ["uint8", "uint16", "uint32", "uint64"]
UNSIGNED_EA_INT_DTYPES: List[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
SIGNED_INT_DTYPES: List[Dtype] = [int, "int8", "int16", "int32", "int64"]
SIGNED_EA_INT_DTYPES: List[Dtype] = ["Int8", "Int16", "Int32", "Int64"]
ALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES
ALL_EA_INT_DTYPES = UNSIGNED_EA_INT_DTYPES + SIGNED_EA_INT_DTYPES
FLOAT_DTYPES: List[Dtype] = [float, "float32", "float64"]
COMPLEX_DTYPES: List[Dtype] = [complex, "complex64", "complex128"]
STRING_DTYPES: List[Dtype] = [str, "str", "U"]
DATETIME64_DTYPES: List[Dtype] = ["datetime64[ns]", "M8[ns]"]
TIMEDELTA64_DTYPES: List[Dtype] = ["timedelta64[ns]", "m8[ns]"]
BOOL_DTYPES = [bool, "bool"]
BYTES_DTYPES = [bytes, "bytes"]
OBJECT_DTYPES = [object, "object"]
ALL_REAL_DTYPES = FLOAT_DTYPES + ALL_INT_DTYPES
ALL_NUMPY_DTYPES = (
ALL_REAL_DTYPES
+ COMPLEX_DTYPES
+ STRING_DTYPES
+ DATETIME64_DTYPES
+ TIMEDELTA64_DTYPES
+ BOOL_DTYPES
+ OBJECT_DTYPES
+ BYTES_DTYPES
)
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
warnings.simplefilter("always", _testing_mode_warnings)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
warnings.simplefilter("ignore", _testing_mode_warnings)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option("^display.", silent=True)
def round_trip_pickle(
obj: Any, path: Optional[FilePathOrBuffer] = None
) -> FrameOrSeries:
"""
Pickle an object and then read it again.
Parameters
----------
obj : any object
The object to pickle and then re-read.
path : str, path object or file-like object, default None
The path where the pickled object is written and then read.
Returns
-------
pandas object
The original object that was pickled and then re-read.
"""
_path = path
if _path is None:
_path = f"__{rands(10)}__.pickle"
with ensure_clean(_path) as temp_path:
pd.to_pickle(obj, temp_path)
return pd.read_pickle(temp_path)
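# Illustrative sketch (hypothetical helper, never called): round-trip a small
# frame through pickle and confirm it survives unchanged. assert_frame_equal is
# defined further down in this module.
def _round_trip_pickle_example():
    df = DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})
    result = round_trip_pickle(df)
    assert_frame_equal(df, result)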
def round_trip_pathlib(writer, reader, path: Optional[str] = None):
"""
Write an object to file specified by a pathlib.Path and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
pandas object
The original object that was serialized and then re-read.
"""
import pytest
Path = pytest.importorskip("pathlib").Path
if path is None:
path = "___pathlib___"
with ensure_clean(path) as path:
writer(Path(path))
obj = reader(Path(path))
return obj
def round_trip_localpath(writer, reader, path: Optional[str] = None):
"""
Write an object to file specified by a py.path LocalPath and read it back.
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
pandas object
The original object that was serialized and then re-read.
"""
import pytest
LocalPath = pytest.importorskip("py.path").local
if path is None:
path = "___localpath___"
with ensure_clean(path) as path:
writer(LocalPath(path))
obj = reader(LocalPath(path))
return obj
@contextmanager
def decompress_file(path, compression):
"""
Open a compressed file and return a file object.
Parameters
----------
path : str
The path where the file is read from.
compression : {'gzip', 'bz2', 'zip', 'xz', None}
Name of the decompression to use
Returns
-------
file object
"""
if compression is None:
f = open(path, "rb")
elif compression == "gzip":
f = gzip.open(path, "rb")
elif compression == "bz2":
f = bz2.BZ2File(path, "rb")
elif compression == "xz":
f = _get_lzma_file(lzma)(path, "rb")
elif compression == "zip":
zip_file = zipfile.ZipFile(path)
zip_names = zip_file.namelist()
if len(zip_names) == 1:
f = zip_file.open(zip_names.pop())
else:
raise ValueError(f"ZIP file {path} error. Only one file per ZIP.")
else:
raise ValueError(f"Unrecognized compression type: {compression}")
try:
yield f
finally:
f.close()
if compression == "zip":
zip_file.close()
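# Illustrative sketch (hypothetical helper and path, never called): the context
# manager yields a binary file handle and closes it (and any ZIP archive) on
# exit.
def _decompress_file_example():
    with decompress_file("data.csv.gz", compression="gzip") as handle:
        return handle.read()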
def write_to_compressed(compression, path, data, dest="test"):
"""
Write data to a compressed file.
Parameters
----------
compression : {'gzip', 'bz2', 'zip', 'xz'}
The compression type to use.
path : str
The file path to write the data.
data : str
The data to write.
dest : str, default "test"
The destination file (for ZIP only)
Raises
------
ValueError : An invalid compression value was passed in.
"""
if compression == "zip":
compress_method = zipfile.ZipFile
elif compression == "gzip":
compress_method = gzip.GzipFile
elif compression == "bz2":
compress_method = bz2.BZ2File
elif compression == "xz":
compress_method = _get_lzma_file(lzma)
else:
raise ValueError(f"Unrecognized compression type: {compression}")
if compression == "zip":
mode = "w"
args = (dest, data)
method = "writestr"
else:
mode = "wb"
args = (data,)
method = "write"
with compress_method(path, mode=mode) as f:
getattr(f, method)(*args)
def _get_tol_from_less_precise(check_less_precise: Union[bool, int]) -> float:
"""
Return the tolerance equivalent to the deprecated `check_less_precise`
parameter.
Parameters
----------
check_less_precise : bool or int
Returns
-------
float
Tolerance to be used as relative/absolute tolerance.
Examples
--------
>>> # Using check_less_precise as a bool:
>>> _get_tol_from_less_precise(False)
0.5e-5
>>> _get_tol_from_less_precise(True)
0.5e-3
>>> # Using check_less_precise as an int representing the decimal
>>> # tolerance intended:
>>> _get_tol_from_less_precise(2)
0.5e-2
>>> _get_tol_from_less_precise(8)
0.5e-8
"""
if isinstance(check_less_precise, bool):
if check_less_precise:
# 3-digit tolerance
return 0.5e-3
else:
# 5-digit tolerance
return 0.5e-5
else:
# Equivalent to setting checking_less_precise=<decimals>
return 0.5 * 10 ** -check_less_precise
def assert_almost_equal(
left,
right,
check_dtype: Union[bool, str] = "equiv",
check_less_precise: Union[bool, int] = no_default,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
**kwargs,
):
"""
Check that the left and right objects are approximately equal.
By approximately equal, we refer to objects that are numbers or that
contain numbers which may be equivalent to specific levels of precision.
Parameters
----------
left : object
right : object
check_dtype : bool or {'equiv'}, default 'equiv'
Check dtype if both a and b are the same type. If 'equiv' is passed in,
then `RangeIndex` and `Int64Index` are also considered equivalent
when doing type checking.
check_less_precise : bool or int, default False
Specify comparison precision. 5 digits (False) or 3 digits (True)
after decimal points are compared. If int, then specify the number
of digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
rtol : float, default 1e-5
Relative tolerance.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance.
.. versionadded:: 1.1.0
"""
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
if isinstance(left, pd.Index):
assert_index_equal(
left,
right,
check_exact=False,
exact=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
elif isinstance(left, pd.Series):
assert_series_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
elif isinstance(left, pd.DataFrame):
assert_frame_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
else:
# Other sequences.
if check_dtype:
if is_number(left) and is_number(right):
# Do not compare numeric classes, like np.float64 and float.
pass
elif is_bool(left) and is_bool(right):
# Do not compare bool classes, like np.bool_ and bool.
pass
else:
if isinstance(left, np.ndarray) or isinstance(right, np.ndarray):
obj = "numpy array"
else:
obj = "Input"
assert_class_equal(left, right, obj=obj)
_testing.assert_almost_equal(
left, right, check_dtype=check_dtype, rtol=rtol, atol=atol, **kwargs
)
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
cls_name = cls.__name__
if not isinstance(left, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(left)} instead"
)
if not isinstance(right, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(right)} instead"
)
def assert_dict_equal(left, right, compare_keys: bool = True):
_check_isinstance(left, right, dict)
_testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def randbool(size=(), p: float = 0.5):
return rand(*size) <= p
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits), dtype=(np.str_, 1))
RANDU_CHARS = np.array(
list("".join(map(chr, range(1488, 1488 + 26))) + string.digits),
dtype=(np.unicode_, 1),
)
def rands_array(nchars, size, dtype="O"):
"""
    Generate an array of fixed-length random strings.
"""
retval = (
np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
.view((np.str_, nchars))
.reshape(size)
)
return retval.astype(dtype)
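# Illustrative sketch (helper name is ours): rands_array builds an
# object-dtype array of fixed-length random strings, handy for throwaway
# test data.
def _example_rands_array():
    arr = rands_array(5, size=(2, 3))
    assert arr.shape == (2, 3)
    assert all(len(s) == 5 for s in arr.ravel())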
def randu_array(nchars, size, dtype="O"):
"""
Generate an array of unicode strings.
"""
retval = (
np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
.view((np.unicode_, nchars))
.reshape(size)
)
return retval.astype(dtype)
def rands(nchars):
"""
    Generate one random fixed-length string.
See `rands_array` if you want to create an array of random strings.
"""
return "".join(np.random.choice(RANDS_CHARS, nchars))
def close(fignum=None):
from matplotlib.pyplot import close as _close, get_fignums
if fignum is None:
for fignum in get_fignums():
_close(fignum)
else:
_close(fignum)
# -----------------------------------------------------------------------------
# contextmanager to ensure the file cleanup
@contextmanager
def ensure_clean(filename=None, return_filelike=False, **kwargs):
"""
    Gets a temporary file path and ensures it is removed when the context exits.
Parameters
----------
filename : str (optional)
if None, creates a temporary file which is then removed when out of
scope. if passed, creates temporary file with filename as ending.
return_filelike : bool (default False)
if True, returns a file-like which is *always* cleaned. Necessary for
savefig and other functions which want to append extensions.
**kwargs
Additional keywords passed in for creating a temporary file.
    :meth:`tempfile.TemporaryFile` is used when `return_filelike` is ``True``.
:meth:`tempfile.mkstemp` is used when `return_filelike` is ``False``.
Note that the `filename` parameter will be passed in as the `suffix`
argument to either function.
See Also
--------
tempfile.TemporaryFile
tempfile.mkstemp
"""
filename = filename or ""
fd = None
kwargs["suffix"] = filename
if return_filelike:
f = tempfile.TemporaryFile(**kwargs)
try:
yield f
finally:
f.close()
else:
# Don't generate tempfile if using a path with directory specified.
if len(os.path.dirname(filename)):
raise ValueError("Can't pass a qualified name to ensure_clean()")
try:
fd, filename = tempfile.mkstemp(**kwargs)
except UnicodeEncodeError:
import pytest
pytest.skip("no unicode file names on this system")
try:
yield filename
finally:
try:
os.close(fd)
except OSError:
print(f"Couldn't close file descriptor: {fd} (file: {filename})")
try:
if os.path.exists(filename):
os.remove(filename)
except OSError as e:
print(f"Exception on removing file: {e}")
@contextmanager
def ensure_clean_dir():
"""
    Get a temporary directory path and remove it when the context exits.
Yields
------
Temporary directory path
"""
directory_name = tempfile.mkdtemp(suffix="")
try:
yield directory_name
finally:
try:
rmtree(directory_name)
except OSError:
pass
@contextmanager
def ensure_safe_environment_variables():
"""
Get a context manager to safely set environment variables
All changes will be undone on close, hence environment variables set
within this contextmanager will neither persist nor change global state.
"""
saved_environ = dict(os.environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(saved_environ)
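# Illustrative sketch (helper and variable names are ours): environment
# changes made inside ensure_safe_environment_variables do not leak out.
def _example_ensure_safe_environment_variables():
    with ensure_safe_environment_variables():
        os.environ["EXAMPLE_ONLY_VAR"] = "1"
    assert "EXAMPLE_ONLY_VAR" not in os.environ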
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2) -> bool:
"""
Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
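# Illustrative sketch (helper name is ours): equalContents ignores order and
# duplicates; only the set of distinct values matters.
def _example_equalContents():
    assert equalContents([1, 2, 2, 3], (3, 1, 2))
    assert not equalContents([1, 2], [1, 2, 4])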
def assert_index_equal(
left: Index,
right: Index,
exact: Union[bool, str] = "equiv",
check_names: bool = True,
check_less_precise: Union[bool, int] = no_default,
check_exact: bool = True,
check_categorical: bool = True,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
obj: str = "Index",
) -> None:
"""
Check that left and right Index are equal.
Parameters
----------
left : Index
right : Index
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
check_names : bool, default True
Whether to check the names attribute.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_exact : bool, default True
Whether to compare number exactly.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
obj : str, default 'Index'
Specify object name being compared, internally used to show appropriate
assertion message.
"""
__tracebackhide__ = True
def _check_types(l, r, obj="Index"):
if exact:
assert_class_equal(l, r, exact=exact, obj=obj)
# Skip exact dtype checking when `check_categorical` is False
if check_categorical:
assert_attr_equal("dtype", l, r, obj=obj)
# allow string-like to have different inferred_types
            if l.inferred_type in ("string",):
                assert r.inferred_type in ("string",)
else:
assert_attr_equal("inferred_type", l, r, obj=obj)
def _get_ilevel_values(index, level):
# accept level number only
unique = index.levels[level]
level_codes = index.codes[level]
filled = take_1d(unique._values, level_codes, fill_value=unique._na_value)
values = unique._shallow_copy(filled, name=index.names[level])
return values
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
# instance validation
_check_isinstance(left, right, Index)
# class / dtype comparison
_check_types(left, right, obj=obj)
# level comparison
if left.nlevels != right.nlevels:
msg1 = f"{obj} levels are different"
msg2 = f"{left.nlevels}, {left}"
msg3 = f"{right.nlevels}, {right}"
raise_assert_detail(obj, msg1, msg2, msg3)
# length comparison
if len(left) != len(right):
msg1 = f"{obj} length are different"
msg2 = f"{len(left)}, {left}"
msg3 = f"{len(right)}, {right}"
raise_assert_detail(obj, msg1, msg2, msg3)
    # MultiIndex special comparison to produce more user-friendly error messages
if left.nlevels > 1:
left = cast(MultiIndex, left)
right = cast(MultiIndex, right)
for level in range(left.nlevels):
# cannot use get_level_values here because it can change dtype
llevel = _get_ilevel_values(left, level)
rlevel = _get_ilevel_values(right, level)
lobj = f"MultiIndex level [{level}]"
assert_index_equal(
llevel,
rlevel,
exact=exact,
check_names=check_names,
check_exact=check_exact,
rtol=rtol,
atol=atol,
obj=lobj,
)
# get_level_values may change dtype
_check_types(left.levels[level], right.levels[level], obj=obj)
# skip exact index checking when `check_categorical` is False
if check_exact and check_categorical:
if not left.equals(right):
diff = np.sum((left.values != right.values).astype(int)) * 100.0 / len(left)
msg = f"{obj} values are different ({np.round(diff, 5)} %)"
raise_assert_detail(obj, msg, left, right)
else:
_testing.assert_almost_equal(
left.values,
right.values,
rtol=rtol,
atol=atol,
check_dtype=exact,
obj=obj,
lobj=left,
robj=right,
)
# metadata comparison
if check_names:
assert_attr_equal("names", left, right, obj=obj)
if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):
assert_attr_equal("freq", left, right, obj=obj)
if isinstance(left, pd.IntervalIndex) or isinstance(right, pd.IntervalIndex):
assert_interval_array_equal(left._values, right._values)
if check_categorical:
if is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
assert_categorical_equal(left._values, right._values, obj=f"{obj} category")
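# Illustrative sketch (helper name is ours): with the default exact="equiv",
# a RangeIndex and an Int64Index holding the same values compare equal.
def _example_assert_index_equal():
    assert_index_equal(pd.RangeIndex(3), pd.Index([0, 1, 2]))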
def assert_class_equal(left, right, exact: Union[bool, str] = True, obj="Input"):
"""
Checks classes are equal.
"""
__tracebackhide__ = True
def repr_class(x):
if isinstance(x, Index):
# return Index as it is to include values in the error message
return x
return type(x).__name__
if exact == "equiv":
if type(left) != type(right):
# allow equivalence of Int64Index/RangeIndex
types = {type(left).__name__, type(right).__name__}
if len(types - {"Int64Index", "RangeIndex"}):
msg = f"{obj} classes are not equivalent"
raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
elif exact:
if type(left) != type(right):
msg = f"{obj} classes are different"
raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
def assert_attr_equal(attr: str, left, right, obj: str = "Attributes"):
"""
    Check attributes are equal. Both objects must have the attribute.
Parameters
----------
attr : str
Attribute name being compared.
left : object
right : object
obj : str, default 'Attributes'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
left_attr = getattr(left, attr)
right_attr = getattr(right, attr)
if left_attr is right_attr:
return True
elif (
is_number(left_attr)
and np.isnan(left_attr)
and is_number(right_attr)
and np.isnan(right_attr)
):
# np.nan
return True
try:
result = left_attr == right_attr
except TypeError:
# datetimetz on rhs may raise TypeError
result = False
if not isinstance(result, bool):
result = result.all()
if result:
return True
else:
msg = f'Attribute "{attr}" are different'
raise_assert_detail(obj, msg, left_attr, right_attr)
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if isinstance(objs, (pd.Series, np.ndarray)):
for el in objs.ravel():
msg = (
"one of 'objs' is not a matplotlib Axes instance, "
f"type encountered {repr(type(el).__name__)}"
)
assert isinstance(el, (plt.Axes, dict)), msg
else:
msg = (
"objs is neither an ndarray of Artist instances nor a single "
"ArtistArtist instance, tuple, or dict, 'objs' is a "
f"{repr(type(objs).__name__)}"
)
assert isinstance(objs, (plt.Artist, tuple, dict)), msg
def assert_is_sorted(seq):
"""Assert that the sequence is sorted."""
if isinstance(seq, (Index, Series)):
seq = seq.values
# sorting does not change precisions
assert_numpy_array_equal(seq, np.sort(np.array(seq)))
def assert_categorical_equal(
left, right, check_dtype=True, check_category_order=True, obj="Categorical"
):
"""
Test that Categoricals are equivalent.
Parameters
----------
left : Categorical
right : Categorical
check_dtype : bool, default True
        Check that the integer dtype of the codes is the same
check_category_order : bool, default True
Whether the order of the categories should be compared, which
implies identical integer codes. If False, only the resulting
values are compared. The ordered attribute is
checked regardless.
obj : str, default 'Categorical'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, Categorical)
if check_category_order:
assert_index_equal(left.categories, right.categories, obj=f"{obj}.categories")
assert_numpy_array_equal(
left.codes, right.codes, check_dtype=check_dtype, obj=f"{obj}.codes"
)
else:
try:
lc = left.categories.sort_values()
rc = right.categories.sort_values()
except TypeError:
# e.g. '<' not supported between instances of 'int' and 'str'
lc, rc = left.categories, right.categories
assert_index_equal(lc, rc, obj=f"{obj}.categories")
assert_index_equal(
left.categories.take(left.codes),
right.categories.take(right.codes),
obj=f"{obj}.values",
)
assert_attr_equal("ordered", left, right, obj=obj)
def assert_interval_array_equal(left, right, exact="equiv", obj="IntervalArray"):
"""
Test that two IntervalArrays are equivalent.
Parameters
----------
left, right : IntervalArray
The IntervalArrays to compare.
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
obj : str, default 'IntervalArray'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, IntervalArray)
assert_index_equal(left.left, right.left, exact=exact, obj=f"{obj}.left")
    assert_index_equal(left.right, right.right, exact=exact, obj=f"{obj}.right")
assert_attr_equal("closed", left, right, obj=obj)
def assert_period_array_equal(left, right, obj="PeriodArray"):
_check_isinstance(left, right, PeriodArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
assert_attr_equal("freq", left, right, obj=obj)
def assert_datetime_array_equal(left, right, obj="DatetimeArray"):
__tracebackhide__ = True
_check_isinstance(left, right, DatetimeArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
assert_attr_equal("freq", left, right, obj=obj)
assert_attr_equal("tz", left, right, obj=obj)
def assert_timedelta_array_equal(left, right, obj="TimedeltaArray"):
__tracebackhide__ = True
_check_isinstance(left, right, TimedeltaArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
assert_attr_equal("freq", left, right, obj=obj)
def raise_assert_detail(obj, message, left, right, diff=None, index_values=None):
__tracebackhide__ = True
msg = f"""{obj} are different
{message}"""
if isinstance(index_values, np.ndarray):
msg += f"\n[index]: {pprint_thing(index_values)}"
if isinstance(left, np.ndarray):
left = pprint_thing(left)
elif is_categorical_dtype(left):
left = repr(left)
if isinstance(right, np.ndarray):
right = pprint_thing(right)
elif is_categorical_dtype(right):
right = repr(right)
msg += f"""
[left]: {left}
[right]: {right}"""
if diff is not None:
msg += f"\n[diff]: {diff}"
raise AssertionError(msg)
def assert_numpy_array_equal(
left,
right,
strict_nan=False,
check_dtype=True,
err_msg=None,
check_same=None,
obj="numpy array",
index_values=None,
):
"""
Check that 'np.ndarray' is equivalent.
Parameters
----------
left, right : numpy.ndarray or iterable
The two arrays to be compared.
strict_nan : bool, default False
If True, consider NaN and None to be different.
check_dtype : bool, default True
Check dtype if both a and b are np.ndarray.
err_msg : str, default None
If provided, used as assertion message.
check_same : None|'copy'|'same', default None
Ensure left and right refer/do not refer to the same memory area.
obj : str, default 'numpy array'
Specify object name being compared, internally used to show appropriate
assertion message.
index_values : numpy.ndarray, default None
optional index (shared by both left and right), used in output.
"""
__tracebackhide__ = True
# instance validation
# Show a detailed error message when classes are different
assert_class_equal(left, right, obj=obj)
# both classes must be an np.ndarray
_check_isinstance(left, right, np.ndarray)
def _get_base(obj):
return obj.base if getattr(obj, "base", None) is not None else obj
left_base = _get_base(left)
right_base = _get_base(right)
if check_same == "same":
if left_base is not right_base:
raise AssertionError(f"{repr(left_base)} is not {repr(right_base)}")
elif check_same == "copy":
if left_base is right_base:
raise AssertionError(f"{repr(left_base)} is {repr(right_base)}")
def _raise(left, right, err_msg):
if err_msg is None:
if left.shape != right.shape:
raise_assert_detail(
obj, f"{obj} shapes are different", left.shape, right.shape
)
diff = 0
for l, r in zip(left, right):
# count up differences
if not array_equivalent(l, r, strict_nan=strict_nan):
diff += 1
diff = diff * 100.0 / left.size
msg = f"{obj} values are different ({np.round(diff, 5)} %)"
raise_assert_detail(obj, msg, left, right, index_values=index_values)
raise AssertionError(err_msg)
# compare shape and values
if not array_equivalent(left, right, strict_nan=strict_nan):
_raise(left, right, err_msg)
if check_dtype:
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
assert_attr_equal("dtype", left, right, obj=obj)
def assert_extension_array_equal(
left,
right,
check_dtype=True,
index_values=None,
check_less_precise=no_default,
check_exact=False,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
):
"""
Check that left and right ExtensionArrays are equal.
Parameters
----------
left, right : ExtensionArray
The two arrays to compare.
check_dtype : bool, default True
Whether to check if the ExtensionArray dtypes are identical.
index_values : numpy.ndarray, default None
Optional index (shared by both left and right), used in output.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_exact : bool, default False
Whether to compare number exactly.
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
Notes
-----
Missing values are checked separately from valid values.
A mask of missing values is computed for each and checked to match.
The remaining all-valid values are cast to object dtype and checked.
"""
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
assert isinstance(left, ExtensionArray), "left is not an ExtensionArray"
assert isinstance(right, ExtensionArray), "right is not an ExtensionArray"
if check_dtype:
assert_attr_equal("dtype", left, right, obj="ExtensionArray")
if (
isinstance(left, DatetimeLikeArrayMixin)
and isinstance(right, DatetimeLikeArrayMixin)
and type(right) == type(left)
):
# Avoid slow object-dtype comparisons
# np.asarray for case where we have a np.MaskedArray
assert_numpy_array_equal(
np.asarray(left.asi8), np.asarray(right.asi8), index_values=index_values
)
return
left_na = np.asarray(left.isna())
right_na = np.asarray(right.isna())
assert_numpy_array_equal(
left_na, right_na, obj="ExtensionArray NA mask", index_values=index_values
)
left_valid = np.asarray(left[~left_na].astype(object))
right_valid = np.asarray(right[~right_na].astype(object))
if check_exact:
assert_numpy_array_equal(
left_valid, right_valid, obj="ExtensionArray", index_values=index_values
)
else:
_testing.assert_almost_equal(
left_valid,
right_valid,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
obj="ExtensionArray",
index_values=index_values,
)
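# Illustrative sketch (helper name is ours): two nullable-integer arrays with
# matching values and NA positions compare equal.
def _example_assert_extension_array_equal():
    left = pd.array([1, 2, None], dtype="Int64")
    right = pd.array([1, 2, None], dtype="Int64")
    assert_extension_array_equal(left, right)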
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(
left,
right,
check_dtype=True,
check_index_type="equiv",
check_series_type=True,
check_less_precise=no_default,
check_names=True,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_category_order=True,
check_freq=True,
rtol=1.0e-5,
atol=1.0e-8,
obj="Series",
):
"""
Check that left and right Series are equal.
Parameters
----------
left : Series
right : Series
check_dtype : bool, default True
Whether to check the Series dtype is identical.
check_index_type : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_series_type : bool, default True
Whether to check the Series class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_names : bool, default True
Whether to check the Series and Index names attribute.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_category_order : bool, default True
Whether to compare category order of internal Categoricals.
.. versionadded:: 1.0.2
check_freq : bool, default True
Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex.
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
obj : str, default 'Series'
Specify object name being compared, internally used to show appropriate
assertion message.
"""
__tracebackhide__ = True
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
# instance validation
_check_isinstance(left, right, Series)
if check_series_type:
assert_class_equal(left, right, obj=obj)
# length comparison
if len(left) != len(right):
msg1 = f"{len(left)}, {left.index}"
msg2 = f"{len(right)}, {right.index}"
raise_assert_detail(obj, "Series length are different", msg1, msg2)
# index comparison
assert_index_equal(
left.index,
right.index,
exact=check_index_type,
check_names=check_names,
check_exact=check_exact,
check_categorical=check_categorical,
rtol=rtol,
atol=atol,
obj=f"{obj}.index",
)
if check_freq and isinstance(left.index, (pd.DatetimeIndex, pd.TimedeltaIndex)):
lidx = left.index
ridx = right.index
assert lidx.freq == ridx.freq, (lidx.freq, ridx.freq)
if check_dtype:
# We want to skip exact dtype checking when `check_categorical`
# is False. We'll still raise if only one is a `Categorical`,
# regardless of `check_categorical`
if (
is_categorical_dtype(left.dtype)
and is_categorical_dtype(right.dtype)
and not check_categorical
):
pass
else:
assert_attr_equal("dtype", left, right, obj=f"Attributes of {obj}")
if check_exact and is_numeric_dtype(left.dtype) and is_numeric_dtype(right.dtype):
# Only check exact if dtype is numeric
assert_numpy_array_equal(
left._values,
right._values,
check_dtype=check_dtype,
obj=str(obj),
index_values=np.asarray(left.index),
)
elif check_datetimelike_compat and (
needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype)
):
# we want to check only if we have compat dtypes
# e.g. integer and M|m are NOT compat, but we can simply check
# the values in that case
# datetimelike may have different objects (e.g. datetime.datetime
# vs Timestamp) but will compare equal
if not Index(left._values).equals(Index(right._values)):
msg = (
f"[datetimelike_compat=True] {left._values} "
f"is not equal to {right._values}."
)
raise AssertionError(msg)
elif is_interval_dtype(left.dtype) and is_interval_dtype(right.dtype):
assert_interval_array_equal(left.array, right.array)
elif is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
_testing.assert_almost_equal(
left._values,
right._values,
rtol=rtol,
atol=atol,
check_dtype=check_dtype,
obj=str(obj),
index_values=np.asarray(left.index),
)
elif is_extension_array_dtype(left.dtype) and is_extension_array_dtype(right.dtype):
assert_extension_array_equal(
left._values,
right._values,
check_dtype=check_dtype,
index_values=np.asarray(left.index),
)
elif needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype):
# DatetimeArray or TimedeltaArray
assert_extension_array_equal(
left._values,
right._values,
check_dtype=check_dtype,
index_values=np.asarray(left.index),
)
else:
_testing.assert_almost_equal(
left._values,
right._values,
rtol=rtol,
atol=atol,
check_dtype=check_dtype,
obj=str(obj),
index_values=np.asarray(left.index),
)
# metadata comparison
if check_names:
assert_attr_equal("name", left, right, obj=obj)
if check_categorical:
if is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
assert_categorical_equal(
left._values,
right._values,
obj=f"{obj} category",
check_category_order=check_category_order,
)
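# Illustrative sketch (helper name is ours): with the default
# check_exact=False, a difference well inside rtol/atol still passes.
def _example_assert_series_equal():
    left = Series([1.0, 2.0, 3.0])
    right = Series([1.0, 2.0, 3.0 + 1e-9])
    assert_series_equal(left, right)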
# This could be refactored to use the NDFrame.equals method
def assert_frame_equal(
left,
right,
check_dtype=True,
check_index_type="equiv",
check_column_type="equiv",
check_frame_type=True,
check_less_precise=no_default,
check_names=True,
by_blocks=False,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_like=False,
check_freq=True,
rtol=1.0e-5,
atol=1.0e-8,
obj="DataFrame",
):
"""
Check that left and right DataFrame are equal.
This function is intended to compare two DataFrames and output any
    differences. It is mostly intended for use in unit tests.
Additional parameters allow varying the strictness of the
equality checks performed.
Parameters
----------
left : DataFrame
First DataFrame to compare.
right : DataFrame
Second DataFrame to compare.
check_dtype : bool, default True
Whether to check the DataFrame dtype is identical.
check_index_type : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_column_type : bool or {'equiv'}, default 'equiv'
Whether to check the columns class, dtype and inferred_type
are identical. Is passed as the ``exact`` argument of
:func:`assert_index_equal`.
check_frame_type : bool, default True
Whether to check the DataFrame class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_names : bool, default True
Whether to check that the `names` attribute for both the `index`
and `column` attributes of the DataFrame is identical.
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_like : bool, default False
If True, ignore the order of index & columns.
Note: index labels must match their respective rows
(same as in columns) - same labels must be with the same data.
check_freq : bool, default True
Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex.
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
obj : str, default 'DataFrame'
Specify object name being compared, internally used to show appropriate
assertion message.
See Also
--------
assert_series_equal : Equivalent method for asserting Series equality.
DataFrame.equals : Check DataFrame equality.
Examples
--------
This example shows comparing two DataFrames that are equal
but with columns of differing dtypes.
>>> from pandas._testing import assert_frame_equal
>>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
>>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
df1 equals itself.
>>> assert_frame_equal(df1, df1)
df1 differs from df2 as column 'b' is of a different type.
>>> assert_frame_equal(df1, df2)
Traceback (most recent call last):
...
AssertionError: Attributes of DataFrame.iloc[:, 1] (column name="b") are different
Attribute "dtype" are different
[left]: int64
[right]: float64
Ignore differing dtypes in columns with check_dtype.
>>> assert_frame_equal(df1, df2, check_dtype=False)
"""
__tracebackhide__ = True
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
# instance validation
_check_isinstance(left, right, DataFrame)
if check_frame_type:
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# shape comparison
if left.shape != right.shape:
raise_assert_detail(
obj, f"{obj} shape mismatch", f"{repr(left.shape)}", f"{repr(right.shape)}"
)
if check_like:
left, right = left.reindex_like(right), right
# index comparison
assert_index_equal(
left.index,
right.index,
exact=check_index_type,
check_names=check_names,
check_exact=check_exact,
check_categorical=check_categorical,
rtol=rtol,
atol=atol,
obj=f"{obj}.index",
)
# column comparison
assert_index_equal(
left.columns,
right.columns,
exact=check_column_type,
check_names=check_names,
check_exact=check_exact,
check_categorical=check_categorical,
rtol=rtol,
atol=atol,
obj=f"{obj}.columns",
)
# compare by blocks
if by_blocks:
rblocks = right._to_dict_of_blocks()
lblocks = left._to_dict_of_blocks()
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
assert_frame_equal(
lblocks[dtype], rblocks[dtype], check_dtype=check_dtype, obj=obj
)
# compare by columns
else:
for i, col in enumerate(left.columns):
assert col in right
lcol = left.iloc[:, i]
rcol = right.iloc[:, i]
assert_series_equal(
lcol,
rcol,
check_dtype=check_dtype,
check_index_type=check_index_type,
check_exact=check_exact,
check_names=check_names,
check_datetimelike_compat=check_datetimelike_compat,
check_categorical=check_categorical,
check_freq=check_freq,
obj=f'{obj}.iloc[:, {i}] (column name="{col}")',
rtol=rtol,
atol=atol,
)
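# Illustrative sketch (helper name is ours): check_like=True ignores row
# order as long as the same labels carry the same data.
def _example_assert_frame_equal_check_like():
    df1 = DataFrame({"a": [1, 2]}, index=["x", "y"])
    df2 = df1.loc[["y", "x"]]
    assert_frame_equal(df1, df2, check_like=True)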
def assert_equal(left, right, **kwargs):
"""
Wrapper for tm.assert_*_equal to dispatch to the appropriate test function.
Parameters
----------
left, right : Index, Series, DataFrame, ExtensionArray, or np.ndarray
The two items to be compared.
**kwargs
All keyword arguments are passed through to the underlying assert method.
"""
__tracebackhide__ = True
if isinstance(left, pd.Index):
assert_index_equal(left, right, **kwargs)
if isinstance(left, (pd.DatetimeIndex, pd.TimedeltaIndex)):
assert left.freq == right.freq, (left.freq, right.freq)
elif isinstance(left, pd.Series):
assert_series_equal(left, right, **kwargs)
elif isinstance(left, pd.DataFrame):
assert_frame_equal(left, right, **kwargs)
elif isinstance(left, IntervalArray):
assert_interval_array_equal(left, right, **kwargs)
elif isinstance(left, PeriodArray):
assert_period_array_equal(left, right, **kwargs)
elif isinstance(left, DatetimeArray):
assert_datetime_array_equal(left, right, **kwargs)
elif isinstance(left, TimedeltaArray):
assert_timedelta_array_equal(left, right, **kwargs)
elif isinstance(left, ExtensionArray):
assert_extension_array_equal(left, right, **kwargs)
elif isinstance(left, np.ndarray):
assert_numpy_array_equal(left, right, **kwargs)
elif isinstance(left, str):
assert kwargs == {}
assert left == right
else:
raise NotImplementedError(type(left))
def box_expected(expected, box_cls, transpose=True):
"""
Helper function to wrap the expected output of a test in a given box_class.
Parameters
----------
expected : np.ndarray, Index, Series
box_cls : {Index, Series, DataFrame}
Returns
-------
subclass of box_cls
"""
if box_cls is pd.array:
expected = pd.array(expected)
elif box_cls is pd.Index:
expected = pd.Index(expected)
elif box_cls is pd.Series:
expected = pd.Series(expected)
elif box_cls is pd.DataFrame:
expected = pd.Series(expected).to_frame()
if transpose:
            # for vector operations, we need a DataFrame to be a single-row,
# not a single-column, in order to operate against non-DataFrame
# vectors of the same length.
expected = expected.T
elif box_cls is PeriodArray:
# the PeriodArray constructor is not as flexible as period_array
expected = period_array(expected)
elif box_cls is DatetimeArray:
expected = DatetimeArray(expected)
elif box_cls is TimedeltaArray:
expected = TimedeltaArray(expected)
elif box_cls is np.ndarray:
expected = np.array(expected)
elif box_cls is to_array:
expected = to_array(expected)
else:
raise NotImplementedError(box_cls)
return expected
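# Illustrative sketch (helper name is ours): boxing into a DataFrame
# transposes to a single row by default so the result broadcasts against
# same-length vectors.
def _example_box_expected():
    boxed = box_expected([1, 2, 3], pd.DataFrame)
    assert boxed.shape == (1, 3)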
def to_array(obj):
# temporary implementation until we get pd.array in place
dtype = getattr(obj, "dtype", None)
if is_period_dtype(dtype):
return period_array(obj)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
return DatetimeArray._from_sequence(obj)
elif is_timedelta64_dtype(dtype):
return TimedeltaArray._from_sequence(obj)
else:
return np.array(obj)
# -----------------------------------------------------------------------------
# Sparse
def assert_sp_array_equal(left, right):
"""
Check that the left and right SparseArray are equal.
Parameters
----------
left : SparseArray
right : SparseArray
"""
_check_isinstance(left, right, pd.arrays.SparseArray)
assert_numpy_array_equal(left.sp_values, right.sp_values)
# SparseIndex comparison
assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex)
assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex)
left_index = left.sp_index
right_index = right.sp_index
if not left_index.equals(right_index):
raise_assert_detail(
"SparseArray.index", "index are not equal", left_index, right_index
)
else:
        # the sparse indexes match; nothing further to check here
pass
assert_attr_equal("fill_value", left, right)
assert_attr_equal("dtype", left, right)
assert_numpy_array_equal(left.to_dense(), right.to_dense())
# -----------------------------------------------------------------------------
# Others
def assert_contains_all(iterable, dic):
for k in iterable:
assert k in dic, f"Did not contain item: {repr(k)}"
def assert_copy(iter1, iter2, **eql_kwargs):
"""
iter1, iter2: iterables that produce elements
comparable with assert_almost_equal
Checks that the elements are equal, but not
the same object. (Does not check that items
in sequences are also not the same object)
"""
for elem1, elem2 in zip(iter1, iter2):
assert_almost_equal(elem1, elem2, **eql_kwargs)
msg = (
f"Expected object {repr(type(elem1))} and object {repr(type(elem2))} to be "
"different objects, but they were the same object."
)
assert elem1 is not elem2, msg
def getCols(k):
return string.ascii_uppercase[:k]
# make index
def makeStringIndex(k=10, name=None):
return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
""" make a length k index or n categories """
x = rands_array(nchars=4, size=n)
return CategoricalIndex(
Categorical.from_codes(np.arange(k) % n, categories=x), name=name, **kwargs
)
def makeIntervalIndex(k=10, name=None, **kwargs):
""" make a length k IntervalIndex """
x = np.linspace(0, 100, num=(k + 1))
return IntervalIndex.from_breaks(x, name=name, **kwargs)
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
return Index([False, True], name=name)
return Index([False, True] + [False] * (k - 2), name=name)
def makeIntIndex(k=10, name=None):
return Index(list(range(k)), name=name)
def makeUIntIndex(k=10, name=None):
return Index([2 ** 63 + i for i in range(k)], name=name)
def makeRangeIndex(k=10, name=None, **kwargs):
return RangeIndex(0, k, 1, name=name, **kwargs)
def makeFloatIndex(k=10, name=None):
values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
return Index(values * (10 ** np.random.randint(0, 9)), name=name)
def makeDateIndex(k=10, freq="B", name=None, **kwargs):
dt = datetime(2000, 1, 1)
dr = bdate_range(dt, periods=k, freq=freq, name=name)
return DatetimeIndex(dr, name=name, **kwargs)
def makeTimedeltaIndex(k=10, freq="D", name=None, **kwargs):
return pd.timedelta_range(start="1 day", periods=k, freq=freq, name=name, **kwargs)
def makePeriodIndex(k=10, name=None, **kwargs):
dt = datetime(2000, 1, 1)
dr = pd.period_range(start=dt, periods=k, freq="B", name=name, **kwargs)
return dr
def makeMultiIndex(k=10, names=None, **kwargs):
return MultiIndex.from_product((("foo", "bar"), (1, 2)), names=names, **kwargs)
_names = [
"Alice",
"Bob",
"Charlie",
"Dan",
"Edith",
"Frank",
"George",
"Hannah",
"Ingrid",
"Jerry",
"Kevin",
"Laura",
"Michael",
"Norbert",
"Oliver",
"Patricia",
"Quinn",
"Ray",
"Sarah",
"Tim",
"Ursula",
"Victor",
"Wendy",
"Xavier",
"Yvonne",
"Zelda",
]
def _make_timeseries(start="2000-01-01", end="2000-12-31", freq="1D", seed=None):
"""
Make a DataFrame with a DatetimeIndex
Parameters
----------
start : str or Timestamp, default "2000-01-01"
The start of the index. Passed to date_range with `freq`.
end : str or Timestamp, default "2000-12-31"
The end of the index. Passed to date_range with `freq`.
freq : str or Freq
The frequency to use for the DatetimeIndex
seed : int, optional
The random state seed.
    Returns
    -------
    DataFrame
        A DataFrame indexed by a "timestamp" DatetimeIndex, with columns:
        * name : object dtype with string names drawn from a fixed list
        * id : int dtype with Poisson-distributed counts
        * x, y : float dtype, uniform on [-1, 1)
Examples
--------
>>> _make_timeseries()
id name x y
timestamp
2000-01-01 982 Frank 0.031261 0.986727
2000-01-02 1025 Edith -0.086358 -0.032920
2000-01-03 982 Edith 0.473177 0.298654
2000-01-04 1009 Sarah 0.534344 -0.750377
2000-01-05 963 Zelda -0.271573 0.054424
... ... ... ... ...
2000-12-27 980 Ingrid -0.132333 -0.422195
2000-12-28 972 Frank -0.376007 -0.298687
2000-12-29 1009 Ursula -0.865047 -0.503133
2000-12-30 1000 Hannah -0.063757 -0.507336
2000-12-31 972 Tim -0.869120 0.531685
"""
index = pd.date_range(start=start, end=end, freq=freq, name="timestamp")
n = len(index)
state = np.random.RandomState(seed)
columns = {
"name": state.choice(_names, size=n),
"id": state.poisson(1000, size=n),
"x": state.rand(n) * 2 - 1,
"y": state.rand(n) * 2 - 1,
}
df = pd.DataFrame(columns, index=index, columns=sorted(columns))
if df.index[-1] == end:
df = df.iloc[:-1]
return df
def index_subclass_makers_generator():
make_index_funcs = [
makeDateIndex,
makePeriodIndex,
makeTimedeltaIndex,
makeRangeIndex,
makeIntervalIndex,
makeCategoricalIndex,
makeMultiIndex,
]
for make_index_func in make_index_funcs:
yield make_index_func
def all_timeseries_index_generator(k=10):
"""
Generator which can be iterated over to get instances of all the classes
which represent time-series.
Parameters
----------
k: length of each of the index instances
"""
make_index_funcs = [makeDateIndex, makePeriodIndex, makeTimedeltaIndex]
for make_index_func in make_index_funcs:
yield make_index_func(k=k)
# make series
def makeFloatSeries(name=None):
index = makeStringIndex(_N)
return Series(randn(_N), index=index, name=name)
def makeStringSeries(name=None):
index = makeStringIndex(_N)
return Series(randn(_N), index=index, name=name)
def makeObjectSeries(name=None):
data = makeStringIndex(_N)
data = Index(data, dtype=object)
index = makeStringIndex(_N)
return Series(data, index=index, name=name)
def getSeriesData():
index = makeStringIndex(_N)
return {c: Series(randn(_N), index=index) for c in getCols(_K)}
def makeTimeSeries(nper=None, freq="B", name=None):
if nper is None:
nper = _N
return Series(randn(nper), index=makeDateIndex(nper, freq=freq), name=name)
def makePeriodSeries(nper=None, name=None):
if nper is None:
nper = _N
return Series(randn(nper), index=makePeriodIndex(nper), name=name)
def getTimeSeriesData(nper=None, freq="B"):
return {c: makeTimeSeries(nper, freq) for c in getCols(_K)}
def getPeriodData(nper=None):
return {c: makePeriodSeries(nper) for c in getCols(_K)}
# make frame
def makeTimeDataFrame(nper=None, freq="B"):
data = getTimeSeriesData(nper, freq)
return DataFrame(data)
def makeDataFrame():
data = getSeriesData()
return DataFrame(data)
def getMixedTypeDict():
index = Index(["a", "b", "c", "d", "e"])
data = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": bdate_range("1/1/2009", periods=5),
}
return index, data
def makeMixedDataFrame():
return DataFrame(getMixedTypeDict()[1])
def makePeriodFrame(nper=None):
data = getPeriodData(nper)
return DataFrame(data)
def makeCustomIndex(
nentries, nlevels, prefix="#", names=False, ndupe_l=None, idx_type=None
):
"""
    Create an index/multiindex with given dimensions, levels, names, etc.
    nentries - number of entries in index
    nlevels - number of levels (> 1 produces multiindex)
prefix - a string prefix for labels
names - (Optional), bool or list of strings. if True will use default
names, if false will use no names, if a list is given, the name of
each level in the index will be taken from the list.
ndupe_l - (Optional), list of ints, the number of rows for which the
label will repeated at the corresponding level, you can specify just
the first few, the rest will use the default ndupe_l of 1.
len(ndupe_l) <= nlevels.
idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a datetime index.
if unspecified, string labels will be generated.
"""
if ndupe_l is None:
ndupe_l = [1] * nlevels
assert is_sequence(ndupe_l) and len(ndupe_l) <= nlevels
    assert names is None or names is False or names is True or len(names) == nlevels
assert idx_type is None or (
idx_type in ("i", "f", "s", "u", "dt", "p", "td") and nlevels == 1
)
if names is True:
# build default names
names = [prefix + str(i) for i in range(nlevels)]
if names is False:
# pass None to index constructor for no name
names = None
# make singleton case uniform
if isinstance(names, str) and nlevels == 1:
names = [names]
# specific 1D index type requested?
idx_func = dict(
i=makeIntIndex,
f=makeFloatIndex,
s=makeStringIndex,
u=makeUnicodeIndex,
dt=makeDateIndex,
td=makeTimedeltaIndex,
p=makePeriodIndex,
).get(idx_type)
if idx_func:
idx = idx_func(nentries)
# but we need to fill in the name
if names:
idx.name = names[0]
return idx
elif idx_type is not None:
raise ValueError(
f"{repr(idx_type)} is not a legal value for `idx_type`, "
"use 'i'/'f'/'s'/'u'/'dt'/'p'/'td'."
)
if len(ndupe_l) < nlevels:
ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
assert len(ndupe_l) == nlevels
assert all(x > 0 for x in ndupe_l)
tuples = []
for i in range(nlevels):
def keyfunc(x):
import re
numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
return [int(num) for num in numeric_tuple]
# build a list of lists to create the index from
div_factor = nentries // ndupe_l[i] + 1
cnt = Counter()
for j in range(div_factor):
label = f"{prefix}_l{i}_g{j}"
cnt[label] = ndupe_l[i]
# cute Counter trick
result = sorted(cnt.elements(), key=keyfunc)[:nentries]
tuples.append(result)
tuples = list(zip(*tuples))
# convert tuples to index
if nentries == 1:
# we have a single level of tuples, i.e. a regular Index
index = Index(tuples[0], name=names[0])
elif nlevels == 1:
name = None if names is None else names[0]
index = Index((x[0] for x in tuples), name=name)
else:
index = MultiIndex.from_tuples(tuples, names=names)
return index
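# Illustrative sketch (helper name is ours): a 6-entry, 2-level MultiIndex
# whose first level repeats each label twice.
def _example_makeCustomIndex():
    mi = makeCustomIndex(6, nlevels=2, ndupe_l=[2])
    assert mi.nlevels == 2
    assert len(mi) == 6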
def makeCustomDataframe(
nrows,
ncols,
c_idx_names=True,
r_idx_names=True,
c_idx_nlevels=1,
r_idx_nlevels=1,
data_gen_f=None,
c_ndupe_l=None,
r_ndupe_l=None,
dtype=None,
c_idx_type=None,
r_idx_type=None,
):
"""
Create a DataFrame using supplied parameters.
Parameters
----------
nrows, ncols - number of data rows/cols
    c_idx_names, r_idx_names - False/True/list of strings, yields no names,
default names or uses the provided names for the levels of the
corresponding index. You can provide a single string when
c_idx_nlevels ==1.
c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
data_gen_f - a function f(row,col) which return the data value
at that position, the default generator used yields values of the form
"RxCy" based on position.
c_ndupe_l, r_ndupe_l - list of integers, determines the number
of duplicates for each label at a given level of the corresponding
index. The default `None` value produces a multiplicity of 1 across
all levels, i.e. a unique index. Will accept a partial list of length
N < idx_nlevels, for just the first N levels. If ndupe doesn't divide
        nrows/ncols, the last label might have lower multiplicity.
dtype - passed to the DataFrame constructor as is, in case you wish to
have more control in conjunction with a custom `data_gen_f`
r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a timedelta index.
if unspecified, string labels will be generated.
Examples
--------
# 5 row, 3 columns, default names on both, single index on both axis
>> makeCustomDataframe(5,3)
# make the data a random int between 1 and 100
>> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))
# 2-level multiindex on rows with each label duplicated
# twice on first level, default names on both axis, single
# index on both axis
>> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])
# DatetimeIndex on row, index with unicode labels on columns
# no names on either axis
>> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
r_idx_type="dt",c_idx_type="u")
    # 4-level multiindex on rows with names provided, 2-level multiindex
# on columns with default labels and default names.
>> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
r_idx_names=["FEE","FI","FO","FAM"],
c_idx_nlevels=2)
>> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
"""
assert c_idx_nlevels > 0
assert r_idx_nlevels > 0
assert r_idx_type is None or (
r_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and r_idx_nlevels == 1
)
assert c_idx_type is None or (
c_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and c_idx_nlevels == 1
)
columns = makeCustomIndex(
ncols,
nlevels=c_idx_nlevels,
prefix="C",
names=c_idx_names,
ndupe_l=c_ndupe_l,
idx_type=c_idx_type,
)
index = makeCustomIndex(
nrows,
nlevels=r_idx_nlevels,
prefix="R",
names=r_idx_names,
ndupe_l=r_ndupe_l,
idx_type=r_idx_type,
)
# by default, generate data based on location
if data_gen_f is None:
data_gen_f = lambda r, c: f"R{r}C{c}"
data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]
return DataFrame(data, index, columns, dtype=dtype)
def _create_missing_idx(nrows, ncols, density, random_state=None):
if random_state is None:
random_state = np.random
else:
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
size = int(np.round((1 - density) * nrows * ncols))
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
extra_size = min(size + min_rows, fac * size)
def _gen_unique_rand(rng, _extra_size):
ind = rng.rand(int(_extra_size))
return np.unique(np.floor(ind * nrows * ncols))[:size]
ind = _gen_unique_rand(random_state, extra_size)
while ind.size < size:
extra_size *= 1.05
ind = _gen_unique_rand(random_state, extra_size)
j = np.floor(ind * 1.0 / nrows).astype(int)
i = (ind - j * nrows).astype(int)
return i.tolist(), j.tolist()
def makeMissingDataframe(density=0.9, random_state=None):
df = makeDataFrame()
i, j = _create_missing_idx(*df.shape, density=density, random_state=random_state)
df.values[i, j] = np.nan
return df
def optional_args(decorator):
"""
allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, *args, **kwargs)
"""
@wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and callable(args[0])
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
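# Illustrative sketch (names are ours): a decorator written with
# optional_args can be applied bare or with keyword arguments; here it just
# attaches a marker attribute, so _example_plain._tag == "default" and
# _example_custom._tag == "custom".
@optional_args
def _example_tagged(func, tag="default"):
    func._tag = tag
    return func
@_example_tagged
def _example_plain():
    pass
@_example_tagged(tag="custom")
def _example_custom():
    pass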
# skip tests on exceptions with this message
_network_error_messages = (
# 'urlopen error timed out',
# 'timeout: timed out',
# 'socket.timeout: timed out',
"timed out",
"Server Hangup",
"HTTP Error 503: Service Unavailable",
"502: Proxy Error",
"HTTP Error 502: internal error",
"HTTP Error 502",
"HTTP Error 503",
"HTTP Error 403",
"HTTP Error 400",
"Temporary failure in name resolution",
"Name or service not known",
"Connection refused",
"certificate verify",
)
# or this e.errno/e.reason.errno
_network_errno_vals = (
101, # Network is unreachable
111, # Connection refused
110, # Connection timed out
104, # Connection reset Error
54, # Connection reset by peer
60, # urllib.error.URLError: [Errno 60] Connection timed out
)
# Both of the above shouldn't mask real issues such as 404's
# or refused connections (changed DNS).
# But some tests (test_data yahoo) contact incredibly flakey
# servers.
# and conditionally raise on exception types in _get_default_network_errors
def _get_default_network_errors():
# Lazy import for http.client because it imports many things from the stdlib
import http.client
return (IOError, http.client.HTTPException, TimeoutError)
def can_connect(url, error_classes=None):
"""
    Try to connect to the given url. Returns True if the connection succeeds,
    False if an IOError is raised.
Parameters
----------
url : basestring
The URL to try to connect to
Returns
-------
connectable : bool
Return True if no IOError (unable to connect) or URLError (bad url) was
raised
"""
if error_classes is None:
error_classes = _get_default_network_errors()
try:
with urlopen(url):
pass
except error_classes:
return False
else:
return True
@optional_args
def network(
t,
url="http://www.google.com",
raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT,
check_before_test=False,
error_classes=None,
skip_errnos=_network_errno_vals,
_skip_on_messages=_network_error_messages,
):
"""
Label a test as requiring network connection and, if an error is
encountered, only raise if it does not find a network connection.
In comparison to ``network``, this assumes an added contract to your test:
you must assert that, under normal conditions, your test will ONLY fail if
it does not have network connectivity.
You can call this in 3 ways: as a standard decorator, with keyword
arguments, or with a positional argument that is the url to check.
Parameters
----------
t : callable
The test requiring network connectivity.
url : path
The url to test via ``pandas.io.common.urlopen`` to check
for connectivity. Defaults to 'http://www.google.com'.
raise_on_error : bool
If True, never catches errors.
check_before_test : bool
If True, checks connectivity before running the test case.
error_classes : tuple or Exception
error classes to ignore. If not in ``error_classes``, raises the error.
defaults to IOError. Be careful about changing the error classes here.
skip_errnos : iterable of int
        Any exception that has .errno or .reason.errno set to one
of these values will be skipped with an appropriate
message.
_skip_on_messages: iterable of string
any exception e for which one of the strings is
a substring of str(e) will be skipped with an appropriate
message. Intended to suppress errors where an errno isn't available.
Notes
-----
* ``raise_on_error`` supersedes ``check_before_test``
Returns
-------
t : callable
The decorated test ``t``, with checks for connectivity errors.
Example
-------
    Tests decorated with @network re-raise the original error when a network
    connection to the check URL (defaults to google.com) is available;
    otherwise the failure is assumed to be connectivity-related and the test
    is skipped::
>>> from pandas._testing import network
>>> from pandas.io.common import urlopen
>>> @network
... def test_network():
... with urlopen("rabbit://bonanza.com"):
... pass
Traceback
...
    URLError: <urlopen error unknown url type: rabbit>
You can specify alternative URLs::
>>> @network("http://www.yahoo.com")
... def test_something_with_yahoo():
... raise IOError("Failure Message")
>>> test_something_with_yahoo()
Traceback (most recent call last):
...
IOError: Failure Message
If you set check_before_test, it will check the url first and not run the
test on failure::
>>> @network("failing://url.blaher", check_before_test=True)
... def test_something():
... print("I ran!")
... raise ValueError("Failure")
>>> test_something()
Traceback (most recent call last):
...
Errors not related to networking will always be raised.
"""
from pytest import skip
if error_classes is None:
error_classes = _get_default_network_errors()
t.network = True
@wraps(t)
def wrapper(*args, **kwargs):
if check_before_test and not raise_on_error:
if not can_connect(url, error_classes):
skip()
try:
return t(*args, **kwargs)
except Exception as err:
errno = getattr(err, "errno", None)
            if not errno and hasattr(err, "reason"):
errno = getattr(err.reason, "errno", None)
if errno in skip_errnos:
skip(f"Skipping test due to known errno and error {err}")
e_str = str(err)
if any(m.lower() in e_str.lower() for m in _skip_on_messages):
skip(
f"Skipping test because exception message is known and error {err}"
)
if not isinstance(err, error_classes):
raise
if raise_on_error or can_connect(url, error_classes):
raise
else:
skip(f"Skipping test due to lack of connectivity and error {err}")
return wrapper
with_connectivity_check = network
@contextmanager
def assert_produces_warning(
expected_warning=Warning,
filter_level="always",
check_stacklevel=True,
raise_on_extra_warnings=True,
):
"""
Context manager for running code expected to either raise a specific
warning, or not raise any warnings. Verifies that the code raises the
expected warning, and that it does not raise any other unexpected
warnings. It is basically a wrapper around ``warnings.catch_warnings``.
Parameters
----------
expected_warning : {Warning, False, None}, default Warning
The type of Exception raised. ``Warning`` is the base
class for all warnings. To check that no warning is returned,
specify ``False`` or ``None``.
filter_level : str or None, default "always"
Specifies whether warnings are ignored, displayed, or turned
into errors.
Valid values are:
* "error" - turns matching warnings into exceptions
* "ignore" - discard the warning
* "always" - always emit a warning
* "default" - print the warning the first time it is generated
from each location
* "module" - print the warning the first time it is generated
from each module
* "once" - print the warning the first time it is generated
check_stacklevel : bool, default True
If True, displays the line that called the function containing
the warning to show where the function is called. Otherwise, the
line that implements the function is displayed.
raise_on_extra_warnings : bool, default True
Whether extra warnings not of the type `expected_warning` should
cause the test to fail.
Examples
--------
>>> import warnings
>>> with assert_produces_warning():
... warnings.warn(UserWarning())
...
>>> with assert_produces_warning(False):
... warnings.warn(RuntimeWarning())
...
Traceback (most recent call last):
...
AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
>>> with assert_produces_warning(UserWarning):
... warnings.warn(RuntimeWarning())
Traceback (most recent call last):
...
AssertionError: Did not see expected warning of class 'UserWarning'.
.. warning:: This is *not* thread-safe.
"""
__tracebackhide__ = True
with warnings.catch_warnings(record=True) as w:
saw_warning = False
warnings.simplefilter(filter_level)
yield w
extra_warnings = []
for actual_warning in w:
if expected_warning and issubclass(
actual_warning.category, expected_warning
):
saw_warning = True
if check_stacklevel and issubclass(
actual_warning.category, (FutureWarning, DeprecationWarning)
):
from inspect import getframeinfo, stack
caller = getframeinfo(stack()[2][0])
msg = (
"Warning not set with correct stacklevel. "
f"File where warning is raised: {actual_warning.filename} != "
f"{caller.filename}. Warning message: {actual_warning.message}"
)
assert actual_warning.filename == caller.filename, msg
else:
extra_warnings.append(
(
actual_warning.category.__name__,
actual_warning.message,
actual_warning.filename,
actual_warning.lineno,
)
)
if expected_warning:
msg = (
f"Did not see expected warning of class "
f"{repr(expected_warning.__name__)}"
)
assert saw_warning, msg
if raise_on_extra_warnings and extra_warnings:
raise AssertionError(
f"Caused unexpected warning(s): {repr(extra_warnings)}"
)
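# A minimal sketch of ``raise_on_extra_warnings=False``: the expected
# FutureWarning is asserted, while the unrelated RuntimeWarning is tolerated
# rather than failing the check.
def _example_tolerating_extra_warnings():
    with assert_produces_warning(
        FutureWarning, check_stacklevel=False, raise_on_extra_warnings=False
    ):
        warnings.warn(FutureWarning("deprecated behaviour"))
        warnings.warn(RuntimeWarning("unrelated noise"))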
class RNGContext:
"""
Context manager to set the numpy random number generator seed. Returns
to the original value upon exiting the context manager.
Parameters
----------
seed : int
Seed for numpy.random.seed
Examples
--------
with RNGContext(42):
np.random.randn()
"""
def __init__(self, seed):
self.seed = seed
def __enter__(self):
self.start_state = np.random.get_state()
np.random.seed(self.seed)
def __exit__(self, exc_type, exc_value, traceback):
np.random.set_state(self.start_state)
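# A minimal sketch: two draws under the same seed match, and the global RNG
# state is restored once each block exits.
def _example_rng_context_reproducibility():
    with RNGContext(42):
        first = np.random.randn(3)
    with RNGContext(42):
        second = np.random.randn(3)
    assert (first == second).all()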
@contextmanager
def with_csv_dialect(name, **kwargs):
"""
Context manager to temporarily register a CSV dialect for parsing CSV.
Parameters
----------
name : str
The name of the dialect.
kwargs : mapping
The parameters for the dialect.
Raises
------
ValueError : the name of the dialect conflicts with a builtin one.
See Also
--------
csv : Python's CSV library.
"""
import csv
_BUILTIN_DIALECTS = {"excel", "excel-tab", "unix"}
if name in _BUILTIN_DIALECTS:
raise ValueError("Cannot override builtin dialect.")
csv.register_dialect(name, **kwargs)
yield
csv.unregister_dialect(name)
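# A minimal sketch: register a temporary dialect and pass its name to
# ``pd.read_csv``; the dialect name and delimiter are arbitrary choices.
def _example_with_csv_dialect():
    from io import StringIO

    data = "a|b\n1|2\n"
    with with_csv_dialect("piped", delimiter="|"):
        df = pd.read_csv(StringIO(data), dialect="piped")
    return df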
@contextmanager
def use_numexpr(use, min_elements=None):
from pandas.core.computation import expressions as expr
if min_elements is None:
min_elements = expr._MIN_ELEMENTS
olduse = expr._USE_NUMEXPR
oldmin = expr._MIN_ELEMENTS
expr.set_use_numexpr(use)
expr._MIN_ELEMENTS = min_elements
yield
expr._MIN_ELEMENTS = oldmin
expr.set_use_numexpr(olduse)
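# A minimal sketch: force the numexpr engine on with a low element threshold
# for a single evaluation, after which the previous settings are restored.
def _example_use_numexpr():
    df = pd.DataFrame({"a": range(100), "b": range(100)})
    with use_numexpr(True, min_elements=10):
        total = df["a"] + df["b"]
    return total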
def test_parallel(num_threads=2, kwargs_list=None):
"""
Decorator to run the same function multiple times in parallel.
Parameters
----------
num_threads : int, optional
The number of times the function is run in parallel.
kwargs_list : list of dicts, optional
The list of kwargs to update original
function kwargs on different threads.
Notes
-----
This decorator does not pass the return value of the decorated function.
Original from scikit-image:
https://github.com/scikit-image/scikit-image/pull/1519
"""
assert num_threads > 0
has_kwargs_list = kwargs_list is not None
if has_kwargs_list:
assert len(kwargs_list) == num_threads
import threading
def wrapper(func):
@wraps(func)
def inner(*args, **kwargs):
if has_kwargs_list:
update_kwargs = lambda i: dict(kwargs, **kwargs_list[i])
else:
update_kwargs = lambda i: kwargs
threads = []
for i in range(num_threads):
updated_kwargs = update_kwargs(i)
thread = threading.Thread(target=func, args=args, kwargs=updated_kwargs)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return inner
return wrapper
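# A minimal sketch: run the same body from four threads; because the decorator
# discards return values, results are collected through a shared list.
def _example_test_parallel():
    collected = []

    @test_parallel(num_threads=4)
    def append_marker():
        collected.append(1)

    append_marker()
    assert len(collected) == 4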
class SubclassedSeries(Series):
_metadata = ["testattr", "name"]
@property
def _constructor(self):
return SubclassedSeries
@property
def _constructor_expanddim(self):
return SubclassedDataFrame
class SubclassedDataFrame(DataFrame):
_metadata = ["testattr"]
@property
def _constructor(self):
return SubclassedDataFrame
@property
def _constructor_sliced(self):
return SubclassedSeries
class SubclassedCategorical(Categorical):
@property
def _constructor(self):
return SubclassedCategorical
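# A minimal sketch: selecting a column from the subclassed frame yields the
# subclassed series, and the declared ``testattr`` metadata survives a copy.
def _example_subclassed_round_trip():
    df = SubclassedDataFrame({"a": [1, 2], "b": [3, 4]})
    df.testattr = "preserved"
    assert isinstance(df["a"], SubclassedSeries)
    assert df.copy().testattr == "preserved"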
@contextmanager
def set_timezone(tz: str):
"""
Context manager for temporarily setting a timezone.
Parameters
----------
tz : str
A string representing a valid timezone.
Examples
--------
>>> from datetime import datetime
>>> from dateutil.tz import tzlocal
>>> tzlocal().tzname(datetime.now())
'IST'
>>> with set_timezone('US/Eastern'):
... tzlocal().tzname(datetime.now())
...
'EDT'
"""
import os
import time
def setTZ(tz):
if tz is None:
try:
del os.environ["TZ"]
except KeyError:
pass
else:
os.environ["TZ"] = tz
time.tzset()
orig_tz = os.environ.get("TZ")
setTZ(tz)
try:
yield
finally:
setTZ(orig_tz)
def _make_skipna_wrapper(alternative, skipna_alternative=None):
"""
Create a function for calling on an array.
Parameters
----------
alternative : function
The function to be called on the array with no NaNs.
Only used when 'skipna_alternative' is None.
skipna_alternative : function
The function to be called on the original array
Returns
-------
function
"""
if skipna_alternative:
def skipna_wrapper(x):
return skipna_alternative(x.values)
else:
def skipna_wrapper(x):
nona = x.dropna()
if len(nona) == 0:
return np.nan
return alternative(nona)
return skipna_wrapper
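# A minimal sketch: without a ``skipna_alternative`` the wrapper drops NaNs
# before applying ``alternative``, so the NaN below is ignored.
def _example_skipna_wrapper():
    wrapped_sum = _make_skipna_wrapper(np.sum)
    assert wrapped_sum(pd.Series([1.0, np.nan, 2.0])) == 3.0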
def convert_rows_list_to_csv_str(rows_list: List[str]):
"""
Convert list of CSV rows to single CSV-formatted string for current OS.
This method is used for creating expected value of to_csv() method.
Parameters
----------
rows_list : List[str]
Each element represents the row of csv.
Returns
-------
str
Expected output of to_csv() in current OS.
"""
sep = os.linesep
expected = sep.join(rows_list) + sep
return expected
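# A minimal sketch: build the platform-specific expected string for a tiny
# frame; ``to_csv`` also terminates lines with ``os.linesep`` when it returns
# a string, which is why the two sides should agree.
def _example_expected_csv_output():
    expected = convert_rows_list_to_csv_str([",a", "0,1"])
    assert pd.DataFrame({"a": [1]}).to_csv() == expected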
def external_error_raised(expected_exception: Type[Exception]) -> ContextManager:
"""
Helper function to mark pytest.raises that have an external error message.
Parameters
----------
expected_exception : Exception
Expected error to raise.
Returns
-------
Callable
Regular `pytest.raises` function with `match` equal to `None`.
"""
import pytest
return pytest.raises(expected_exception, match=None)
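# A minimal sketch: only the exception type is checked, never the message,
# which is the point of passing ``match=None``.
def _example_external_error_raised():
    with external_error_raised(ValueError):
        raise ValueError("wording owned by a third-party library")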
cython_table = pd.core.base.SelectionMixin._cython_table.items()
def get_cython_table_params(ndframe, func_names_and_expected):
"""
Combine frame, functions from SelectionMixin._cython_table
keys and expected result.
Parameters
----------
ndframe : DataFrame or Series
func_names_and_expected : Sequence of two items
The first item is a name of a NDFrame method ('sum', 'prod') etc.
The second item is the expected return value.
Returns
-------
list
List of three items (DataFrame, function, expected result)
"""
results = []
for func_name, expected in func_names_and_expected:
results.append((ndframe, func_name, expected))
results += [
(ndframe, func, expected)
for func, name in cython_table
if name == func_name
]
return results
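# A minimal sketch: expand a Series plus (name, expected) pairs into
# (ndframe, func, expected) tuples, typically fed to pytest.mark.parametrize.
def _example_cython_table_params():
    return get_cython_table_params(
        pd.Series([1, 2, 3]), [("sum", 6), ("max", 3)]
    )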
def get_op_from_name(op_name: str) -> Callable:
"""
The operator function for a given op name.
Parameters
----------
op_name : string
The op name, in form of "add" or "__add__".
Returns
-------
function
A function performing the operation.
"""
short_opname = op_name.strip("_")
try:
op = getattr(operator, short_opname)
except AttributeError:
# Assume it is the reverse operator
rop = getattr(operator, short_opname[1:])
op = lambda x, y: rop(y, x)
return op
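# A minimal sketch: plain names map straight onto ``operator`` functions,
# while reversed dunder names swap the operands.
def _example_get_op_from_name():
    assert get_op_from_name("add")(2, 3) == 5
    assert get_op_from_name("__rsub__")(2, 3) == 1  # evaluates 3 - 2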
|
py | b402b0f8fb91dd12e20e4971b3ab7888d829b03c | # -*- coding: utf-8; -*-
#
# The MIT License (MIT)
#
# Copyright (c) 2014 Flavien Charlon
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import bitcoin.base58
import bitcoin.wallet
class Base58Address(bytes):
"""Represents a Base58-encoded address. It includes a version, checksum and namespace."""
def __init__(self, data, version, namespace):
"""
Initializes a Base58Address object from data, version and namespace.
:param bytes data: The base-58 payload.
:param int version: The version byte.
:param int | None namespace: The namespace byte.
:return: The Base58Address instance.
:rtype: Base58Address
"""
if not (0 <= version <= 255):
raise ValueError('version must be in range 0 to 255 inclusive; got %d' % version)
if namespace is not None and not (0 <= namespace <= 255):
raise ValueError('namespace must be None or in range 0 to 255 inclusive; got %d' % namespace)
if len(data) != 20:
raise ValueError('The payload must be 20 bytes long')
super().__init__()
self.address = bitcoin.wallet.CBitcoinAddress.from_bytes(data, version)
self.namespace = namespace
def __new__(cls, data, version, namespace, *args, **kwargs):
return super().__new__(cls, data)
@classmethod
def from_string(cls, base58):
"""
Creates a new instance of the Base58Address class.
:param str base58: The Base-58 encoded address.
:return: The Base58Address instance.
:rtype: Base58Address
"""
decoded_bytes = bitcoin.base58.decode(base58)
checksum = decoded_bytes[-4:]
calculated_checksum = bitcoin.core.Hash(decoded_bytes[:-4])[:4]
if checksum != calculated_checksum:
raise bitcoin.base58.Base58ChecksumError(
'Checksum mismatch: expected %r, calculated %r' % (checksum, calculated_checksum))
if len(decoded_bytes) == 26:
# The address has a namespace defined
namespace, version, data = decoded_bytes[0:1], decoded_bytes[1:2], decoded_bytes[2:-4]
return cls(data, version[0], namespace[0])
elif len(decoded_bytes) == 25:
# The namespace is undefined
version, data = decoded_bytes[0:1], decoded_bytes[1:-4]
return cls(data, version[0], None)
else:
raise ValueError('Invalid length')
def to_bytes(self):
"""Converts to a bytes instance.
Note that it's the data represented that is converted; the checksum, version and namespace are not included.
:return: The Base58Address instance.
:rtype: bytes
"""
return b'' + self
def __str__(self):
"""
Converts the address to a string.
:return: The base-58 encoded string.
:rtype: str
"""
if self.namespace is None:
full_payload = bytes([self.address.nVersion]) + self
else:
full_payload = bytes([self.namespace]) + bytes([self.address.nVersion]) + self
checksum = bitcoin.core.Hash(full_payload)[0:4]
return bitcoin.base58.encode(full_payload + checksum)
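# A minimal round-trip sketch. It assumes a python-bitcoinlib release whose
# CBitcoinAddress.from_bytes accepts (data, version), as this class expects,
# and mainnet parameters; the 20-byte payload is arbitrary test data.
def _example_base58_round_trip():
    payload = bytes(range(20))
    address = Base58Address(payload, 0, None)      # version 0: mainnet P2PKH
    encoded = str(address)                         # base58 string with checksum
    decoded = Base58Address.from_string(encoded)
    assert decoded.to_bytes() == payload
    assert decoded.namespace is None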
|