metadata (dict) | text (string, lengths 60 to 3.49M)
---|---|
{
"source": "josephtl/stanCode-projects",
"score": 4
} |
#### File: stanCode-projects/my_photoshop/blur.py
```python
from simpleimage import SimpleImage
def blur(img):
"""
:param img: (SimpleImage), an emoji image.
:return: A blurred emoji image.
"""
blank = SimpleImage.blank(img.width, img.height)
for y in range(img.height):
for x in range(img.width):
pixel_blank = blank.get_pixel(x, y)
# upper left corner
if x == 0 and y == 0:
pixel_img_lower = img.get_pixel(x, y + 1)
pixel_img_right = img.get_pixel(x + 1, y)
pixel_blank.red = (pixel_img_lower.red + pixel_img_right.red)/2
pixel_blank.green = (pixel_img_lower.green + pixel_img_right.green)/2
pixel_blank.blue = (pixel_img_lower.blue + pixel_img_right.blue)/2
# upper right corner
elif x == img.width-1 and y == 0:
pixel_img_lower = img.get_pixel(x, y + 1)
pixel_img_left = img.get_pixel(x-1, y)
pixel_blank.red = (pixel_img_lower.red + pixel_img_left.red) / 2
pixel_blank.green = (pixel_img_lower.green + pixel_img_left.green) / 2
pixel_blank.blue = (pixel_img_lower.blue + pixel_img_left.blue) / 2
# lower left corner
elif x == 0 and y == img.height-1:
pixel_img_upper = img.get_pixel(x, y - 1)
pixel_img_right = img.get_pixel(x + 1, y)
pixel_blank.red = (pixel_img_upper.red + pixel_img_right.red) / 2
pixel_blank.green = (pixel_img_upper.green + pixel_img_right.green) / 2
pixel_blank.blue = (pixel_img_upper.blue + pixel_img_right.blue) /2
# lower right corner
elif x == img.width-1 and y == img.height-1:
pixel_img_upper = img.get_pixel(x, y - 1)
pixel_img_left = img.get_pixel(x - 1, y)
pixel_blank.red = (pixel_img_upper.red + pixel_img_left.red) / 2
pixel_blank.green = (pixel_img_upper.green + pixel_img_left.green) / 2
pixel_blank.blue = (pixel_img_upper.blue + pixel_img_left.blue) / 2
# left border
elif x == 0 and 0 < y < img.height-1:
pixel_img_upper = img.get_pixel(x, y - 1)
pixel_img_lower = img.get_pixel(x, y + 1)
pixel_img_right = img.get_pixel(x + 1, y)
pixel_blank.red = (pixel_img_upper.red + pixel_img_lower.red + pixel_img_right.red)/3
pixel_blank.green = (pixel_img_upper.green + pixel_img_lower.green + pixel_img_right.green)/3
pixel_blank.blue = (pixel_img_upper.blue + pixel_img_lower.blue + pixel_img_right.blue)/3
# upper border
elif 0 < x < img.width-1 and (y == 0):
pixel_img_lower = img.get_pixel(x, y + 1)
pixel_img_left = img.get_pixel(x-1, y)
pixel_img_right = img.get_pixel(x + 1, y)
pixel_blank.red = (pixel_img_left.red + pixel_img_lower.red + pixel_img_right.red) / 3
pixel_blank.green = (pixel_img_left.green + pixel_img_lower.green + pixel_img_right.green) / 3
pixel_blank.blue = (pixel_img_left.blue + pixel_img_lower.blue + pixel_img_right.blue) / 3
# lower border
elif 0 < x < img.width-1 and y == img.height-1:
pixel_img_upper = img.get_pixel(x, y - 1)
pixel_img_left = img.get_pixel(x - 1, y)
pixel_img_right = img.get_pixel(x + 1, y)
pixel_blank.red = (pixel_img_left.red + pixel_img_upper.red + pixel_img_right.red) / 3
pixel_blank.green = (pixel_img_left.green + pixel_img_upper.green + pixel_img_right.green) / 3
pixel_blank.blue = (pixel_img_left.blue + pixel_img_upper.blue + pixel_img_right.blue) / 3
# right border
elif x == img.width-1 and 0 < y < img.height-1:
pixel_img_upper = img.get_pixel(x, y - 1)
pixel_img_left = img.get_pixel(x - 1, y)
pixel_img_lower = img.get_pixel(x, y + 1)
pixel_blank.red = (pixel_img_left.red + pixel_img_upper.red + pixel_img_lower.red) / 3
pixel_blank.green = (pixel_img_left.green + pixel_img_upper.green + pixel_img_lower.green) / 3
pixel_blank.blue = (pixel_img_left.blue + pixel_img_upper.blue + pixel_img_lower.blue) / 3
# middle part
else:
pixel_img_upper = img.get_pixel(x, y - 1)
pixel_img_lower = img.get_pixel(x, y + 1)
pixel_img_left = img.get_pixel(x - 1, y)
pixel_img_right = img.get_pixel(x + 1, y)
pixel_img_upper_left = img.get_pixel(x-1, y-1)
pixel_img_upper_right = img.get_pixel(x+1, y-1)
pixel_img_lower_left = img.get_pixel(x-1, y+1)
pixel_img_lower_right = img.get_pixel(x+1, y+1)
pixel_blank.red = (pixel_img_upper.red + pixel_img_lower.red + pixel_img_left.red + pixel_img_right.red + pixel_img_upper_left.red + pixel_img_lower_left.red + pixel_img_upper_right.red + pixel_img_lower_right.red)/8
pixel_blank.green = (pixel_img_upper.green + pixel_img_lower.green + pixel_img_left.green + pixel_img_right.green + pixel_img_upper_left.green + pixel_img_lower_left.green + pixel_img_upper_right.green + pixel_img_lower_right.green)/8
pixel_blank.blue = (pixel_img_upper.blue + pixel_img_lower.blue + pixel_img_left.blue + pixel_img_right.blue + pixel_img_upper_left.blue + pixel_img_lower_left.blue + pixel_img_upper_right.blue + pixel_img_lower_right.blue)/8
return blank
def main():
"""
This program blurs the original image to some extent.
"""
old_img = SimpleImage("images/smiley-face.png")
old_img.show()
blurred_img = blur(old_img)
for i in range(5):
blurred_img = blur(blurred_img)
blurred_img.show()
if __name__ == '__main__':
main()
```
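The branching above treats every corner and border as a separate case and, at the borders, averages only the orthogonal neighbours. A more compact variant (a sketch, not part of the original project) averages all in-bounds neighbours, including the diagonals at the borders, so edge pixels come out slightly different:
```python
from simpleimage import SimpleImage

def blur_compact(img):
    """Sketch: average every in-bounds neighbour of each pixel."""
    blank = SimpleImage.blank(img.width, img.height)
    for y in range(img.height):
        for x in range(img.width):
            reds = greens = blues = count = 0
            for dy in (-1, 0, 1):
                for dx in (-1, 0, 1):
                    if dx == 0 and dy == 0:
                        continue  # like the original, skip the centre pixel
                    nx, ny = x + dx, y + dy
                    if 0 <= nx < img.width and 0 <= ny < img.height:
                        neighbour = img.get_pixel(nx, ny)
                        reds += neighbour.red
                        greens += neighbour.green
                        blues += neighbour.blue
                        count += 1
            pixel = blank.get_pixel(x, y)
            pixel.red = reds // count
            pixel.green = greens // count
            pixel.blue = blues // count
    return blank
```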
#### File: stanCode-projects/my_photoshop/green_screen.py
```python
from simpleimage import SimpleImage
def combine(background_img, figure_img):
"""
:param background_img: (SimpleImage), the background image.
:param figure_img: (SimpleImage), an actress standing in front of a green screen
:return: (SimpleImage), the combined image: every green-screen pixel of
figure_img is replaced by the corresponding pixel of background_img.
"""
for y in range(figure_img.height):
for x in range(figure_img.width):
background = background_img.get_pixel(x, y)
figure = figure_img.get_pixel(x, y)
bigger = max(figure.red, figure.blue)
if figure.green > bigger * 2:
figure.red = background.red
figure.green = background.green
figure.blue = background.blue
return figure_img
def main():
"""
This program combines two images, with the background image replacing the green parts of the figure image.
"""
space_ship = SimpleImage("images/MillenniumFalcon.png")
figure = SimpleImage("images/ReyGreenScreen.png")
result = combine(space_ship, figure)
result.show()
if __name__ == '__main__':
main()
```
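The replacement rule in `combine` marks a pixel as green screen when its green channel is more than twice the larger of its red and blue channels. A small self-contained check of that rule (the pixel values here are made up for illustration):
```python
def is_green_screen(red, green, blue):
    """Same test as in combine(): green dominates both other channels."""
    return green > max(red, blue) * 2

print(is_green_screen(40, 200, 60))    # True  -> replaced by the background pixel
print(is_green_screen(210, 160, 140))  # False -> the figure pixel is kept
```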
#### File: stanCode-projects/weather_master/weather_master.py
```python
EXIT = -100
def main():
"""
This is a temperature information processor
that lets users enter temperature readings.
At the end, it shows the highest and the lowest temperature,
the average temperature,
and the number of cold days, i.e. days with a temperature below 16 degrees.
"""
print('stanCode "Weather Master 4.0"!')
n = int(input('Next Temperature: (or ' + str(EXIT) + ' to quit)? '))
highest = n
lowest = n
total = n
times = 0
cold = 0
if n == EXIT:
print('No temperatures were entered.')
else:
if n < 16:
cold = add_cold(cold)
while True:
n = int(input('Next Temperature: (or ' + str(EXIT) + ' to quit)? '))
times += 1
# incrementing before the EXIT check also counts the EXIT entry, which offsets the first temperature read before the loop, so `times` equals the number of temperatures entered
if n == EXIT:
break
elif n > highest:
highest = n
total = sum_temp(total, n)
if n < 16:
cold = add_cold(cold)
elif n < lowest:
lowest = n
total = sum_temp(total, n)
if n < 16:
cold = add_cold(cold)
else:
total = sum_temp(total, n)
if n < 16:
cold = add_cold(cold)
print('Highest temperature: ' + str(highest))
print('Lowest temperature: ' + str(lowest))
print('Average = ' + str(average(total, times)))
print(str(cold) + ' cold day(s)')
def sum_temp(s, n):
"""
:param s: int, running sum of the temperatures entered so far
:param n: int, the latest temperature to add
:return: s + n
"""
s += n
return s
def add_cold(c):
"""
:param c: int, current count of cold days (temperatures below 16, excluding the EXIT value)
:return: c + 1
"""
c += 1
return c
def average(a, b):
"""
:param a: int, sum of all temperatures entered
:param b: int, number of temperatures entered
:return: a / b
"""
avg = a / b
return avg
#
###### DO NOT EDIT CODE BELOW THIS LINE ######
if __name__ == "__main__":
main()
``` |
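An equivalent, arguably clearer flow (a sketch, not part of the original assignment) collects the temperatures first and derives the statistics afterwards, which avoids the counter placement discussed in the comment above:
```python
def read_temperatures(exit_value=-100):
    """Read integers until exit_value is entered; return the collected list."""
    temps = []
    while True:
        t = int(input(f'Next Temperature: (or {exit_value} to quit)? '))
        if t == exit_value:
            return temps
        temps.append(t)

temps = read_temperatures()
if not temps:
    print('No temperatures were entered.')
else:
    print('Highest temperature:', max(temps))
    print('Lowest temperature:', min(temps))
    print('Average =', sum(temps) / len(temps))
    print(sum(1 for t in temps if t < 16), 'cold day(s)')
```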
{
"source": "JosephTLucas/ahrs",
"score": 4
} |
#### File: ahrs/common/dcm.py
```python
from typing import Tuple
import numpy as np
from .mathfuncs import skew
from .orientation import rotation
from .orientation import rot_seq
# Functions to convert DCM to quaternion representation
from .orientation import shepperd
from .orientation import hughes
from .orientation import chiaverini
from .orientation import itzhack
from .orientation import sarabandi
def _assert_iterables(item, item_name: str = 'iterable'):
if not isinstance(item, (list, tuple, np.ndarray)):
raise TypeError(f"{item_name} must be given as an array, got {type(item)}")
class DCM(np.ndarray):
"""
Direction Cosine Matrix in SO(3)
Class to represent a Direction Cosine Matrix. It is built from a 3-by-3
array, but it can also be built from 3-dimensional vectors representing the
roll-pitch-yaw angles, a quaternion, or an axis-angle pair representation.
Parameters
----------
array : array-like, default: None
Array to build the DCM with.
q : array-like, default: None
Quaternion to convert to DCM.
rpy : array-like, default: None
Array with roll->pitch->yaw angles.
euler : tuple, default: None
Tuple with a rotation sequence string and an array of angles.
axang : tuple, default: None
Tuple with an array and a float of the axis and the angle
representation.
Attributes
----------
A : numpy.ndarray
Array with the 3-by-3 direction cosine matrix.
Examples
--------
All DCM are created as an identity matrix, which means no rotation.
>>> from ahrs import DCM
>>> DCM()
DCM([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
A rotation around a single axis can be defined by giving the desired axis
and its value, in degrees.
>>> DCM(x=10.0)
DCM([[ 1. , 0. , 0. ],
[ 0. , 0.98480775, -0.17364818],
[ 0. , 0.17364818, 0.98480775]])
>>> DCM(y=20.0)
DCM([[ 0.93969262, 0. , 0.34202014],
[ 0. , 1. , 0. ],
[-0.34202014, 0. , 0.93969262]])
>>> DCM(z=30.0)
DCM([[ 0.8660254, -0.5 , 0. ],
[ 0.5 , 0.8660254, 0. ],
[ 0. , 0. , 1. ]])
If we want a rotation conforming the roll-pitch-yaw sequence, we can give
the corresponding angles.
>>> DCM(rpy=[30.0, 20.0, 10.0])
DCM([[ 0.81379768, -0.44096961, 0.37852231],
[ 0.46984631, 0.88256412, 0.01802831],
[-0.34202014, 0.16317591, 0.92541658]])
.. note::
Notice the angles are given in reverse order, as that is the order in
which the matrices are multiplied.
>>> DCM(z=30.0) @ DCM(y=20.0) @ DCM(x=10.0)
DCM([[ 0.81379768, -0.44096961, 0.37852231],
[ 0.46984631, 0.88256412, 0.01802831],
[-0.34202014, 0.16317591, 0.92541658]])
But also a different sequence can be defined, if given as a tuple with two
elements: the order of the axis to rotate about, and the value of the
rotation angles (again in reverse order)
>>> DCM(euler=('zyz', [40.0, 50.0, 60.0]))
DCM([[-0.31046846, -0.74782807, 0.58682409],
[ 0.8700019 , 0.02520139, 0.49240388],
[-0.38302222, 0.66341395, 0.64278761]])
>>> DCM(z=40.0) @ DCM(y=50.0) @ DCM(z=60.0)
DCM([[-0.31046846, -0.74782807, 0.58682409],
[ 0.8700019 , 0.02520139, 0.49240388],
[-0.38302222, 0.66341395, 0.64278761]])
Another option is to build the rotation matrix from a quaternion:
>>> DCM(q=[1., 2., 3., 4.])
DCM([[-0.66666667, 0.13333333, 0.73333333],
[ 0.66666667, -0.33333333, 0.66666667],
[ 0.33333333, 0.93333333, 0.13333333]])
The quaternions are automatically normalized to make them versors and be
used as rotation operators.
Finally, we can also build the rotation matrix from an axis-angle
representation:
>>> DCM(axang=([1., 2., 3.], 60.0))
DCM([[-0.81295491, 0.52330834, 0.25544608],
[ 0.03452394, -0.3945807 , 0.91821249],
[ 0.58130234, 0.75528436, 0.30270965]])
The axis of rotation is also normalized to be used as part of the rotation
operator.
"""
def __new__(subtype, array: np.ndarray = None, **kwargs):
if array is None:
array = np.identity(3)
if 'q' in kwargs:
array = DCM.from_q(DCM, np.array(kwargs.pop('q')))
if any(x.lower() in ['x', 'y', 'z'] for x in kwargs):
array = np.identity(3)
array = array@rotation('x', kwargs.pop('x', 0.0))
array = array@rotation('y', kwargs.pop('y', 0.0))
array = array@rotation('z', kwargs.pop('z', 0.0))
if 'rpy' in kwargs:
angles = kwargs.pop('rpy')
_assert_iterables(angles, "Roll-Pitch-Yaw angles")
if len(angles) != 3:
raise ValueError("roll-pitch-yaw angles must be an array with 3 rotations in degrees.")
array = rot_seq('zyx', angles)
if 'euler' in kwargs:
seq, angs = kwargs.pop('euler')
array = rot_seq(seq, angs)
if 'axang' in kwargs:
ax, ang = kwargs.pop('axang')
array = DCM.from_axisangle(DCM, np.array(ax), ang)
_assert_iterables(array, "Direction Cosine Matrix")
if array.shape[-2:] != (3, 3):
raise ValueError(f"Direction Cosine Matrix must have shape (3, 3) or (N, 3, 3), got {array.shape}.")
in_SO3 = np.isclose(np.linalg.det(array), 1.0)
in_SO3 &= np.allclose([email protected], np.identity(3))
if not in_SO3:
raise ValueError("Given attitude is not in SO(3)")
# Create the ndarray instance of type DCM. This will call the standard
# ndarray constructor, but return an object of type DCM.
obj = super(DCM, subtype).__new__(subtype, array.shape, float, array)
obj.A = array
return obj
@property
def I(self) -> np.ndarray:
"""
synonym of property :meth:`inv`.
Examples
--------
>>> R = DCM(rpy=[10.0, -20.0, 30.0])
>>> R.view()
DCM([[ 0.92541658, -0.31879578, -0.20487413],
[ 0.16317591, 0.82317294, -0.54383814],
[ 0.34202014, 0.46984631, 0.81379768]])
>>> R.I
array([[ 0.92541658, 0.16317591, 0.34202014],
[-0.31879578, 0.82317294, 0.46984631],
[-0.20487413, -0.54383814, 0.81379768]])
Returns
-------
np.ndarray
Inverse of the DCM.
"""
return self.A.T
@property
def inv(self) -> np.ndarray:
"""
Inverse of the DCM.
The direction cosine matrix belongs to the Special Orthogonal group
`SO(3) <https://en.wikipedia.org/wiki/SO(3)>`_, where its transpose is
equal to its inverse:
.. math::
\\mathbf{R}^T\\mathbf{R} = \\mathbf{R}^{-1}\\mathbf{R} = \\mathbf{I}_3
Examples
--------
>>> R = DCM(rpy=[10.0, -20.0, 30.0])
>>> R.view()
DCM([[ 0.92541658, -0.31879578, -0.20487413],
[ 0.16317591, 0.82317294, -0.54383814],
[ 0.34202014, 0.46984631, 0.81379768]])
>>> R.inv
array([[ 0.92541658, 0.16317591, 0.34202014],
[-0.31879578, 0.82317294, 0.46984631],
[-0.20487413, -0.54383814, 0.81379768]])
Returns
-------
np.ndarray
Inverse of the DCM.
"""
return self.A.T
@property
def det(self) -> float:
"""
Synonym of property :meth:`determinant`.
Returns
-------
float
Determinant of the DCM.
Examples
--------
>>> R = DCM(rpy=[10.0, -20.0, 30.0])
>>> R.view()
DCM([[ 0.92541658, -0.31879578, -0.20487413],
[ 0.16317591, 0.82317294, -0.54383814],
[ 0.34202014, 0.46984631, 0.81379768]])
>>> R.det
1.0000000000000002
"""
return np.linalg.det(self.A)
@property
def determinant(self) -> float:
"""
Determinant of the DCM.
Given a direction cosine matrix :math:`\\mathbf{R}`, its determinant
:math:`|\\mathbf{R}|` is found as:
.. math::
|\\mathbf{R}| =
\\begin{vmatrix}r_{11} & r_{12} & r_{13} \\\\ r_{21} & r_{22} & r_{23} \\\\ r_{31} & r_{32} & r_{33}\\end{vmatrix}=
r_{11}\\begin{vmatrix}r_{22} & r_{23}\\\\r_{32} & r_{33}\\end{vmatrix} -
r_{12}\\begin{vmatrix}r_{21} & r_{23}\\\\r_{31} & r_{33}\\end{vmatrix} +
r_{13}\\begin{vmatrix}r_{21} & r_{22}\\\\r_{31} & r_{32}\\end{vmatrix}
where the determinant of :math:`\\mathbf{B}\\in\\mathbb{R}^{2\\times 2}`
is:
.. math::
|\\mathbf{B}|=\\begin{vmatrix}b_{11}&b_{12}\\\\b_{21}&b_{22}\\end{vmatrix}=b_{11}b_{22}-b_{12}b_{21}
All matrices in SO(3), to which direction cosine matrices belong, have
a determinant equal to :math:`+1`.
Returns
-------
float
Determinant of the DCM.
Examples
--------
>>> R = DCM(rpy=[10.0, -20.0, 30.0])
>>> R.view()
DCM([[ 0.92541658, -0.31879578, -0.20487413],
[ 0.16317591, 0.82317294, -0.54383814],
[ 0.34202014, 0.46984631, 0.81379768]])
>>> R.determinant
1.0000000000000002
"""
return np.linalg.det(self.A)
@property
def fro(self) -> float:
"""
Synonym of property :meth:`frobenius`.
Returns
-------
float
Frobenius norm of the DCM.
Examples
--------
>>> R = DCM(rpy=[10.0, -20.0, 30.0])
>>> R.view()
DCM([[ 0.92541658, -0.31879578, -0.20487413],
[ 0.16317591, 0.82317294, -0.54383814],
[ 0.34202014, 0.46984631, 0.81379768]])
>>> R.fro
1.7320508075688774
"""
return np.linalg.norm(self.A, 'fro')
@property
def frobenius(self) -> float:
"""
Frobenius norm of the DCM.
The `Frobenius norm <https://en.wikipedia.org/wiki/Matrix_norm#Frobenius_norm>`_
of a matrix :math:`\\mathbf{A}` is defined as:
.. math::
\\|\\mathbf{A}\\|_F = \\sqrt{\\sum_{i=1}^m\\sum_{j=1}^n|a_{ij}|^2}
Returns
-------
float
Frobenius norm of the DCM.
Examples
--------
>>> R = DCM(rpy=[10.0, -20.0, 30.0])
>>> R.view()
DCM([[ 0.92541658, -0.31879578, -0.20487413],
[ 0.16317591, 0.82317294, -0.54383814],
[ 0.34202014, 0.46984631, 0.81379768]])
>>> R.frobenius
1.7320508075688774
"""
return np.linalg.norm(self.A, 'fro')
@property
def log(self) -> np.ndarray:
"""
Logarithm of DCM.
The logarithmic map is defined as the inverse of the exponential map.
It corresponds to the logarithm given by the Rodrigues rotation formula:
.. math::
\\log(\\mathbf{R}) = \\frac{\\theta(\\mathbf{R}-\\mathbf{R}^T)}{2\\sin\\theta}
with :math:`\\theta=\\arccos\\Big(\\frac{\\mathrm{tr}(\\mathbf{R})-1}{2}\\Big)`.
Returns
-------
log : numpy.ndarray
Logarithm of DCM
Examples
--------
>>> R = DCM(rpy=[10.0, -20.0, 30.0])
>>> R.view()
DCM([[ 0.92541658, -0.31879578, -0.20487413],
[ 0.16317591, 0.82317294, -0.54383814],
[ 0.34202014, 0.46984631, 0.81379768]])
>>> R.log
array([[ 0. , -0.26026043, -0.29531805],
[ 0.26026043, 0. , -0.5473806 ],
[ 0.29531805, 0.5473806 , 0. ]])
"""
angle = np.arccos((self.A.trace()-1)/2)
S = self.A-self.A.T # Skew-symmetric matrix
logR = angle*S/(2*np.sin(angle))
return logR
@property
def adjugate(self) -> np.ndarray:
"""
Return the adjugate of the DCM.
The adjugate, a.k.a. *classical adjoint*, of a matrix :math:`\\mathbf{A}`
is the transpose of its *cofactor matrix*. For orthogonal matrices, it
simplifies to:
.. math::
\\mathrm{adj}(\\mathbf{A}) = \\mathrm{det}(\\mathbf{A})\\mathbf{A}^T
Returns
-------
np.ndarray
Adjugate of the DCM.
Examples
--------
>>> R = DCM(rpy=[10.0, -20.0, 30.0])
>>> R.view()
DCM([[ 0.92541658, -0.31879578, -0.20487413],
[ 0.16317591, 0.82317294, -0.54383814],
[ 0.34202014, 0.46984631, 0.81379768]])
>>> R.adjugate
array([[ 0.92541658, 0.16317591, 0.34202014],
[-0.31879578, 0.82317294, 0.46984631],
[-0.20487413, -0.54383814, 0.81379768]])
"""
return np.linalg.det(self.A)*self.A.T
@property
def adj(self) -> np.ndarray:
"""
Synonym of property :meth:`adjugate`.
Returns
-------
np.ndarray
Adjugate of the DCM.
Examples
--------
>>> R = DCM(rpy=[10.0, -20.0, 30.0])
>>> R.view()
DCM([[ 0.92541658, -0.31879578, -0.20487413],
[ 0.16317591, 0.82317294, -0.54383814],
[ 0.34202014, 0.46984631, 0.81379768]])
>>> R.adj
array([[ 0.92541658, 0.16317591, 0.34202014],
[-0.31879578, 0.82317294, 0.46984631],
[-0.20487413, -0.54383814, 0.81379768]])
"""
return np.linalg.det(self.A)*self.A.T
def to_axisangle(self) -> Tuple[np.ndarray, float]:
"""
Return axis-angle representation of the DCM.
Defining a *rotation matrix* :math:`\\mathbf{R}`:
.. math::
\\mathbf{R} =
\\begin{bmatrix}
r_{11} & r_{12} & r_{13} \\\\
r_{21} & r_{22} & r_{23} \\\\
r_{31} & r_{32} & r_{33}
\\end{bmatrix}
The axis-angle representation of :math:`\\mathbf{R}` is obtained with:
.. math::
\\theta = \\arccos\\Big(\\frac{\\mathrm{tr}(\\mathbf{R})-1}{2}\\Big)
for the **rotation angle**, and:
.. math::
\\mathbf{k} = \\frac{1}{2\\sin\\theta}
\\begin{bmatrix}r_{32} - r_{23} \\\\ r_{13} - r_{31} \\\\ r_{21} - r_{12}\\end{bmatrix}
for the **rotation vector**.
.. note::
The axis-angle representation is not unique since a rotation of
:math:`−\\theta` about :math:`−\\mathbf{k}` is the same as a
rotation of :math:`\\theta` about :math:`\\mathbf{k}`.
Returns
-------
axis : numpy.ndarray
Axis of rotation.
angle : float
Angle of rotation, in radians.
Examples
--------
>>> R = DCM(rpy=[10.0, -20.0, 30.0])
>>> R.view()
DCM([[ 0.92541658, -0.31879578, -0.20487413],
[ 0.16317591, 0.82317294, -0.54383814],
[ 0.34202014, 0.46984631, 0.81379768]])
>>> R.to_axisangle()
(array([ 0.81187135, -0.43801381, 0.38601658]), 0.6742208510527136)
"""
angle = np.arccos((self.A.trace()-1)/2)
axis = np.zeros(3)
if angle!=0:
S = np.array([self.A[2, 1]-self.A[1, 2], self.A[0, 2]-self.A[2, 0], self.A[1, 0]-self.A[0, 1]])
axis = S/(2*np.sin(angle))
return axis, angle
def to_axang(self) -> Tuple[np.ndarray, float]:
"""
Synonym of method :meth:`to_axisangle`.
Returns
-------
axis : numpy.ndarray
Axis of rotation.
angle : float
Angle of rotation, in radians.
Examples
--------
>>> R = DCM(rpy=[10.0, -20.0, 30.0])
>>> R.view()
DCM([[ 0.92541658, -0.31879578, -0.20487413],
[ 0.16317591, 0.82317294, -0.54383814],
[ 0.34202014, 0.46984631, 0.81379768]])
>>> R.to_axang()
(array([ 0.81187135, -0.43801381, 0.38601658]), 0.6742208510527136)
"""
return self.to_axisangle()
def from_axisangle(self, axis: np.ndarray, angle: float) -> np.ndarray:
"""
DCM from axis-angle representation
Use Rodrigues' formula to obtain the DCM from the axis-angle
representation.
.. math::
\\mathbf{R} = \\mathbf{I}_3 + (\\sin\\theta)\\mathbf{K} + (1-\\cos\\theta)\\mathbf{K}^2
where :math:`\\mathbf{R}` is the DCM, which rotates through an **angle**
:math:`\\theta` counterclockwise about the **axis** :math:`\\mathbf{k}`,
:math:`\\mathbf{I}_3` is the :math:`3\\times 3` identity matrix, and
:math:`\\mathbf{K}` is the `skew-symmetric <https://en.wikipedia.org/wiki/Skew-symmetric_matrix>`_
matrix of :math:`\\mathbf{k}`.
Parameters
----------
axis : numpy.ndarray
Axis of rotation.
angle : float
Angle of rotation, in radians.
Returns
-------
R : numpy.ndarray
3-by-3 direction cosine matrix
Examples
--------
>>> R = DCM()
>>> R.view()
DCM([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
>>> R.from_axisangle([0.81187135, -0.43801381, 0.38601658], 0.6742208510527136)
array([[ 0.92541658, -0.31879578, -0.20487413],
[ 0.16317591, 0.82317294, -0.54383814],
[ 0.34202014, 0.46984631, 0.81379768]])
"""
_assert_iterables(axis)
if not isinstance(angle, (int, float)):
raise ValueError(f"`angle` must be a float value. Got {type(angle)}")
axis /= np.linalg.norm(axis)
K = skew(axis)
return np.identity(3) + np.sin(angle)*K + (1-np.cos(angle))*K@K
def from_axang(self, axis: np.ndarray, angle: float) -> np.ndarray:
"""
Synonym of method :meth:`from_axisangle`.
Parameters
----------
axis : numpy.ndarray
Axis of rotation.
angle : float
Angle of rotation, in radians.
Returns
-------
R : numpy.ndarray
3-by-3 direction cosine matrix
Examples
--------
>>> R = DCM()
>>> R.view()
DCM([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
>>> R.from_axang([0.81187135, -0.43801381, 0.38601658], 0.6742208510527136)
array([[ 0.92541658, -0.31879578, -0.20487413],
[ 0.16317591, 0.82317294, -0.54383814],
[ 0.34202014, 0.46984631, 0.81379768]])
"""
return self.from_axisangle(axis, angle)
def from_quaternion(self, q: np.ndarray) -> np.ndarray:
"""
DCM from given quaternion
The quaternion :math:`\\mathbf{q}` has the form :math:`\\mathbf{q} = (q_w, q_x, q_y, q_z)`,
where :math:`\\mathbf{q}_v = (q_x, q_y, q_z)` is the vector part, and
:math:`q_w` is the scalar part.
The resulting matrix :math:`\\mathbf{R}` has the form:
.. math::
\\mathbf{R}(\\mathbf{q}) =
\\begin{bmatrix}
1 - 2(q_y^2 + q_z^2) & 2(q_xq_y - q_wq_z) & 2(q_xq_z + q_wq_y) \\\\
2(q_xq_y + q_wq_z) & 1 - 2(q_x^2 + q_z^2) & 2(q_yq_z - q_wq_x) \\\\
2(q_xq_z - q_wq_y) & 2(q_wq_x + q_yq_z) & 1 - 2(q_x^2 + q_y^2)
\\end{bmatrix}
The identity Quaternion :math:`\\mathbf{q} = \\begin{pmatrix}1 & 0 & 0 & 0\\end{pmatrix}`,
produces a :math:`3 \\times 3` Identity matrix :math:`\\mathbf{I}_3`.
Parameters
----------
q : numpy.ndarray
Quaternion
Returns
-------
R : numpy.ndarray
3-by-3 direction cosine matrix
Examples
--------
>>> R = DCM()
>>> R.from_quaternion([0.70710678, 0.0, 0.70710678, 0.0])
array([[-2.22044605e-16, 0.00000000e+00, 1.00000000e+00],
[ 0.00000000e+00, 1.00000000e+00, 0.00000000e+00],
[-1.00000000e+00, 0.00000000e+00, -2.22044605e-16]])
Non-normalized quaternions will be normalized and transformed too.
>>> R.from_quaternion([1, 0.0, 1, 0.0])
array([[ 2.22044605e-16, 0.00000000e+00, 1.00000000e+00],
[ 0.00000000e+00, 1.00000000e+00, 0.00000000e+00],
[-1.00000000e+00, 0.00000000e+00, 2.22044605e-16]])
A list (or a Numpy array) with N quaternions will return an N-by-3-by-3
array with the corresponding DCMs.
.. code-block::
>>> R.from_q([[1, 0.0, 1, 0.0], [1.0, -1.0, 0.0, 1.0], [0.0, 0.0, -1.0, 1.0]])
array([[[ 2.22044605e-16, 0.00000000e+00, 1.00000000e+00],
[ 0.00000000e+00, 1.00000000e+00, 0.00000000e+00],
[-1.00000000e+00, 0.00000000e+00, 2.22044605e-16]],
[[ 3.33333333e-01, -6.66666667e-01, -6.66666667e-01],
[ 6.66666667e-01, -3.33333333e-01, 6.66666667e-01],
[-6.66666667e-01, -6.66666667e-01, 3.33333333e-01]],
[[-1.00000000e+00, -0.00000000e+00, 0.00000000e+00],
[ 0.00000000e+00, 2.22044605e-16, -1.00000000e+00],
[ 0.00000000e+00, -1.00000000e+00, 2.22044605e-16]]])
"""
if q is None:
return np.identity(3)
_assert_iterables(q, "Quaternion")
q = np.copy(q)
if q.shape[-1] != 4 or q.ndim > 2:
raise ValueError(f"Quaternion must be of the form (4,) or (N, 4). Got {q.shape}")
if q.ndim > 1:
q /= np.linalg.norm(q, axis=1)[:, None] # Normalize
R = np.zeros((q.shape[0], 3, 3))
R[:, 0, 0] = 1.0 - 2.0*(q[:, 2]**2 + q[:, 3]**2)
R[:, 1, 0] = 2.0*(q[:, 1]*q[:, 2]+q[:, 0]*q[:, 3])
R[:, 2, 0] = 2.0*(q[:, 1]*q[:, 3]-q[:, 0]*q[:, 2])
R[:, 0, 1] = 2.0*(q[:, 1]*q[:, 2]-q[:, 0]*q[:, 3])
R[:, 1, 1] = 1.0 - 2.0*(q[:, 1]**2 + q[:, 3]**2)
R[:, 2, 1] = 2.0*(q[:, 0]*q[:, 1]+q[:, 2]*q[:, 3])
R[:, 0, 2] = 2.0*(q[:, 1]*q[:, 3]+q[:, 0]*q[:, 2])
R[:, 1, 2] = 2.0*(q[:, 2]*q[:, 3]-q[:, 0]*q[:, 1])
R[:, 2, 2] = 1.0 - 2.0*(q[:, 1]**2 + q[:, 2]**2)
return R
q /= np.linalg.norm(q)
return np.array([
[1.0-2.0*(q[2]**2+q[3]**2), 2.0*(q[1]*q[2]-q[0]*q[3]), 2.0*(q[1]*q[3]+q[0]*q[2])],
[2.0*(q[1]*q[2]+q[0]*q[3]), 1.0-2.0*(q[1]**2+q[3]**2), 2.0*(q[2]*q[3]-q[0]*q[1])],
[2.0*(q[1]*q[3]-q[0]*q[2]), 2.0*(q[0]*q[1]+q[2]*q[3]), 1.0-2.0*(q[1]**2+q[2]**2)]])
def from_q(self, q: np.ndarray) -> np.ndarray:
"""
Synonym of method :meth:`from_quaternion`.
Parameters
----------
q : numpy.ndarray
Quaternion
Returns
-------
R : numpy.ndarray
3-by-3 direction cosine matrix
Examples
--------
>>> R = DCM()
>>> R.from_q([0.70710678, 0.0, 0.70710678, 0.0])
array([[-2.22044605e-16, 0.00000000e+00, 1.00000000e+00],
[ 0.00000000e+00, 1.00000000e+00, 0.00000000e+00],
[-1.00000000e+00, 0.00000000e+00, -2.22044605e-16]])
Non-normalized quaternions will be normalized and transformed too.
>>> R.from_q([1, 0.0, 1, 0.0])
array([[ 2.22044605e-16, 0.00000000e+00, 1.00000000e+00],
[ 0.00000000e+00, 1.00000000e+00, 0.00000000e+00],
[-1.00000000e+00, 0.00000000e+00, 2.22044605e-16]])
A list (or a Numpy array) with N quaternions will return an N-by-3-by-3
array with the corresponding DCMs.
.. code-block::
>>> R.from_q([[1, 0.0, 1, 0.0], [1.0, -1.0, 0.0, 1.0], [0.0, 0.0, -1.0, 1.0]])
array([[[ 2.22044605e-16, 0.00000000e+00, 1.00000000e+00],
[ 0.00000000e+00, 1.00000000e+00, 0.00000000e+00],
[-1.00000000e+00, 0.00000000e+00, 2.22044605e-16]],
[[ 3.33333333e-01, -6.66666667e-01, -6.66666667e-01],
[ 6.66666667e-01, -3.33333333e-01, 6.66666667e-01],
[-6.66666667e-01, -6.66666667e-01, 3.33333333e-01]],
[[-1.00000000e+00, -0.00000000e+00, 0.00000000e+00],
[ 0.00000000e+00, 2.22044605e-16, -1.00000000e+00],
[ 0.00000000e+00, -1.00000000e+00, 2.22044605e-16]]])
"""
return self.from_quaternion(q)
def to_quaternion(self, method: str='chiaverini', **kw) -> np.ndarray:
"""
Quaternion from Direction Cosine Matrix.
There are five methods available to obtain a quaternion from a
Direction Cosine Matrix:
* ``'chiaverini'`` as described in [Chiaverini]_.
* ``'hughes'`` as described in [Hughes]_.
* ``'itzhack'`` as described in [Bar-Itzhack]_ using version ``3`` by
default. Possible options are integers ``1``, ``2`` or ``3``.
* ``'sarabandi'`` as described in [Sarabandi]_ with a threshold equal
to ``0.0`` by default. Possible threshold values are floats between
``-3.0`` and ``3.0``.
* ``'shepperd'`` as described in [Shepperd]_.
Parameters
----------
method : str, default: ``'chiaverini'``
Method to use. Options are: ``'chiaverini'``, ``'hughes'``,
``'itzhack'``, ``'sarabandi'``, and ``'shepperd'``.
Examples
--------
>>> R = DCM(rpy=[10.0, -20.0, 30.0])
>>> R.view()
DCM([[ 0.92541658, -0.31879578, -0.20487413],
[ 0.16317591, 0.82317294, -0.54383814],
[ 0.34202014, 0.46984631, 0.81379768]])
>>> R.to_quaternion() # Uses method 'chiaverini' by default
array([ 0.94371436, 0.26853582, -0.14487813, 0.12767944])
>>> R.to_quaternion('shepperd')
array([ 0.94371436, -0.26853582, 0.14487813, -0.12767944])
>>> R.to_quaternion('hughes')
array([ 0.94371436, -0.26853582, 0.14487813, -0.12767944])
>>> R.to_quaternion('itzhack', version=2)
array([ 0.94371436, -0.26853582, 0.14487813, -0.12767944])
>>> R.to_quaternion('sarabandi', threshold=0.5)
array([0.94371436, 0.26853582, 0.14487813, 0.12767944])
"""
q = np.array([1., 0., 0., 0.])
if method.lower() == 'hughes':
q = hughes(self.A)
if method.lower() == 'chiaverini':
q = chiaverini(self.A)
if method.lower() == 'shepperd':
q = shepperd(self.A)
if method.lower() == 'itzhack':
q = itzhack(self.A, version=kw.get('version', 3))
if method.lower() == 'sarabandi':
q = sarabandi(self.A, eta=kw.get('threshold', 0.0))
return q/np.linalg.norm(q)
def to_q(self, method: str='chiaverini', **kw) -> np.ndarray:
"""
Synonym of method :meth:`to_quaternion`.
Parameters
----------
method : str, default: ``'chiaverini'``
Method to use. Options are: ``'chiaverini'``, ``'hughes'``,
``'itzhack'``, ``'sarabandi'``, and ``'shepperd'``.
Examples
--------
>>> R = DCM(rpy=[10.0, -20.0, 30.0])
>>> R.view()
DCM([[ 0.92541658, -0.31879578, -0.20487413],
[ 0.16317591, 0.82317294, -0.54383814],
[ 0.34202014, 0.46984631, 0.81379768]])
>>> R.to_q() # Uses method 'chiaverini' by default
array([ 0.94371436, 0.26853582, -0.14487813, 0.12767944])
>>> R.to_q('shepperd')
array([ 0.94371436, -0.26853582, 0.14487813, -0.12767944])
>>> R.to_q('hughes')
array([ 0.94371436, -0.26853582, 0.14487813, -0.12767944])
>>> R.to_q('itzhack', version=2)
array([ 0.94371436, -0.26853582, 0.14487813, -0.12767944])
>>> R.to_q('sarabandi', threshold=0.5)
array([0.94371436, 0.26853582, 0.14487813, 0.12767944])
"""
return self.to_quaternion(method=method, **kw)
def to_angles(self) -> np.ndarray:
"""
Synonym of method :meth:`to_rpy`.
Returns
-------
a : numpy.ndarray
roll-pitch-yaw angles
"""
return self.to_rpy()
def to_rpy(self) -> np.ndarray:
"""
Roll-Pitch-Yaw Angles from DCM
A set of Roll-Pitch-Yaw angles may be written according to:
.. math::
\\mathbf{a} =
\\begin{bmatrix}\\phi \\\\ \\theta \\\\ \\psi\\end{bmatrix} =
\\begin{bmatrix}\\mathrm{arctan2}(r_{23}, r_{33}) \\\\ -\\arcsin(r_{13}) \\\\ \\mathrm{arctan2}(r_{12}, r_{11})\\end{bmatrix}
Returns
-------
a : numpy.ndarray
roll-pitch-yaw angles.
"""
phi = np.arctan2(self.A[1, 2], self.A[2, 2]) # Roll Angle
theta = -np.arcsin(self.A[0, 2]) # Pitch Angle
psi = np.arctan2(self.A[0, 1], self.A[0, 0]) # Yaw Angle
return np.array([phi, theta, psi])
def ode(self, w: np.ndarray) -> np.ndarray:
"""
Ordinary Differential Equation of the DCM.
Parameters
----------
w : numpy.ndarray
Instantaneous angular velocity, in rad/s, about X-, Y- and Z-axis.
Returns
-------
dR/dt : numpy.ndarray
Derivative of DCM
"""
return self.A@skew(w)
```
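The axis-angle methods above are inverses of each other; a quick round-trip check, based only on the docstring examples (assuming the package is installed and importable as `ahrs`):
```python
import numpy as np
from ahrs import DCM

R = DCM(rpy=[10.0, -20.0, 30.0])         # rotation built from roll-pitch-yaw, in degrees
axis, angle = R.to_axisangle()           # its axis-angle representation
R2 = DCM().from_axisangle(axis, angle)   # rebuilt as a plain 3-by-3 array
assert np.allclose(R, R2)                # both describe the same rotation
q = R.to_quaternion()                    # unit quaternion, 'chiaverini' method by default
```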
#### File: ahrs/utils/wmm.py
```python
import datetime
import pkgutil
from io import StringIO
from typing import Union, Tuple, Dict
# Third-Party Dependencies
import numpy as np
from ..common.constants import *
def geodetic2spherical(lat: float, lon: float, h: float, a: float = EARTH_EQUATOR_RADIUS/1000.0, b: float = EARTH_POLAR_RADIUS/1000.0) -> Tuple[float, float, float]:
"""
Transform geodetic coordinates into spherical geocentric coordinates
The transformation cannot be a simple cylindrical-to-spherical conversion, as
we must also consider the planet's ellipsoidal shape. With the aid of its
pre-defined flattening and eccentricity, we can better approximate the values
of the conversion.
In this function the Earth's major and minor semi-axes are used by default.
However, coordinates on other ellipsoidal bodies can be converted by giving
the dimensions of their semi-axes.
Notice that the longitude between both systems remains the same.
Parameters
----------
lat : float
Latitude, in radians, of point in geodetic coordinates
lon : float
Longitude, in radians, of point in geodetic coordinates
h : float
Height, in kilometers, of point in geodetic coordinates
a : float, default: 6378.137
Major semi-axis, in kilometers. Defaults to Earth's equatorial radius
b : float, default: 6356.752314245
Minor semi-axis, in kilometers. Defaults to Earth's polar radius
Returns
-------
lat_spheric : float
Latitude of point in spherical coordinates.
lon : float
Longitude of point in spherical coordinates. Same as geodetic.
r : float
Radial distance of point in spherical coordinates.
"""
# Estimate Spheroid's Flatness and First Eccentricity
f = (a-b)/a # Flatness
e2 = f*(2.0-f) # First Eccentricity
# Transform geodetic coordinates into spherical geocentric coordinates
Rc = a/np.sqrt(1.0-e2*np.sin(lat)**2) # Radius of curvature of prime vertical
rho = (Rc+h)*np.cos(lat)
z = (Rc*(1-e2)+h)*np.sin(lat)
r = np.linalg.norm([rho, z]) # Radial distance
lat_spheric = np.arcsin(z/r) # Spherical latitude
return lat_spheric, lon, r
class WMM:
"""
World Magnetic Model
It is mainly used to compute all elements of the World Magnetic Model (WMM)
at any given point on Earth.
The main magnetic field :math:`B` is a potential field defined, in
geocentric spherical coordinates (longitude :math:`\\lambda`, latitude
:math:`\\phi '` and radius :math:`r`), as the negative spatial gradient of a
scalar potential at a time :math:`t`. This potential can be expanded in
terms of spherical harmonics:
.. math::
V(\\lambda, \\phi', r, t) = a\\sum_{n=1}^{N}\\Big(\\frac{a}{r}\\Big)^{n+1}\\sum_{m=0}^{n}f(n, m, \\lambda, t)P_n^m(\\phi')
where
.. math::
f(n, m, \\lambda, t) = g_n^m(t) \\cos(m\\lambda) + h_n^m(t) \\sin(m\\lambda)
and the Schmidt semi-normalized associated Legendre functions :math:`P_n^m(\\phi')`
are defined as:
.. math::
P_n^m(\\mu) = \\left\\{
\\begin{array}{ll}
\\sqrt{2\\frac{(n-m)!}{(n+m)!}}P_{n, m}(\\mu) & \\mathrm{if} \; m > 0 \\\\
P_{n, m}(\\mu) & \\mathrm{if} \; m = 0
\\end{array}
\\right.
Any object of this class is initialized with the corresponding epoch,
determined by the given date. If no date is given, it is assumed to be the
day of the object's creation.
Once the WMM object is created, the estimation of the geomagnetic elements
is carried out with a call to the method `magnetic_field` giving the
location on Earth at which the magnetic elements will be calculated. This
location is given in decimal geodetic coordinates. See examples.
Every WMM object is created with a set of coefficients read from a COF
file, defined by the desired working date of the model. The latest
model available is WMM2020 corresponding to the lustrum 2020-2024.
This class can create models with dates between 2015 and 2024.
Parameters
----------
date : datetime.date, int or float, default: current day
Date of desired magnetic field estimation.
latitude : float, default: None
Latitude, in decimal degrees, in geodetic coordinates.
longitude : float, default: None
Longitude, in decimal degrees, in geodetic coordinates.
height : float, default: 0.0
Mean Sea Level Height, in kilometers.
Attributes
----------
date : datetime.date, default: datetime.date.today()
Desired date to estimate
date_dec : float
Desired date to estimate as decimal
epoch : float
Initial time of model in decimal years
model : str
WMM Model identifier
modeldate : str
Release date of WMM Model
wmm_filename : str
COF File used to build Model
degree : int
Degree of model
latitude : float
Latitude, in decimal degrees, in geodetic coordinates
longitude : float
Longitude in decimal degrees, in geodetic coordinates
height : float, default: 0.0
Mean Sea Level Height in kilometers
X : float, default: None
Northerly intensity, in nT
Y : float, default: None
Easterly intensity, in nT
Z : float, default: None
Vertical intensity, in nT
H : float, default: None
Horizontal intensity, in nT
F : float, default: None
Total intensity, in nT
I : float, default: None
Inclination angle (dip), in degrees
D : float, default: None
Declination angle, in degrees
GV : float, default: None
Grivation, in degrees
Examples
--------
The magnetic field can be computed at the creation of the WMM object by
passing the main parameters to its constructor:
>>> wmm = ahrs.utils.WMM(datetime.date(2017, 5, 12), latitude=10.0, longitude=-20.0, height=10.5)
>>> wmm.magnetic_elements
{'X': 30499.640469609083, 'Y': -5230.267158472566, 'Z': -1716.633311360368,
'H': 30944.850352270452, 'F': 30992.427998627096, 'I': -3.1751692563622993,
'D': -9.73078560629778, 'GV': -9.73078560629778}
"""
def __init__(self, date: Union[datetime.date, int, float] = None, latitude: float = None, longitude: float = None, height: float = 0.0) -> None:
self.reset_coefficients(date)
self.__dict__.update(dict.fromkeys(['X', 'Y', 'Z', 'H', 'F', 'I', 'D', 'GV']))
self.latitude = latitude
self.longitude = longitude
self.height = height
if self.latitude is not None and self.longitude is not None:  # 0.0 is a valid coordinate
self.magnetic_field(self.latitude, self.longitude, self.height, date=self.date)
def reset_coefficients(self, date: Union[datetime.date, int, float] = None) -> None:
"""
Reset Gauss coefficients to given date.
Given the date, the corresponding coefficients are updated. Basic
properties (epoch, release date, and model id) are read and updated in
the current instance.
The two coefficient tables (arrays) are also updated, where the
attribute `c` contains the Gaussian coefficients, while the attribute
`cd` contains the secular Gaussian coefficients.
The length of the Gaussian coefficient array determines the degree
:math:`n` of the model. This property updates the value of attribute
``degree``.
Parameters
----------
date : datetime.date, int or float, default: current day
Date of desired magnetic field estimation.
"""
self.reset_date(date)
self.__dict__.update(self.get_properties(self.wmm_filename))
self.load_coefficients(self.wmm_filename)
def load_coefficients(self, cof_file: str) -> None:
"""
Load model coefficients from COF file.
The model coefficients, also referred to as Gauss coefficients, are
listed in a COF file. These coefficients can be used to compute values
of the fields elements and their annual rates of change at any
location near the surface of the Earth.
The COF file has 6 columns:
* ``n`` is the degree.
* ``m`` is the order.
* ``g`` are time-dependent Gauss coefficients of degree ``n`` and order ``m``.
* ``h`` are time-dependent Gauss coefficients of degree ``n`` and order ``m``.
* ``gd`` are secular variations of coefficient ``g``.
* ``hd`` are secular variations of coefficient ``h``.
which constitute the *model* of the field. The first-order time
derivatives are called *secular terms*. The units are ``nT`` for the
main field, and ``nT/year`` for the secular variation.
The Gauss coefficients are defined for a time :math:`t` as:
.. math::
\\begin{eqnarray}
g_n^m(t) & = & g_n^m(t_0) + (t-t_0) \\dot{g}_n^m(t_0) \\\\
h_n^m(t) & = & h_n^m(t_0) + (t-t_0) \\dot{h}_n^m(t_0)
\\end{eqnarray}
where time is given in decimal years and :math:`t_0` corresponds to the
epoch read from the corresponding COF file.
Parameters
----------
cof_file : str
Path to COF file with the coefficients of the WMM
"""
file_data = pkgutil.get_data(__name__, cof_file).decode()
data = np.genfromtxt(StringIO(file_data), comments="999999", skip_header=1)
self.degree = int(max(data[:, 0]))
self.c = np.zeros((self.degree+1, self.degree+1))
self.cd = np.zeros((self.degree+1, self.degree+1))
for row in data:
n, m = row[:2].astype(int)
self.c[m, n] = row[2] # g_n^m
self.cd[m, n] = row[4] # g_n^m secular
if m != 0:
self.c[n, m-1] = row[3] # h_n^m
self.cd[n, m-1] = row[5] # h_n^m secular
def get_properties(self, cof_file: str) -> Dict[str, Union[str, float]]:
"""
Return dictionary of WMM properties from COF file.
Three properties are read and returned in a dictionary:
* ``epoch`` is the initial time :math:`t_0` as a `float`.
* ``model`` is a string of model used for the required lustrum.
* ``modeldate`` is the release date of used magnetic model.
Parameters
----------
cof_file : str
Path to COF file with the coefficients of the WMM
Returns
-------
properties : dictionary
Dictionary with the three WMM properties.
Examples
--------
>>> wmm = ahrs.WMM()
>>> wmm.get_properties('my_coefficients.COF')
{'model': 'WMM-2020', 'modeldate': '12/10/2019', 'epoch': 2020.0}
"""
if not cof_file.endswith(".COF"):
raise TypeError("File must have extension 'COF'")
first_line = pkgutil.get_data(__name__, cof_file).decode().split('\n')[0]
v = first_line.strip().split()
properties = dict(zip(["model", "modeldate"], v[1:]))
properties.update({"epoch": float(v[0])})
return properties
def reset_date(self, date: Union[datetime.date, int, float]) -> None:
"""
Set date to use with the model.
The WMM requires a date. This date can be given as an instance of
`datetime.date` or as a decimalized date of the format ``YYYY.d``.
If None is given it sets the date to the present day. In addition, the
corresponding COF file is also set.
Please note that only coefficients from year 2015 and later are provided
with this module.
Parameters
----------
date : datetime.date, int or float, default: current day
Date of desired magnetic field estimation.
"""
if date is None:
self.date = datetime.date.today()
self.date_dec = self.date.year + self.date.timetuple().tm_yday/365.0
elif isinstance(date, (int, float)):
self.date_dec = float(date)
self.date = datetime.date.fromordinal(round(datetime.date(int(date), 1, 1).toordinal() + (self.date_dec-int(self.date_dec))*365))
elif isinstance(date, datetime.date):
self.date = date
self.date_dec = self.date.year + self.date.timetuple().tm_yday/365.0
else:
raise TypeError("Date must be an instance of datetime.date or a decimalized year.")
if self.date.year < 2015:
raise ValueError("No available coefficients for dates before 2015.")
self.wmm_filename = 'WMM2015/WMM.COF' if self.date_dec < 2020.0 else 'WMM2020/WMM.COF'
def denormalize_coefficients(self, latitude: float) -> None:
"""Recursively estimate associated Legendre polynomials and derivatives
done in a recursive way as described by <NAME> in [Wertz]_ for
an efficient computation.
Given the Gaussian coefficients, it is possible to estimate the
magnetic field at any latitude on Earth for a certain date.
First, it is assumed that :math:`P_n^m(x)` are the Schmidt
semi-normalized functions. A normalization is made so that the
relative strength of terms of same degree :math:`n` but order :math:`m`
are used by comparing their respective Gauss coefficients.
For :math:`m=0` they are called *Legendre Polynomials* and can be
computed recursively with:
.. math::
P_n(x) = \\frac{2n-1}{n} x P_{n-1}(x) - \\frac{n-1}{n}P_{n-2}(x)
For :math:`m>0` they are known as *associated Legendre functions*
of degree :math:`n` and order :math:`m` and reduced to:
.. math::
P_{nm}(x) = (1-t^2)^{m/2} \\frac{d^m P_n(x)}{dt^m}
expressing the associated Legendre functions in terms of the Legendre
polynomials of same degree :math:`n`.
A more general formula to estimate both polynomial and associated
functions is given by:
.. math::
P_{nm}(x) = 2^{-n}(1-x^2)^{m/2} \\sum_{k=0}^{K}(-1)^k\\frac{(2n-2k)!}{k!(n-k)!(n-m-2k)!}x^{n-m-2k}
where :math:`K` is either :math:`(n-m)/2` or :math:`(n-m-1)/2`,
whichever is an integer. For a computational improvement, the terms are
calculated recursively.
We have to denormalize the coefficients from Schmidt to Gauss. The
Gauss functions :math:`P^{n, m}` are related to Schmidt functions
:math:`P_n^m` as:
.. math::
P_n^m = S_{n, m}P^{n, m}
where the factors :math:`S_{n, m}` are combined with Gaussian
coefficients to accelerate the computation, because they are
independent of the geographic location. Thus, we denormalize the
coefficients with:
.. math::
\\begin{array}{ll}
g^{n,m} & = S_{n,m} g_n^m \\\\
h^{n,m} & = S_{n,m} h_n^m
\\end{array}
The recursion for :math:`S_{n, m}` is:
.. math::
\\begin{array}{rlr}
S_{0,0} & = 1 & \\\\
S_{n,0} & = S_{n-1, 0} \\frac{2n-1}{n} & n\\geq 1 \\\\
S_{n,m} & = S_{n-1, m}\\sqrt{\\frac{(n-m+1)(\\delta _m^1+1)}{n+m}} & m\\geq 1
\\end{array}
where the Kronecker delta :math:`\\delta_j^i` is:
.. math::
\\delta_j^i = \\left\\{
\\begin{array}{ll}
1 & \\: i = j \\\\
0 & \\: \\mathrm{otherwise}
\\end{array}
\\right.
Similarly, :math:`P^{n, m}(x)` can be recursively obtained:
.. math::
\\begin{array}{ll}
P^{0,0} & = 1 \\\\
P^{n,n} & = \\sin (x) P^{n-1, n-1} \\\\
P^{n,m} & = \\cos (x) P^{n-1, m} - K^{n, m} P^{n-2, m}
\\end{array}
where:
.. math::
K^{n, m} = \\left\\{
\\begin{array}{ll}
\\frac{(n-1)^2-m^2}{(2n-1)(2n-3)} & \\: n>1 \\\\
0 & \\: n=1
\\end{array}
\\right.
Likewise, the gradient :math:`\\frac{dP^{n, m}}{dx}` is estimated as:
.. math::
\\begin{array}{llr}
\\frac{dP^{0, 0}}{dx} & = 1 & \\\\
\\frac{dP^{n, n}}{dx} & = \\sin (x) \\frac{dP^{n-1, n-1}}{dx} + \\cos (x) P^{n-1, n-1} & n\\geq 1 \\\\
\\frac{dP^{n, m}}{dx} & = \\cos (x) \\frac{dP^{n-1, m}}{dx} - \\sin (x) P^{n-1, m} - K^{n, m} \\frac{dP^{n-2, m}}{dx} &
\\end{array}
Parameters
----------
latitude : float
Latitude in spherical geocentric coordinates
"""
cos_lat = np.cos(latitude) # cos(phi')
sin_lat = np.sin(latitude) # sin(phi')
S = np.identity(self.degree+1) # Scale factors
self.k = np.zeros((self.degree+1, self.degree+1))
self.P = np.identity(self.degree+2)
self.dP = np.zeros((self.degree+2, self.degree+1))
for n in range(1, self.degree+1):
S[0, n] = S[0, n-1] * (2*n-1)/n
delta = 1 # Kronecker delta
for m in range(n+1):
self.k[m, n] = ((n-1)**2 - m**2) / ((2*n-1)*(2*n-3))
if m > 0:
S[m, n] = S[m-1, n] * np.sqrt((n-m+1)*(delta+1)/(n+m))
self.c[n, m-1] *= S[m, n]
self.cd[n, m-1] *= S[m, n]
delta = 0
if n == m:
self.P[m, n] = cos_lat*self.P[m-1, n-1]
self.dP[m, n] = cos_lat*self.dP[m-1, n-1] + sin_lat*self.P[m-1, n-1]
else:
self.P[m, n] = sin_lat*self.P[m, n-1] - self.k[m, n]*self.P[m, n-2]
self.dP[m, n] = sin_lat*self.dP[m, n-1] - cos_lat*self.P[m, n-1] - self.k[m, n]*self.dP[m, n-2]
self.c[m, n] *= S[m, n]
self.cd[m, n] *= S[m, n]
def magnetic_field(self, latitude: float, longitude: float, height: float = 0.0, date: Union[datetime.date, int, float] = datetime.date.today()) -> None:
"""
Calculate the geomagnetic field elements for a location on Earth.
The code includes comments with references to equation numbers
corresponding to the ones in the official report.
Having the coefficients :math:`g^{n, m}` and :math:`h^{n, m}`, we
extrapolate them for the desired time :math:`t` as:
.. math::
\\begin{array}{ll}
g_n^m(t) & = g_n^m(t_0) + \\Delta_t \\dot{g}_n^m (t_0) \\\\
h_n^m(t) & = h_n^m(t_0) + \\Delta_t \\dot{h}_n^m (t_0)
\\end{array}
where :math:`\\Delta_t = t-t_0` is the difference between the time
:math:`t` and the reference epoch model :math:`t_0` (``2020.0`` for the
newest version.)
The vector components of the main magnetic field :math:`B` are then
calculated with:
.. math::
\\begin{array}{ll}
X' & = -\\sum_{n=1}^N\\Big(\\frac{a}{r}\\Big)^{n+2} \\sum_{m=0}^n\\big(g_n^m(t) \\cos(m\\lambda)+h_n^m(t)\\sin(m\\lambda)\\big) \\frac{dP_n^m(\\sin \\phi ')}{d\\phi '} \\\\
Y' & = \\frac{1}{\\cos\\phi '}\\sum_{n=1}^N\\Big(\\frac{a}{r}\\Big)^{n+2} \\sum_{m=0}^n m\\big(g_n^m(t) \\sin(m\\lambda)-h_n^m(t)\\cos(m\\lambda)\\big)P_n^m(\\sin \\phi ') \\\\
Z' & = -\\sum_{n=1}^N(n+1)\\Big(\\frac{a}{r}\\Big)^{n+2} \\sum_{m=0}^n\\big(g_n^m(t) \\cos(m\\lambda)+h_n^m(t)\\sin(m\\lambda)\\big)P_n^m(\\sin \\phi ')
\\end{array}
Finally, the geomagnetic vector components are rotated into ellipsoidal
reference frame.
.. math::
\\begin{array}{ll}
X & = X'\\cos(\\phi ' - \\phi) - Z' \\sin(\\phi ' - \\phi) \\\\
Y & = Y' \\\\
Z & = X'\\sin(\\phi ' - \\phi) + Z' \\cos(\\phi ' - \\phi)
\\end{array}
These components are used to compute the rest of the magnetic elements:
.. math::
\\begin{array}{ll}
H & = \\sqrt{X^2 + Y^2} \\\\
F & = \\sqrt{H^2 + Z^2} \\\\
I & = \\arctan(\\frac{Z}{H}) \\\\
D & = \\arctan(\\frac{Y}{X})
\\end{array}
.. note::
The use of ``arctan2`` yields a more precise result than ``arctan``,
because it resolves the angle in the correct quadrant.
For polar regions, where the declination changes drastically, the WMM
defines two different grivations (one for each pole) defined as:
.. math::
GV = \\left\\{
\\begin{array}{ll}
D-\\lambda & \\: \\phi > 55 ° \\\\
D+\\lambda & \\: \\phi < -55 °
\\end{array}
\\right.
Parameters
----------
latitude : float
Latitude, in decimal degrees, in geodetic coordinates
longitude : float
Longitude in decimal degrees, in geodetic coordinates
height : float, default: 0.0
Mean Sea Level Height in kilometers
date : datetime.date, int or float, default: datetime.date.today()
Desired date to estimate
"""
if date is not None:
self.reset_coefficients(date)
self.latitude = latitude
self.longitude = longitude
self.height = height
latitude *= DEG2RAD
longitude *= DEG2RAD
# Transform geodetic coordinates into spherical geocentric coordinates
lat_prime, _, r = geodetic2spherical(latitude, longitude, self.height)
# Compute cos(m*phi') and sin(m*phi') for all m values
self.sp = np.zeros(self.degree+1) # sin(m*phi')
self.cp = np.ones(self.degree+2) # cos(m*phi')
self.sp[1] = np.sin(longitude)
self.cp[1] = np.cos(longitude)
for m in range(2, self.degree+1):
self.sp[m] = self.sp[1]*self.cp[m-1] + self.cp[1]*self.sp[m-1]
self.cp[m] = self.cp[1]*self.cp[m-1] - self.sp[1]*self.sp[m-1]
dt = round(self.date_dec, 1) - self.epoch # t - t_0
self.gh = np.zeros((self.degree+2, self.degree+1))
self.denormalize_coefficients(lat_prime)
cos_lat = np.cos(lat_prime) # cos(phi')
sin_lat = np.sin(lat_prime) # sin(phi')
Zp = Xp = Yp = Bp = 0.0
a = EARTH_MEAN_RADIUS/1000.0 # Mean earth radius in km
ar = a/r
# Spherical Harmonics (eq. 4)
for n in range(1, self.degree+1): #### !! According to report it must be equal to defined degree (12 not 13)
arn2 = ar**(n+2)
x_p = y_p = z_p = 0.0
for m in range(n+1): #### !! According to report it must be equal to n
self.gh[m, n] = self.c[m, n] + dt*self.cd[m, n] # g_n^m (eq. 9)
# Terms of spherical harmonic expansions
gchs = self.gh[m, n]*self.cp[m] # g(t)cos(ml)
gshc = self.gh[m, n]*self.sp[m] # g(t)sin(ml)
if m > 0:
self.gh[n, m-1] = self.c[n, m-1] + dt*self.cd[n, m-1] # h_n^m (eq. 9)
gchs += self.gh[n, m-1]*self.sp[m] # g(t)cos(ml) + h(t)sin(ml)
gshc -= self.gh[n, m-1]*self.cp[m] # g(t)sin(ml) - h(t)cos(ml)
x_p += gchs * self.dP[m, n]
y_p += m * gshc * self.P[m, n]
z_p += gchs * self.P[m, n]
# SPECIAL CASE: NORTH/SOUTH GEOGRAPHIC POLES
if (cos_lat == 0.0 and m == 1):
Bp += arn2 * gshc
if n > 1:
Bp *= sin_lat - self.k[m, n]
Xp += arn2 * x_p # (eq. 10) #### !! According to the report this must be a subtraction. Must re-check this
Yp += arn2 * y_p # (eq. 11)
Zp -= (n+1) * arn2 * z_p # (eq. 12)
Yp = Bp if cos_lat == 0.0 else Yp/cos_lat
# Transform magnetic vector components to geodetic coordinates (eq. 17)
self.X = Xp*np.cos(lat_prime-latitude) - Zp*np.sin(lat_prime-latitude)
self.Y = Yp
self.Z = Xp*np.sin(lat_prime-latitude) + Zp*np.cos(lat_prime-latitude)
# Total Intensity, Inclination and Declination (eq. 19)
self.H = np.linalg.norm([self.X, self.Y]) # sqrt(X^2+Y^2)
self.F = np.linalg.norm([self.H, self.Z]) # sqrt(H^2+Z^2)
self.I = RAD2DEG*np.arctan2(self.Z, self.H)
self.D = RAD2DEG*np.arctan2(self.Y, self.X)
# Grivation (eq. 1)
self.GV = self.D.copy()
if self.latitude > 55.0:
self.GV -= self.longitude
if self.latitude < -55.0:
self.GV += self.longitude
@property
def magnetic_elements(self) -> Dict[str, float]:
"""Main geomagnetic elements in a dictionary
======= =============================================
Element Definition
======= =============================================
X Northerly intensity
Y Easterly intensity
Z Vertical intensity (Positive downwards)
H Horizontal intensity
F Total intensity
I Inclination angle (a.k.a. dip angle)
D Declination angle (a.k.a. magnetic variation)
GV Grivation
======= =============================================
Example
-------
>>> wmm = WMM(datetime.date(2017, 5, 12), latitude=10.0, longitude=-20.0, height=10.5)
>>> wmm.magnetic_elements
{'X': 30499.640469609083, 'Y': -5230.267158472566, 'Z': -1716.633311360368,
'H': 30944.850352270452, 'F': 30992.427998627096, 'I': -3.1751692563622993,
'D': -9.73078560629778, 'GV': -9.73078560629778}
"""
return {k: self.__dict__[k] for k in ['X', 'Y', 'Z', 'H', 'F', 'I', 'D', 'GV']}
``` |
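As a quick illustration of `geodetic2spherical` (a sketch assuming the module is importable as `ahrs.utils.wmm`): because of the Earth's flattening, the geocentric latitude returned for a mid-latitude point is slightly smaller than the geodetic latitude, and the radial distance lies between the polar and equatorial radii.
```python
import numpy as np
from ahrs.utils.wmm import geodetic2spherical

# 45 deg N, 7 deg E, at mean sea level (height in km)
lat_gc, lon, r = geodetic2spherical(np.radians(45.0), np.radians(7.0), 0.0)
# lat_gc is slightly less than np.radians(45.0); lon is unchanged;
# r lies between ~6356.75 km (polar) and ~6378.14 km (equatorial).
```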
{
"source": "joseph-tobin/mqtt-panel",
"score": 2
} |
#### File: mqtt_panel/web/fullscreen.py
```python
from mqtt_panel.web.component import Component
class FullScreen(Component):
def __init__(self):
super(FullScreen, self).__init__(4)
def _body(self, fh):
self._write_render(fh, '''\
<div id="fullscreen" class="d-none"></div>
''')
```
#### File: web/widget/button.py
```python
import logging
from mqtt_panel.web.widget.widget import Widget
class Button(Widget):
widget_type = 'button'
def __init__(self, *args, **kwargs):
super(Button, self).__init__(*args, **kwargs)
def open(self):
pass
def on_widget(self, blob):
logging.debug("{%s} Rx widget: %s", self.id, blob)
payload = self._c['payload']
self._mqtt.publish(self._c['publish'], payload,
retain=self._c.get('retain', False), qos=self._c.get('qos', 1))
self._updated_now()
self._update_clients()
return True
def _blob(self):
return {
}
def _html(self, fh):
icon = self._c.get('icon', 'touch_app')
color = self._c.get('color', 'white')
text = self._c['text']
confirm = self._c.get('confirm', None)
if color:
color = ' style="color:%s"' % color
if confirm:
confirm = f' data-confirm="{confirm}"'
self._write_render(fh, '''\
<div class="value"{confirm}>
<span class="material-icons"{color}>{icon}</span>
<span{color}>{text}</span>
</div>
''', locals(), indent=4)
Widget.register(Button)
```
#### File: web/widget/light.py
```python
import logging
from mqtt_panel.web.widget.widget import Widget
class Light(Widget):
widget_type = 'light'
def __init__(self, *args, **kwargs):
super(Light, self).__init__(*args, **kwargs)
# self._value = self._c['values'][0].get('payload')
self._payload_map = {}
for blob in self._c['values']:
self._payload_map[blob['payload']] = blob
def open(self):
self._mqtt.subscribe(self._c['subscribe'], self._on_mqtt)
def _on_mqtt(self, payload, timestamp):
logging.debug("Light [%s] on_mqtt: %s", self.id, payload)
try:
value = self._payload_map[payload]['payload']
except KeyError:
logging.warning('Unexpected MQTT value: %s', payload)
value = None
self.set_value(value)
def _blob(self):
return {
'value': self.value
}
def _html(self, fh):
self._write_render(fh, '''\
<div class="value">
''', indent=4)
for blob in self._c['values']:
value = blob.get('payload')
display = ''
if self.value != value:
display = ' d-none'
text = blob.get('text', 'text')
icon = blob.get('icon', Default.icon(text))
color = blob.get('color', Default.color(text))
self._write_render(fh, '''\
<div class="value-item value-{value}{display}">
<span class="material-icons" style="color:{color};">{icon}</span>
<span style="color:{color};">{text}</span>
</div>
''', locals(), indent=4)
display = ''
if self.value is not None:
display = ' d-none'
self._write_render(fh, '''\
<div class="value-item value-null{display}">
<span class="material-icons">do_not_disturb</span>
<span>unknown</span>
</div>
</div>
''', locals(), indent=4)
class Default(object):
_map = {
('on', 'true'): ('emoji_objects', 'yellow'),
('off', 'false'): ('emoji_objects','black'),
None: ('help_center', None)
}
@classmethod
def _lookup(cls, key):
key = key.lower()
for keys in cls._map.keys():
if keys and key in keys:
return cls._map[keys]
return cls._map[None]
@classmethod
def icon(cls, key):
return cls._lookup(key)[0]
@classmethod
def color(cls, key):
return cls._lookup(key)[1]
Widget.register(Light)
```
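The `Default` helper above resolves icons and colours by a case-insensitive lookup of the payload text, falling back to the `None` entry for unknown values; for example:
```python
# Illustrative lookups against the Default mapping defined above.
Default.icon('ON')      # 'emoji_objects'  ('on'/'true' match, case-insensitive)
Default.color('off')    # 'black'
Default.icon('blink')   # 'help_center'    (unknown text falls back to the None entry)
```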
#### File: web/widget/text.py
```python
from mqtt_panel.web.widget.widget import Widget
class Text(Widget):
widget_type = 'text'
def __init__(self, *args, **kwargs):
super(Text, self).__init__(*args, **kwargs)
def open(self):
self._mqtt.subscribe(self._c['subscribe'], self._on_mqtt)
def _on_mqtt(self, payload, timestamp):
self.set_value(payload)
def _blob(self):
return {
'text': self.value or '',
}
def _html(self, fh):
color = self._c.get('color', None)
if color:
color = ' style="color:%s"' % color
self._write_render(fh, '''\
<div class="text"{color}></div>
''', locals(), indent=4)
Widget.register(Text)
```
#### File: mqtt_panel/web/wslink.py
```python
from mqtt_panel.web.component import Component
class WSLink(Component):
def __init__(self):
super(WSLink, self).__init__(4)
``` |
{
"source": "josepht/pricer",
"score": 3
} |
#### File: josepht/pricer/pricer.py
```python
import argparse
import json
import requests
API_ENDPOINT = (
"https://query1.finance.yahoo.com/v7/finance/quote"
"?lang=en-US®ion=US&corsDomain=finance.yahoo.com"
)
# Colors
RED = '\033[31m'
GREEN = '\033[32m'
BOLD = '\033[1m'
ENDC = '\033[0m'
DEFAULT_SHARES_FILE = 'shares.json'
def color_value(value, color='', percent=False, width=10, precision=4,
field_width=10, string=False, bold=False):
cs = ce = ''
type_str = 'f'
if string:
type_str = 's'
if string:
value_str = value
else:
if percent:
value_str = '{{:.{}{}}}'.format(precision, type_str).format(value)
else:
value_str = '{{:{}.{}{}}}'.format(width,
precision,
type_str).format(value)
if color == 'red':
cs = RED
ce = ENDC
elif color == 'green':
cs = GREEN
ce = ENDC
if bold:
cs = '{}{}'.format(cs, BOLD)
ce = ENDC
value_str_fmt = '{{:>{}s}}'.format(field_width)
if percent:
value_str = value_str_fmt.format(
'({}%)'.format(value_str))
value_str = value_str_fmt.format(value_str)
# add color codes last otherwise they count towards the string length
if cs:
value_str = '{}{}{}'.format(cs, value_str, ce)
return value_str
def get_alert_report(symbol, price, share_data, verbose=False, agg=False):
report = ""
symbol_data = share_data
alert = symbol_data.get('alert', {})
alert_price_above = alert.get('price_above')
alert_price_below = alert.get('price_below')
alert_price = (alert_price_above is not None or
alert_price_below is not None)
hidden = alert.get('hide', False)
if hidden or not alert_price:
return report
if alert and alert_price and not hidden:
alert_price = ''
symbol_color = ''
direction = ''
if alert_price_above is not None and price > alert_price_above:
direction = '^'
symbol_color = 'green'
alert_price = alert_price_above
if alert_price_below is not None and price < alert_price_below:
direction = 'v'
symbol_color = 'green'
alert_price = alert_price_below
if direction == '':
return report
kwargs = {}
if agg:
kwargs['bold'] = True
report = "{} {}".format(
color_value(direction, color=symbol_color, string=True,
field_width=1, **kwargs),
color_value(alert_price, color=symbol_color,
precision=2,
width=6,
field_width=6,
**kwargs
),
)
return report
def get_owned_report(symbol, price, share_data, verbose=False, agg=False,
hold=False, until=None, total_shares=None,
avg_price=None):
sum_line = None
symbol_data = share_data
cost = symbol_data.get('cost')
shares = symbol_data.get('shares')
if total_shares is not None and total_shares > 0:
if avg_price is not None:
avg_price = (
(shares * cost + total_shares * avg_price) /
(total_shares + shares)
)
else:
avg_price = cost
total_shares = 0
total_shares += shares
hold_str = ' '
if hold:
hold_str = 'H'
if until is not None:
hold_str = '{} {}'.format(hold_str, until)
kwargs = {}
if agg:
kwargs['bold'] = True
sum_kwargs = {'bold': True}
if cost is None or shares is None or shares == 0.0:
# Fake out the column width for alerts
if verbose:
owned = ' '*67
else:
owned = ' '*57
else:
symbol_change = price - cost
sum_symbol_change = price - avg_price
symbol_color = ''
sum_symbol_color = ''
if symbol_change < 0.0:
symbol_color = 'red'
elif symbol_change > 0.0:
symbol_color = 'green'
if sum_symbol_change < 0.0:
sum_symbol_color = 'red'
elif sum_symbol_change > 0.0:
sum_symbol_color = 'green'
symbol_change_percent = symbol_change / cost * 100
sum_symbol_change_percent = sum_symbol_change / avg_price * 100
if verbose:
owned = "{} {} {} {} {} {}".format(
color_value(cost, color=symbol_color, **kwargs),
color_value(symbol_change, color=symbol_color, **kwargs),
color_value(symbol_change_percent,
color=symbol_color,
precision=2,
field_width=10,
percent=True, **kwargs),
color_value(shares, color=symbol_color,
precision=0, width=6, **kwargs),
color_value(shares * symbol_change,
field_width=11,
color=symbol_color, **kwargs),
color_value(hold_str, field_width=12, string=True,
color=symbol_color, **kwargs),
)
sum_line = "{} {} {} {} {} {}".format(
color_value(cost, color=sum_symbol_color, bold=True),
color_value(symbol_change, color=sum_symbol_color, bold=True),
color_value(sum_symbol_change_percent,
color=sum_symbol_color,
precision=2,
field_width=10,
percent=True, bold=True),
color_value(total_shares, color=sum_symbol_color,
precision=0, width=6, bold=True),
color_value(total_shares * sum_symbol_change,
field_width=11,
color=sum_symbol_color, bold=True),
color_value(hold_str, field_width=12, string=True,
color=sum_symbol_color, bold=True),
)
else:
owned = "{} {} {} {} {}".format(
color_value(cost, color=symbol_color, **kwargs),
color_value(symbol_change_percent,
color=symbol_color,
precision=2,
field_width=10,
percent=True, **kwargs),
color_value(shares, color=symbol_color,
precision=0, width=6, **kwargs),
color_value(shares * symbol_change,
field_width=11,
color=symbol_color, **kwargs),
color_value(hold_str, field_width=12, string=True,
color=symbol_color, **kwargs),
)
sum_line = "{} {} {} {} {}".format(
color_value(avg_price, color=sum_symbol_color, bold=True),
color_value(sum_symbol_change_percent,
color=sum_symbol_color,
precision=2,
field_width=10,
percent=True, bold=True),
color_value(total_shares, color=sum_symbol_color,
precision=0, width=6, bold=True),
color_value(total_shares * sum_symbol_change,
field_width=11,
color=sum_symbol_color, bold=True),
color_value(hold_str, field_width=12, string=True,
color=sum_symbol_color, bold=True),
)
return owned, total_shares, avg_price, sum_line
def get_price_data(market_state, symbol_data):
market_state_str = '*' if market_state != 'REGULAR' else ' '
price = change = percent = None
if market_state == 'PRE':
price = symbol_data.get('preMarketPrice')
change = symbol_data.get('preMarketChange')
percent = symbol_data.get('preMarketChangePercent')
market_state_str = '*'
# if there is no pre-market data get any post market data
if price is None:
price, change, percent, market_state_str = get_price_data(
'POST', symbol_data)
elif market_state == 'POST':
price = symbol_data.get('postMarketPrice')
change = symbol_data.get('postMarketChange')
percent = symbol_data.get('postMarketChangePercent')
market_state_str = '*'
# If there isn't any post-market data use the regular data.
if (
price is None or change is None or percent is None or
change == 0.0
):
price = float(symbol_data.get('regularMarketPrice'))
change = float(symbol_data.get('regularMarketChange'))
percent = float(symbol_data.get('regularMarketChangePercent'))
market_state_str = ' '
price = float(price)
change = float(change)
percent = float(percent)
return price, change, percent, market_state_str
def get_market_data(data, symbol):
for item in data:
if item.get('symbol') == symbol:
return item
return None
def get_current_price(symbols=None, shares_file=DEFAULT_SHARES_FILE,
verbose=False):
share_data = get_share_data(shares_file)
if symbols is None:
symbols = list(set(x['name'] for x in share_data))
string_fields = [
'symbol',
'marketState',
'regularMarketPrice',
]
float_fields = [
'regularMarketChange',
'regularMarketChangePercent',
'preMarketPrice',
'preMarketChange',
'preMarketChangePercent',
'postMarketPrice',
'postMarketChange',
'postMarketChangePercent',
]
fields = string_fields + float_fields
result = requests.get(API_ENDPOINT, params={
'symbols': ','.join(symbols),
'fields': ','.join(fields),
})
if result.status_code == 200:
data = result.json()
response = data.get('quoteResponse')
if response is not None:
data = response
data_result = data.get('result')
data_error = data.get('error')
if data_error is not None:
print("data_error: {}".format(data_error))
if data_result is not None:
data = data_result
symbols_seen = []
last_symbol = None
last_symbol_count = 0
sum_line = None
for item in share_data:
symbol = item['name']
hide = item.get('hide', False)
agg = item.get('agg', False)
hold = item.get('hold', False)
until = item.get('until')
if until is not None:
hold = True
if hide:
continue
market_data = get_market_data(data, symbol)
market_state = market_data.get('marketState')
price = float(market_data.get('regularMarketPrice'))
change = float(market_data.get('regularMarketChange'))
percent = float(market_data.get('regularMarketChangePercent'))
price, change, percent, market_state_str = get_price_data(
market_state, market_data)
color = ''
if change < 0:
color = 'red'
elif change > 0.0:
color = 'green'
change = color_value(change, color=color, width=9)
percent = color_value(percent, color=color, precision=2,
field_width=10,
percent=True)
price_str = color_value(price, bold=True)
line = "{:>5s} {} {} {} {:>1s}".format(
symbol, price_str, change, percent, market_state_str)
if symbol in symbols_seen and last_symbol == symbol:
line = "{:>40s}".format("")
else:
symbols_seen.append(symbol)
if symbol == last_symbol:
last_symbol_count += 1
else:
total_shares = None
avg_price = None
if sum_line is not None and last_symbol_count > 0:
sum_line = "{} {}".format("{:>40s}".format(""), sum_line)
print(sum_line)
last_symbol_count = 0
sum_line = None
last_symbol = symbol
# Add user's owned shares info
owned, total_shares, avg_price, sum_line = get_owned_report(
symbol, price, item,
verbose=verbose, agg=agg, hold=hold,
until=until,
total_shares=total_shares, avg_price=avg_price
)
alert = get_alert_report(symbol, price, item,
verbose=verbose, agg=agg)
if owned:
line = "{} {}".format(line, owned)
if alert:
line = "{} {}".format(line, alert)
print(line)
def query_one(args):
get_current_price(symbols=[args.symbol], shares_file=args.shares_file,
verbose=args.verbose)
def query_all(args):
get_current_price(shares_file=args.shares_file,
verbose=args.verbose)
def check(args):
"""Check the return for selling a non-held stock based on the given
cost, number of shares, and sell price."""
diff = args.shares * (args.sell_price - args.buy_price)
print(diff)
def check_sell(args):
"""Check the return for selling a given stock at a given price based
on current holdings."""
share_data = get_share_data(args.shares_file)
ret = None
for item in share_data:
symbol = item.get('symbol')
if symbol is not None:
symbol = symbol.lower()
if symbol is not None and symbol == args.symbol.lower():
cost = item.get('cost', 0.0)
shares = item.get('shares', 0.0)
ret = shares * (args.price - cost)
break
if ret is not None:
print(ret)
def parse_args():
"""Parse commandline options and sub-commands."""
parser = argparse.ArgumentParser()
parser.add_argument('--shares-file', '-s',
default=DEFAULT_SHARES_FILE,
help='JSON data file with shares owned data')
parser.add_argument('--verbose', '-v', action='store_true',
default=False,
help='Show more data')
parser.set_defaults(func=query_all)
subparsers = parser.add_subparsers(help='sub-commands')
parser_check = subparsers.add_parser(
'check', help='Check return on a transaction')
parser_check.add_argument('buy_price', type=float,
help='Buy price for stock',)
parser_check.add_argument('sell_price', type=float,
help='Sell price for stock',)
parser_check.add_argument('shares', type=float,
help='Number of shares of the stock',)
parser_check.set_defaults(func=check)
parser_check_stock = subparsers.add_parser(
'check_sell', help='Check sell scenario for a specific stock')
parser_check_stock.add_argument('symbol',
help='Stock symbol')
parser_check_stock.add_argument('price', type=float,
help='Sell price')
parser_check_stock.set_defaults(func=check_sell)
parser_query_one = subparsers.add_parser(
'query_one', help='Query one symbol')
parser_query_one.add_argument('symbol',
help='Stock symbol')
parser_query_one.set_defaults(func=query_one)
parser_query_all = subparsers.add_parser(
'query_all', help='Query all symbols')
parser_query_all.set_defaults(func=query_all)
return parser.parse_args()
def get_share_data(filename):
"""Get the share information for the user."""
data = []
with open(filename) as fp:
data = json.load(fp)
return data
def main():
"""The main method."""
args = parse_args()
args.func(args)
if __name__ == '__main__':
main()
``` |
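pricer.py reads everything from a local JSON shares file (`DEFAULT_SHARES_FILE`). The sketch below writes a minimal, hypothetical `shares.json` containing only the keys this script reads (`name`, `cost`, `shares`, `hide`, `agg`, `hold`, `until`, and an `alert` block with `price_above`/`price_below`); the real file may carry more fields, and the symbols and prices here are illustrative.

```python
# Sketch: create a minimal shares.json using only the keys pricer.py reads.
# All symbols and prices are illustrative.
import json

sample_shares = [
    {
        "name": "AAPL",                    # the listing code looks entries up by 'name'
        "cost": 150.25,                    # buy price per share
        "shares": 10,                      # number of shares held
        "hold": True,                      # flag the position as a hold
        "alert": {"price_above": 200.0},   # flag when the price crosses this level
    },
    {
        "name": "AAPL",                    # consecutive entries for one symbol get a bold summary line
        "cost": 170.0,
        "shares": 5,
        "agg": True,
    },
]

with open("shares.json", "w") as fp:
    json.dump(sample_shares, fp, indent=2)
```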
{
"source": "Joseph-tsai415/Msc-All-Terrain-Robot",
"score": 3
} |
#### File: Guidance_system/guidance_sys/calibrate.py
```python
import numpy as np
import cv2, os
from cv2 import aruco
class calibrate():
"""
The class called Calibrate is initialised with constants appropriate
for the given target Calibration
"""
# if __name__ == '__main__':
def __init__(self):
#%matplotlib nbagg
self.aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
self.board = aruco.CharucoBoard_create(7, 5, 1, .8, self.aruco_dict)
#import image
dir_path=os.path.dirname(os.path.realpath(__file__))
datadir = os.path.join(dir_path,"./photo/")
images = np.array([datadir + f for f in os.listdir(datadir) if f.endswith(".png") ])
order = np.argsort([int(p.split(".")[-2].split("_")[-1]) for p in images])
images = images[order]
print(images)
#image calibration
allCorners,allIds,imsize=self.read_chessboards(images)
self.ret, self.mtx, self.dist, self.rvecs, self.tvecs = self.calibrate_camera(allCorners,allIds,imsize)
def get(self):
'''
        Return the camera calibration results (ret, mtx, dist, rvecs, tvecs)
'''
return self.ret, self.mtx, self.dist, self.rvecs, self.tvecs
def read_chessboards(self,images):
"""
Charuco base pose estimation.
"""
print("POSE ESTIMATION STARTS:")
allCorners = []
allIds = []
decimator = 0
# SUB PIXEL CORNER DETECTION CRITERION
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.00001)
for im in images:
print("=> Processing image {0}".format(im))
frame = cv2.imread(im)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(gray, self.aruco_dict)
if len(corners)>0:
# SUB PIXEL DETECTION
for corner in corners:
cv2.cornerSubPix(gray, corner,
winSize = (3,3),
zeroZone = (-1,-1),
criteria = criteria)
res2 = cv2.aruco.interpolateCornersCharuco(corners,ids,gray,self.board)
if res2[1] is not None and res2[2] is not None and len(res2[1])>3 and decimator%1==0:
allCorners.append(res2[1])
allIds.append(res2[2])
decimator+=1
imsize = gray.shape
return allCorners,allIds,imsize
def calibrate_camera(self,allCorners,allIds,imsize):
"""
        Calibrates the camera using the detected corners.
"""
print("CAMERA CALIBRATION")
cameraMatrixInit = np.array([[ 1000., 0., imsize[0]/2.],
[ 0., 1000., imsize[1]/2.],
[ 0., 0., 1.]])
distCoeffsInit = np.zeros((5,1))
flags = (cv2.CALIB_USE_INTRINSIC_GUESS + cv2.CALIB_RATIONAL_MODEL + cv2.CALIB_FIX_ASPECT_RATIO)
#flags = (cv2.CALIB_RATIONAL_MODEL)
(ret, camera_matrix, distortion_coefficients0,
rotation_vectors, translation_vectors,
stdDeviationsIntrinsics, stdDeviationsExtrinsics,
perViewErrors) = cv2.aruco.calibrateCameraCharucoExtended(
charucoCorners=allCorners,
charucoIds=allIds,
board=self.board,
imageSize=imsize,
cameraMatrix=cameraMatrixInit,
distCoeffs=distCoeffsInit,
flags=flags,
criteria=(cv2.TERM_CRITERIA_EPS & cv2.TERM_CRITERIA_COUNT, 10000, 1e-9))
return ret, camera_matrix, distortion_coefficients0, rotation_vectors, translation_vectors
if __name__ == '__main__':
    calibrator = calibrate()
    ret, mtx, dist, rvecs, tvecs = calibrator.get()
```
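Once `calibrate()` has produced a camera matrix and distortion coefficients, a typical next step is undistorting frames with them. A minimal sketch follows, assuming the import path implied by the repository layout and an arbitrary test image; it is not part of the repository above.

```python
# Sketch: undistort an image with the results returned by calibrate().get().
# The import path and 'frame.png' are assumptions for illustration.
import cv2
from guidance_sys.calibrate import calibrate

ret, mtx, dist, rvecs, tvecs = calibrate().get()
frame = cv2.imread("frame.png")            # any image taken with the same camera
h, w = frame.shape[:2]
# Refine the matrix for this image size, then remove lens distortion.
new_mtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
undistorted = cv2.undistort(frame, mtx, dist, None, new_mtx)
cv2.imwrite("frame_undistorted.png", undistorted)
```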
#### File: Guidance_system/guidance_sys/pid.py
```python
import time
# This section of code was written by <NAME> in 2015.
# Copyright (C) 2015 Ivmech Mechatronics Ltd. <<EMAIL>>
# IvPID is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# IvPID is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>. or https://github.com/ivmech/ivPID
#title :pid.py
#description :python pid controller
#author :<NAME>, modified by Ping-Chun @2020
#date :20151218
#version :0.1
#notes :
#python_version :2.7
#dependencies : matplotlib, numpy, scipy
# The code is modified by <NAME> in 2020.
#==============================================================================
class PID:
"""
PID Controller
"""
def __init__(self, P=0.2, I=0.0, D=0.0, current_time=None):
self.Kp = P
self.Ki = I
self.Kd = D
self.sample_time = 0.00
self.current_time = current_time if current_time is not None else time.time()
self.last_time = self.current_time
self.SetPoint = 0.0
self.clear()
def clear(self):
"""Clears PID computations and coefficients"""
self.PTerm = 0.0
self.ITerm = 0.0
self.DTerm = 0.0
self.last_error = 0.0
# Windup Guard
self.int_error = 0.0
self.windup_guard = 20.0
self.output = 0.0
def update(self, feedback_value, current_time=None):
"""Calculates PID value for given reference feedback
.. math::
u(t) = K_p e(t) + K_i \int_{0}^{t} e(t)dt + K_d {de}/{dt}
.. figure:: images/pid_1.png
:align: center
Test PID with Kp=1.2, Ki=1, Kd=0.001 (test_pid.py)
"""
error = self.SetPoint - feedback_value
self.current_time = current_time if current_time is not None else time.time()
delta_time = self.current_time - self.last_time
delta_error = error - self.last_error
if (delta_time >= self.sample_time):
self.PTerm = self.Kp * error
self.ITerm += error * delta_time
if (self.ITerm < -self.windup_guard):
self.ITerm = -self.windup_guard
elif (self.ITerm > self.windup_guard):
self.ITerm = self.windup_guard
self.DTerm = 0.0
if delta_time > 0:
self.DTerm = delta_error / delta_time
# Remember last time and last error for next calculation
self.last_time = self.current_time
self.last_error = error
self.output = self.PTerm + (self.Ki * self.ITerm) + (self.Kd * self.DTerm)
def setKp(self, proportional_gain):
"""Determines how aggressively the PID reacts to the current error with setting Proportional Gain"""
self.Kp = proportional_gain
def setKi(self, integral_gain):
"""Determines how aggressively the PID reacts to the current error with setting Integral Gain"""
self.Ki = integral_gain
def setKd(self, derivative_gain):
"""Determines how aggressively the PID reacts to the current error with setting Derivative Gain"""
self.Kd = derivative_gain
def setWindup(self, windup):
"""Integral windup, also known as integrator windup or reset windup,
refers to the situation in a PID feedback controller where
a large change in setpoint occurs (say a positive change)
and the integral terms accumulates a significant error
during the rise (windup), thus overshooting and continuing
to increase as this accumulated error is unwound
(offset by errors in the other direction).
The specific problem is the excess overshooting.
"""
self.windup_guard = windup
def setSampleTime(self, sample_time):
"""PID that should be updated at a regular interval.
Based on a pre-determined sampe time, the PID decides if it should compute or return immediately.
"""
self.sample_time = sample_time
```
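The docstrings above describe the discrete PID update and the integral windup guard. Below is a short, self-contained sketch that drives the controller against a toy first-order process; the gains, setpoint, sample time and process model are illustrative assumptions, and the import path follows the repository layout.

```python
# Sketch: exercise the PID class above against a toy first-order process.
# Gains, setpoint and the 0.1 response factor are illustrative only.
import time
from guidance_sys.pid import PID   # assumed import path per the repo layout

pid = PID(P=1.2, I=1.0, D=0.001)
pid.SetPoint = 10.0            # target value
pid.setSampleTime(0.01)        # recompute at most every 10 ms

feedback = 0.0
for _ in range(200):
    pid.update(feedback)
    # crude plant: move a fraction of the way toward the controller output
    feedback += (pid.output - feedback) * 0.1
    time.sleep(0.01)

print("final feedback:", round(feedback, 3))
```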
#### File: src/Guidance_system/main.py
```python
import guidance_sys
import numpy as np
import cv2, os
from cv2 import aruco
import matplotlib.pyplot as plt
import matplotlib as mpl
import pandas as pd
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import serial
ternimal_UART= False
try:
os.system("sudo chmod a+rw /dev/serial0")
ser = serial.Serial('/dev/serial0', 9600, timeout=1)
ser.flush()
print("open the Serial0")
ternimal_UART =True
except:
print("Can't open Serial dev pls add bash command: sudo chmod a+rw /dev/serial0")
import RPi.GPIO as GPIO
from time import sleep
io_pin=[17,27,22,5,23,24,25,6]
GPIO.setmode(GPIO.BCM)
for pin in io_pin:
GPIO.setup(pin, GPIO.OUT)
w = 640
h = 480
frame_rate = 32
def open_UART_port():
    global ser, ternimal_UART  # update the module-level serial handle and flag
try:
os.system("sudo chmod a+rw /dev/serial0")
ser = serial.Serial('/dev/serial0', 9600, timeout=1)
ser.flush()
print("open the Serial0")
ternimal_UART =True
except:
print("Can't open Serial dev pls add bash command: sudo chmod a+rw /dev/serial0")
if __name__ == '__main__':
#calbrate = guidance_sys.calibrate()
#ret, mtx, dist, rvecs, tvecs = calbrate.get()
open_UART_port()
    # initialise the Raspberry Pi camera
p_camera = guidance_sys.camera(w=w,h=h,framerate=frame_rate)
image_process = guidance_sys.process()
print("Start")
#for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_0port=True):
for frame in p_camera.video_stream():
start= time.time()
time_used = time.time()
image_process.update(frame)
#image_process.draw()
for io,pin in zip(image_process.IO_Mapping(),io_pin):
GPIO.output(pin,int(io))
time_used=time.time() -start
image_process.string_out = f"Time use: {round(time_used,3)}\n\n"
#image_process.string_out = f"{round(time_used,4)}"
image_process.sendmessage()
if ternimal_UART:
ser.write(image_process.sendmessage())
else:
image_process.sendmessage()
try:
os.system("sudo chmod a+rw /dev/serial0")
ser = serial.Serial('/dev/serial0', 9600, timeout=1)
ser.flush()
print("open the Serial0")
ternimal_UART =True
except:
print("Can't open Serial dev pls add bash command: sudo chmod a+rw /dev/serial0")
print(image_process.terminal_UART)
#cv2.imshow("Frame", image)
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
# clear the stream in preparation for the next frame
p_camera.rawCapture.truncate(0)
``` |
{
"source": "JosephVC/Python_PDF_OCR",
"score": 3
} |
#### File: Python_PDF_OCR/tests/test_ocr.py
```python
from pathlib import Path
import pytest
from alterpdf import pdf_to_image
# test cases for altering pdfs
# test whether the pdf_to_image module actually works
def test_pdf_to_image():
pdf_to_image.convert('-p', '../sample_pdfs/meetingminutes.pdf')
# run something through pdf_to_image and check if it's there
    assert Path('../output_images/output1.jpg').is_file()
def test_ocr_image_exists():
pass
def image_made_to_pdf():
pass
``` |
{
"source": "josephverron/sonar-extractor",
"score": 3
} |
#### File: sonar-extractor/authentication/__init__.py
```python
import os
from getpass import getpass, getuser
from typing import Tuple
def export_auth(login_file, login, password):
with open(login_file, "w") as file:
file.write(login + "\n")
file.write(password + "\n")
def import_auth(login_file):
with open(login_file, "r") as file:
login, password, *_ = file.readlines()
return login.strip(), password.strip()
def ask_auth():
default_login = getuser()
login = input("Please input your login [default:{}] : ".format(default_login)) or default_login
password = getpass("Please input your password : ")
return login, password
def persist_auth(key: str, login_password: Tuple[str, str]):
login_file = os.path.expanduser("~/{}.auth.txt".format(key))
if login_password:
        login, password = login_password
export_auth(login_file, login, password)
return login, password
elif os.path.isfile(login_file):
return import_auth(login_file)
else:
login, password = ask_auth()
export_auth(login_file, login, password)
return login, password
``` |
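`persist_auth` caches credentials in `~/<key>.auth.txt`: pass a `(login, password)` tuple to store them, or a falsy value to reuse the cached file (falling back to an interactive prompt if it does not exist). A minimal usage sketch follows; the key name and credentials are arbitrary examples.

```python
# Sketch: store credentials once, then reuse them on later runs.
from authentication import persist_auth

# Passing a tuple writes ~/sonar.auth.txt and returns the pair unchanged.
login, password = persist_auth("sonar", ("alice", "s3cret"))

# Passing None reads the cached file back (or prompts if it is missing).
login, password = persist_auth("sonar", None)
```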
{
"source": "joseph-v/SIM",
"score": 2
} |
#### File: api/v1/masking_views.py
```python
from delfin import db
from delfin.api import api_utils
from delfin.api.common import wsgi
from delfin.api.views import masking_views
class MaskingViewController(wsgi.Controller):
def __init__(self):
super(MaskingViewController, self).__init__()
self.search_options = ['name', 'id', 'storage_id',
'native_storage_host_group_id',
'native_storage_port_group_id',
'native_storage_volume_group_id',
'native_storage_host_id',
'native_volume_id',
'native_masking_view_id']
def _get_masking_view_search_options(self):
"""Return masking view search options allowed ."""
return self.search_options
def show(self, req, id):
ctxt = req.environ['delfin.context']
query_params = {"storage_id": id}
query_params.update(req.GET)
# Update options other than filters
sort_keys, sort_dirs = api_utils.get_sort_params(query_params)
marker, limit, offset = api_utils.get_pagination_params(query_params)
# Strip out options except supported search options
api_utils.remove_invalid_options(
ctxt, query_params, self._get_masking_view_search_options())
masking_view_lists = db.masking_views_get_all(ctxt, marker, limit,
sort_keys, sort_dirs,
query_params, offset)
return masking_views.build_masking_views(masking_view_lists)
def create_resource():
return wsgi.Resource(MaskingViewController())
```
#### File: hpe/hpe_3par/hpe_3parstor.py
```python
import six
from oslo_log import log
from delfin import context
from delfin.common import constants
from delfin.drivers import driver
from delfin.drivers.hpe.hpe_3par import alert_handler, consts
from delfin.drivers.hpe.hpe_3par import component_handler
from delfin.drivers.hpe.hpe_3par import rest_handler
from delfin.drivers.hpe.hpe_3par import ssh_handler
from delfin.drivers.utils.rest_client import RestClient
LOG = log.getLogger(__name__)
# Hpe3parStor Driver
class Hpe3parStorDriver(driver.StorageDriver):
"""Hpe3parStorDriver implement Hpe 3par Stor driver,
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.rest_client = RestClient(**kwargs)
self.rest_client.verify = kwargs.get('verify', False)
self.rest_handler = rest_handler.RestHandler(self.rest_client)
self.rest_handler.login()
self.ssh_handler = ssh_handler.SSHHandler(**kwargs)
self.version = self.ssh_handler.login(context)
self.comhandler = component_handler.ComponentHandler(
rest_handler=self.rest_handler, ssh_handler=self.ssh_handler)
self.alert_handler = alert_handler.AlertHandler(
rest_handler=self.rest_handler, ssh_handler=self.ssh_handler)
def reset_connection(self, context, **kwargs):
try:
self.rest_handler.logout()
except Exception as e:
LOG.warning('logout failed when resetting connection, '
'reason is %s' % six.text_type(e))
self.rest_client.verify = kwargs.get('verify', False)
self.rest_handler.login()
def close_connection(self):
self.rest_handler.logout()
def get_storage(self, context):
return self.comhandler.get_storage(context)
def list_storage_pools(self, context):
self.comhandler.set_storage_id(self.storage_id)
return self.comhandler.list_storage_pools(context)
def list_volumes(self, context):
self.comhandler.set_storage_id(self.storage_id)
return self.comhandler.list_volumes(context)
def list_controllers(self, context):
return self.comhandler.list_controllers(self.storage_id)
def list_ports(self, context):
return self.comhandler.list_ports(self.storage_id)
def list_disks(self, context):
return self.comhandler.list_disks(self.storage_id)
def list_alerts(self, context, query_para=None):
return self.alert_handler.list_alerts(context, query_para)
def add_trap_config(self, context, trap_config):
pass
def remove_trap_config(self, context, trap_config):
pass
@staticmethod
def parse_alert(context, alert):
return alert_handler.AlertHandler().parse_alert(context, alert)
def clear_alert(self, context, alert):
return self.alert_handler.clear_alert(context, alert)
def list_storage_host_initiators(self, context):
return self.comhandler.list_storage_host_initiators(self.storage_id)
def list_storage_hosts(self, context):
return self.comhandler.list_storage_hosts(self.storage_id)
def collect_perf_metrics(self, context, storage_id, resource_metrics,
start_time, end_time):
return self.comhandler.collect_perf_metrics(storage_id,
resource_metrics,
start_time, end_time)
@staticmethod
def get_capabilities(context, filters=None):
"""Get capability of supported driver"""
return {
'is_historic': True,
'resource_metrics': {
constants.ResourceType.STORAGE_POOL: consts.POOL_CAP,
constants.ResourceType.VOLUME: consts.VOLUME_CAP,
constants.ResourceType.PORT: consts.PORT_CAP,
constants.ResourceType.DISK: consts.DISK_CAP
}
}
def get_latest_perf_timestamp(self, context):
return self.comhandler.get_latest_perf_timestamp()
def list_storage_host_groups(self, context):
return self.comhandler.list_storage_host_groups(self.storage_id)
def list_port_groups(self, context):
return self.comhandler.list_port_groups(self.storage_id)
def list_volume_groups(self, context):
return self.comhandler.list_volume_groups(self.storage_id)
def list_masking_views(self, context):
return self.comhandler.list_masking_views(self.storage_id)
```
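`get_capabilities` is a static method, so the capability map above can be inspected without connecting to an array. A small sketch follows, assuming the `consts.*_CAP` values are mappings keyed by metric name (their contents are not shown in this file).

```python
# Sketch: print the static capability map of the HPE 3PAR driver above.
# Assumes the consts.*_CAP values are mappings keyed by metric name.
from delfin.drivers.hpe.hpe_3par.hpe_3parstor import Hpe3parStorDriver

caps = Hpe3parStorDriver.get_capabilities(context=None)
print("historic collection supported:", caps["is_historic"])
for resource_type, metrics in caps["resource_metrics"].items():
    print(resource_type, "->", list(metrics))
```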
#### File: pure/flasharray/rest_handler.py
```python
import six
from oslo_log import log as logging
from delfin import exception, cryptor
from delfin.drivers.pure.flasharray import consts
from delfin.drivers.utils.rest_client import RestClient
LOG = logging.getLogger(__name__)
class RestHandler(RestClient):
REST_STORAGE_URL = '/api/1.17/array?space=true'
REST_ARRAY_URL = '/api/1.17/array'
REST_VOLUME_URL = '/api/1.17/volume?space=true&limit=500&token=' \
'aWQgPSA5ODA1Mg=='
REST_VOLUME_TOKEN_URL = '/api/1.17/volume?space=true&limit=20&token='
REST_PORT_URL = '/api/1.17/port'
REST_NETWORK_URL = '/api/1.17/network'
REST_DISK_URL = '/api/1.17/drive'
REST_HARDWARE_URL = '/api/1.17/hardware'
REST_CONTROLLERS_URL = '/api/1.17/array?controllers=true'
REST_ALERTS_URL = '/api/1.17/message?flagged=true&open=true'
REST_AUTH_URL = '/api/1.17/auth/apitoken'
REST_SESSION_URL = '/api/1.17/auth/session'
REST_HOST_URL = '/api/1.17/host'
REST_HOST_PERSONALITY_URL = '/api/1.17/host?personality=true'
REST_HOST_CONNECT_URL = '/api/1.17/host?connect=true'
REST_HGROUP_CONNECT_URL = '/api/1.17/hgroup?connect=true'
REST_HGROUP_URL = '/api/1.17/hgroup'
REST_VOLUME_GROUP_URL = '/api/1.17/vgroup'
def __init__(self, **kwargs):
super(RestHandler, self).__init__(**kwargs)
def login(self):
try:
            data = {'username': self.rest_username,
                    'password': cryptor.decode(self.rest_password)}
self.init_http_head()
token_res = self.do_call(RestHandler.REST_AUTH_URL, data,
method='POST')
if token_res.json().get('msg') == consts.LOGIN_PASSWORD_ERR:
LOG.error("Login error, Obtaining the token is abnormal. "
"status_code:%s, URL: %s",
token_res.status_code, RestHandler.REST_AUTH_URL)
raise exception.InvalidUsernameOrPassword(
'Obtaining the token is abnormal')
if token_res.status_code != consts.SUCCESS_STATUS_CODE or not \
token_res.json().get('api_token'):
LOG.error("Login error, Obtaining the token is abnormal. "
"status_code:%s, URL: %s",
token_res.status_code, RestHandler.REST_AUTH_URL)
raise exception.StorageBackendException(
'Obtaining the token is abnormal')
session_res = self.do_call(RestHandler.REST_SESSION_URL,
token_res.json(), method='POST')
if session_res.status_code != consts.SUCCESS_STATUS_CODE or not \
session_res.json().get('username'):
LOG.error("Login error, Obtaining the session is abnormal."
"status_code:%s, URL: %s", session_res.status_code,
RestHandler.REST_SESSION_URL)
raise exception.StorageBackendException(
'Obtaining the session is abnormal.')
except Exception as e:
LOG.error("Login error: %s", six.text_type(e))
raise e
finally:
data = None
token_res = None
def logout(self):
res = self.do_call(RestHandler.REST_SESSION_URL, None, method='DELETE')
if res.status_code != consts.SUCCESS_STATUS_CODE\
or not res.json().get('username'):
LOG.error("Logout error, Deleting a Token Exception."
"status_code:%s, URL: %s",
res.status_code, RestHandler.REST_SESSION_URL)
raise exception.StorageBackendException(res.text)
def rest_call(self, url, data=None, method='GET'):
result_json = None
res = self.do_call(url, data, method)
if res.status_code == consts.SUCCESS_STATUS_CODE:
result_json = res.json()
elif res.status_code == consts.PERMISSION_DENIED_STATUS_CODE:
self.login()
the_second_time_res = self.do_call(url, data, method)
if the_second_time_res.status_code == consts.SUCCESS_STATUS_CODE:
result_json = the_second_time_res.json()
return result_json
def get_volumes(self, url=REST_VOLUME_URL, data=None, volume_list=None,
count=consts.DEFAULT_COUNT_GET_VOLUMES_INFO):
if volume_list is None:
volume_list = []
res = self.do_call(url, data, 'GET')
if res.status_code == consts.SUCCESS_STATUS_CODE:
result_json = res.json()
volume_list.extend(result_json)
next_token = res.headers.get(consts.CUSTOM_TOKEN)
if next_token:
url = '%s%s' % (RestHandler.REST_VOLUME_TOKEN_URL, next_token)
self.get_volumes(url, data, volume_list)
elif res.status_code == consts.PERMISSION_DENIED_STATUS_CODE:
self.login()
if count < consts.RE_LOGIN_TIMES:
count = count + consts.CONSTANT_ONE
self.get_volumes(url, data, volume_list, count)
return volume_list
```
#### File: vnx/vnx_block/test_vnx_block.py
```python
import sys
import time
from unittest import TestCase, mock
from delfin.drivers.dell_emc.vnx.vnx_block import consts
from delfin.drivers.dell_emc.vnx.vnx_block.alert_handler import AlertHandler
from delfin.drivers.utils.tools import Tools
sys.modules['delfin.cryptor'] = mock.Mock()
from delfin import context
from delfin.drivers.dell_emc.vnx.vnx_block.navi_handler import NaviHandler
from delfin.drivers.dell_emc.vnx.vnx_block.navicli_client import NaviClient
from delfin.drivers.dell_emc.vnx.vnx_block.vnx_block import VnxBlockStorDriver
ACCESS_INFO = {
"storage_id": "12345",
"vendor": "dell_emc",
"model": "vnx_block",
"cli": {
"host": "172.16.58.3",
"port": 22,
"username": "user",
"password": "<PASSWORD>="
}
}
AGENT_INFOS = """
Agent Rev: 7.33.1 (0.38)
Name: K10
Desc:
Revision: 05.33.000.5.038
Model: VNX5400
Serial No: CETV00000001
"""
DOMAIN_INFOS = """
Node: APM00011111111
IP Address: 172.16.31.10
(Master)
Name: CX300I_33_55
Port: 80
Secure Port: 443
IP Address: 172.16.17.32
Name: CX300I_33_44
Port: 80
Secure Port: 443
"""
DISK_INFOS = """
Bus 0 Enclosure 0 Disk 0
State: Enabled
Capacity: 54969
"""
POOL_INFOS = """
Pool Name: Pool 1
Pool ID: 1
Description:
State: Offline
Status: Storage Pool requires recovery. service provider(0x712d8518)
User Capacity (GBs): 8583.732
Consumed Capacity (GBs): 8479.780
Available Capacity (GBs): 103.953
Total Subscribed Capacity (GBs): 8479.780
"""
RAID_INFOS = """
RaidGroup ID: 0
RaidGroup State: Valid_luns
Raw Capacity (Blocks): 1688426496
Logical Capacity (Blocks): 1688420352
Free Capacity (Blocks,non-contiguous): 522260480
"""
LUN_INFOS = """
LOGICAL UNIT NUMBER 239
Name: sun_data_VNX_2
User Capacity (GBs): 9.000
Consumed Capacity (GBs): 1.753
Pool Name: Migration_pool
Current State: Ready
Status: OK(0x0)
Is Thin LUN: Yes
Is Compressed: No
"""
GET_ALL_LUN_INFOS = """
LOGICAL UNIT NUMBER 186
Name LN_10G_01
RAIDGroup ID: 1
State: Bound
LUN Capacity(Megabytes): 10240
Is Thin LUN: YES
"""
CER_INFOS = """
-----------------------------
Subject:CN=TrustedRoot,C=US,ST=MA,L=Hopkinton,EMAIL=<EMAIL>,OU=CSP,O=RSA
Issuer:1.1.1.1
Serial#: 00d8280b0c863f6d4e
Valid From: 20090407135111Z
Valid To: 20190405135111Z
-----------------------------
Subject:CN=TrustedRoot,C=US,ST=MA,L=Hopkinton,EMAIL=<EMAIL>,OU=CSP,O=RSA
Issuer:172.16.58.3
Serial#: 00d8280b0c863f6d4e
Valid From: 20090407135111Z
Valid To: 20190405135111Z
"""
DISK_DATAS = """
Bus 0 Enclosure 0 Disk 0
Vendor Id: HITACHI
Product Id: HUC10906 CLAR600
Product Revision: C430
Type: 193: RAID5 129: RAID5 146: RAID5 151: RAID5
State: Enabled
Hot Spare: N/A
Serial Number: KSJEX35J
Capacity: 549691
Raid Group ID: 0
Drive Type: SAS
Current Speed: 6Gbps
"""
SP_DATAS = """
SP A
Cabinet: DPE9
Signature For The SP: 3600485
Signature For The Peer SP: 3600424
Revision Number For The SP: 05.33.000.5.038
Serial Number For The SP: CF2Z7134700101
Memory Size For The SP: 16384
SP SCSI ID if Available: 0
SP B
Cabinet: DPE9
Signature For The SP: 3600424
Signature For The Peer SP: 3600485
Revision Number For The SP: 05.33.000.5.038
Serial Number For The SP: CF2Z7134700040
Memory Size For The SP: 16384
SP SCSI ID if Available: 0
"""
RESUME_DATAS = """
Storage Processor A
CPU Module
EMC Serial Number: CF2Z7134700101
Assembly Name: JFSP 1.8GHZ 4C CPU GEN3
Storage Processor B
CPU Module
EMC Serial Number: CF2Z7134700040
Assembly Name: JFSP 1.8GHZ 4C CPU GEN3
"""
PORT_DATAS = """
Information about each SPPORT:
SP Name: SP A
SP Port ID: 6
SP UID: 50:06:01:60:88:60:24:1E:50:06:01:66:08:60:24:1E
Link Status: Up
Port Status: Online
Switch Present: YES
Switch UID: 10:00:C4:F5:7C:20:05:80:20:0E:C4:F5:7C:20:05:80
SP Source ID: 1773056
ALPA Value: 0
Speed Value : 8Gbps
Auto Negotiable : YES
Available Speeds:
2Gbps
4Gbps
8Gbps
Auto
Requested Value: Auto
MAC Address: Not Applicable
SFP State: Online
Reads: 510068560
Writes: 331050079
Blocks Read: 1504646456
Blocks Written: 236376118
Queue Full/Busy: 12246
I/O Module Slot: 3
Physical Port ID: 0
"""
BUS_PORT_DATAS = """
Bus 0
Current Speed: 6Gbps.
Available Speeds:
3Gbps.
6Gbps.
SPA SFP State: N/A
SPB SFP State: N/A
I/O Module Slot: Base Module
Physical Port ID: 0
Port Combination In Use: No
SPA Connector State: None
SPB Connector State: None
"""
BUS_PORT_STATE_DATAS = """
Information about each I/O module(s) on SPA:
SP ID: A
I/O Module Slot: Base Module
I/O Module Type: SAS
I/O Module State: Present
I/O Module Substate: Good
I/O Module Power state: On
I/O Carrier: No
Information about each port on this I/O module:
Physical Port ID: 0
Port State: Enabled
Physical Port ID: 1
Port State: Missing
Information about each I/O module(s) on SPB:
SP ID: B
I/O Module Slot: Base Module
I/O Module Type: SAS
I/O Module State: Present
I/O Module Substate: Good
I/O Module Power state: On
I/O Carrier: No
Information about each port on this I/O module:
Physical Port ID: 0
Port State: Enabled
Physical Port ID: 1
Port State: Missing
"""
ISCSI_PORT_DATAS = """
SP: A
Port ID: 4
Port WWN: iqn.1992-04.com.emc:cx.apm00093300877.a4
iSCSI Alias: 0877.a4
IP Address: 172.20.1.140
Subnet Mask: 255.255.255.0
Gateway Address: 172.20.1.1
Initiator Authentication: Not Available
SP: A
Port ID: 5
Port WWN: iqn.1992-04.com.emc:cx.apm00093300877.a5
iSCSI Alias: 0877.a5
SP: A
Port ID: 6
Port WWN: iqn.1992-04.com.emc:cx.apm00093300877.a6
iSCSI Alias: 0877.a6
IP Address: 172.20.2.140
Subnet Mask: 255.255.255.0
Gateway Address: 172.20.2.1
Initiator Authentication: Not Available
SP: A
Port ID: 7
Port WWN: iqn.1992-04.com.emc:cx.apm00093300877.a7
iSCSI Alias: 0877.a7
SP: B
Port ID: 4
Port WWN: iqn.1992-04.com.emc:cx.apm00093300877.b4
iSCSI Alias: 0877.b4
IP Address: 172.20.1.141
Subnet Mask: 255.255.255.0
Gateway Address: 172.20.1.1
Initiator Authentication: Not Available
SP: B
Port ID: 5
Port WWN: iqn.1992-04.com.emc:cx.apm00093300877.b5
iSCSI Alias: 0877.b5
SP: B
Port ID: 6
Port WWN: iqn.1992-04.com.emc:cx.apm00093300877.b6
iSCSI Alias: 0877.b6
IP Address: 172.20.2.141
Subnet Mask: 255.255.255.0
Gateway Address: 172.20.2.1
Initiator Authentication: Not Available
SP: B
Port ID: 7
Port WWN: iqn.1992-04.com.emc:cx.apm00093300877.b7
iSCSI Alias: 0877.b7
SP: B
Port ID: 9
Port WWN: fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:50:06:01:69:3B:24:13:0D
iSCSI Alias: N/A
IP Address: N/A
Subnet Mask: N/A
Gateway Address: N/A
Initiator Authentication: N/A
SP: A
Port ID: 8
Port WWN: fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:50:06:01:60:3B:24:13:0D
iSCSI Alias: N/A
IP Address: N/A
Subnet Mask: N/A
Gateway Address: N/A
Initiator Authentication: N/A
SP: A
Port ID: 9
Port WWN: fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:50:06:01:61:3B:24:13:0D
iSCSI Alias: N/A
IP Address: N/A
Subnet Mask: N/A
Gateway Address: N/A
Initiator Authentication: N/A
SP: B
Port ID: 8
Port WWN: 50:06:01:60:BB:20:13:0D:50:06:01:68:3B:24:13:0D
iSCSI Alias: N/A
IP Address: N/A
Subnet Mask: N/A
Gateway Address: N/A
Initiator Authentication: N/A
"""
IO_PORT_CONFIG_DATAS = """
SP ID : A
I/O Module Slot : 3
I/O Module Type : Fibre Channel
I/O Module State : Present
SP ID : A
I/O Module Slot : Base Module
I/O Module Type : SAS
SP ID : B
I/O Module Slot : Base Module
I/O Module Type : SAS
"""
VIEW_DATAS = """
Storage Group Name: AIX_PowerHA_node2
Storage Group UID: fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:90:2B:00:60:16:63
HBA/SP Pairs:
HBA UID SP Name SPPort
------- ------- ------
fdf8:f53e:61e4::18:10:00:00:00:C9:76:5E:79 SP A 6
Host name: AIX_21
fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:10:00:00:00:C9:75:80:4C SP B 3
Host name: AIX_21
HLU/ALU Pairs:
HLU Number ALU Number
---------- ----------
1 335
Shareable: YES
"""
HBA_DATAS = """
Information about each HBA:
HBA UID: fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:10:00:00:00:C9:9B:57:79
Server Name: aix_ma
Server IP Address: 172.16.17.32
HBA Model Description:
HBA Vendor Description:
HBA Device Driver Name: N/A
Information about each port of this HBA:
SP Name: SP A
SP Port ID: 6
HBA Devicename: N/A
Trusted: NO
Logged In: NO
Defined: YES
Initiator Type: 3
StorageGroup Name: None
"""
AGENT_RESULT = {
'agent_rev': '7.33.1 (0.38)',
'name': 'K10',
'desc': '',
'revision': '05.33.000.5.038',
'model': 'VNX5400',
'serial_no': 'CETV00000001'
}
STORAGE_RESULT = {
'name': 'APM00011111111',
'vendor': 'DELL EMC',
'model': 'VNX5400',
'status': 'normal',
'serial_number': 'CETV00000001',
'firmware_version': '05.33.000.5.038',
'total_capacity': 10081183274631,
'raw_capacity': 57639174144,
'used_capacity': 9702168298782,
'free_capacity': 379016049590
}
DOMAIN_RESULT = [
{
'node': 'APM00011111111',
'ip_address': '172.16.31.10',
'master': 'True',
'name': 'CX300I_33_55',
'port': '80',
'secure_port': '443'
}]
POOLS_RESULT = [
{
'name': 'Pool 1',
'storage_id': '12345',
'native_storage_pool_id': '1',
'description': '',
'status': 'offline',
'storage_type': 'block',
'total_capacity': 9216712054407,
'subscribed_capacity': 9105094444318,
'used_capacity': 9105094444318,
'free_capacity': 111618683830
}]
RAID_RESULT = [
{
'raidgroup_id': '0',
'raidgroup_state': 'Valid_luns',
'raw_capacity_blocks': '1688426496',
'logical_capacity_blocks': '1688420352',
'free_capacity_blocks,non-contiguous': '522260480'
}]
ALL_LUN_RESULT = [
{
'logical_unit_number': '186',
'name': 'LN_10G_01',
'raidgroup_id': '1',
'state': 'Bound',
'lun_capacitymegabytes': '10240',
'is_thin_lun': 'YES'
}]
POOLS_ANALYSE_RESULT = [{
'pool_name': 'Pool 1',
'pool_id': '1',
'description': '',
'state': 'Offline',
'status': 'Storage Pool requires recovery. service provider(0x712d8518)',
'user_capacity_gbs': '8583.732',
'consumed_capacity_gbs': '8479.780',
'available_capacity_gbs': '103.953',
'total_subscribed_capacity_gbs': '8479.780'
}]
VOLUMES_RESULT = [
{
'name': 'sun_data_VNX_2',
'storage_id': '12345',
'status': 'normal',
'native_volume_id': '239',
'native_storage_pool_id': '',
'type': 'thin',
'total_capacity': 9663676416,
'used_capacity': 1882269417,
'free_capacity': 7781406998,
'compressed': False,
'wwn': None
}]
ALERTS_RESULT = [
{
'alert_id': '0x76cc',
'alert_name': 'Navisphere Agent, version 7.33',
'severity': 'Critical',
'category': 'Fault',
'type': 'EquipmentAlarm',
'occur_time': 1585114217000,
'description': 'Navisphere Agent, version 7.33',
'resource_type': 'Storage',
'match_key': '<KEY>'
}]
ALERT_RESULT = {
'alert_id': '0x761f',
'alert_name': 'Unisphere can no longer manage',
'severity': 'Critical',
'category': 'Fault',
'type': 'EquipmentAlarm',
'occur_time': 1614310456716,
'description': 'Unisphere can no longer manage',
'resource_type': 'Storage',
'match_key': '8e97fe0af779d78bad8f2de52e15c65c'
}
DISK_RESULT = [
{
'name': 'Bus 0 Enclosure 0 Disk 0',
'storage_id': '12345',
'native_disk_id': 'Bus0Enclosure0Disk0',
'serial_number': 'KSJEX35J',
'manufacturer': 'HITACHI',
'model': 'HUC10906 CLAR600',
'firmware': 'C430',
'speed': None,
'capacity': 576392790016,
'status': 'normal',
'physical_type': 'sas',
'logical_type': 'unknown',
'health_score': None,
'native_disk_group_id': None,
'location': 'Bus 0 Enclosure 0 Disk 0'
}]
SP_RESULT = [
{
'name': 'SP A',
'storage_id': '12345',
'native_controller_id': '3600485',
'status': 'normal',
'location': None,
'soft_version': '05.33.000.5.038',
'cpu_info': 'JFSP 1.8GHZ 4C CPU GEN3',
'memory_size': '17179869184'
},
{
'name': 'SP B',
'storage_id': '12345',
'native_controller_id': '3600424',
'status': None,
'location': None,
'soft_version': '05.33.000.5.038',
'cpu_info': 'JFSP 1.8GHZ 4C CPU GEN3',
'memory_size': '16777216'
}]
PORT_RESULT = [
{
'name': 'A-6',
'storage_id': '12345',
'native_port_id': 'A-6',
'location': 'Slot A3,Port 0',
'connection_status': 'connected',
'health_status': 'normal',
'type': 'fc',
'logical_type': None,
'speed': 8000000000,
'max_speed': 8000000000,
'native_parent_id': None,
'wwn': '50:06:01:60:88:60:24:1E:50:06:01:66:08:60:24:1E',
'mac_address': None,
'ipv4': '172.20.2.140',
'ipv4_mask': '255.255.255.0',
'ipv6': None,
'ipv6_mask': None
}]
VIEW_RESULT = [
{
'native_masking_view_id': 'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:90:2B:00:'
'60:16:63_AIX_21_335',
'name': 'AIX_PowerHA_node2',
'storage_id': '12345',
'native_storage_host_id': 'AIX_21',
'native_volume_id': '335'
}]
INITIATOR_RESULT = [
{
'name': 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:10:00:00:00:C9:9B:57:79',
'storage_id': '12345',
'native_storage_host_initiator_id': '20:00:00:00:C9:9B:57:79:10:'
'00:00:00:C9:9B:57:79',
'wwn': 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:10:00:00:00:C9:9B:57:79',
'type': 'fc',
'status': 'online',
'native_storage_host_id': 'aix_ma'
}]
HOST_RESULT = [
{
'name': 'aix_ma',
'storage_id': '12345',
'native_storage_host_id': 'aix_ma',
'os_type': 'Unknown',
'status': 'normal',
'ip_address': '172.16.17.32'
}]
def create_driver():
NaviHandler.login = mock.Mock(return_value={"05.33.000.5.038_test"})
return VnxBlockStorDriver(**ACCESS_INFO)
class TestVnxBlockStorageDriver(TestCase):
driver = create_driver()
def test_init(self):
NaviHandler.login = mock.Mock(return_value="05.33.000.5.038_test")
vnx = VnxBlockStorDriver(**ACCESS_INFO)
self.assertEqual(vnx.version, "05.33.000.5.038_test")
def test_get_storage(self):
NaviClient.exec = mock.Mock(
side_effect=[DOMAIN_INFOS, AGENT_INFOS, DISK_INFOS, POOL_INFOS,
RAID_INFOS])
storage = self.driver.get_storage(context)
self.assertDictEqual(storage, STORAGE_RESULT)
def test_get_pools(self):
NaviClient.exec = mock.Mock(side_effect=[POOL_INFOS, RAID_INFOS])
pools = self.driver.list_storage_pools(context)
self.assertDictEqual(pools[0], POOLS_RESULT[0])
def test_get_volumes(self):
NaviClient.exec = mock.Mock(
side_effect=[LUN_INFOS, POOL_INFOS, GET_ALL_LUN_INFOS])
volumes = self.driver.list_volumes(context)
self.assertDictEqual(volumes[0], VOLUMES_RESULT[0])
def test_get_alerts(self):
with self.assertRaises(Exception) as exc:
self.driver.list_alerts(context, None)
self.assertIn('Driver API list_alerts() is not Implemented',
str(exc.exception))
def test_parse_alert(self):
alert = {
'1.3.6.1.6.3.1.1.4.1.0': '1.3.6.1.4.1.1981.0.6',
'1.3.6.1.4.1.1981.1.4.3': 'A-CETV00000001',
'1.3.6.1.4.1.1981.1.4.4': 'K10',
'1.3.6.1.4.1.1981.1.4.5': '761f',
'1.3.6.1.4.1.1981.1.4.6': 'Unisphere can no longer manage',
'1.3.6.1.4.1.1981.1.4.7': 'VNX5400'
}
alert = self.driver.parse_alert(context, alert)
ALERT_RESULT['occur_time'] = alert['occur_time']
self.assertDictEqual(alert, ALERT_RESULT)
def test_cli_res_to_dict(self):
navi_handler = NaviHandler(**ACCESS_INFO)
agent_re = navi_handler.cli_res_to_dict(AGENT_INFOS)
self.assertDictEqual(agent_re, AGENT_RESULT)
def test_cli_res_to_list(self):
navi_handler = NaviHandler(**ACCESS_INFO)
re_list = navi_handler.cli_res_to_list(POOL_INFOS)
self.assertDictEqual(re_list[0], POOLS_ANALYSE_RESULT[0])
def test_cli_domain_to_dict(self):
navi_handler = NaviHandler(**ACCESS_INFO)
re_list = navi_handler.cli_domain_to_dict(DOMAIN_INFOS)
self.assertDictEqual(re_list[0], DOMAIN_RESULT[0])
def test_cli_lun_to_list(self):
navi_handler = NaviHandler(**ACCESS_INFO)
re_list = navi_handler.cli_lun_to_list(GET_ALL_LUN_INFOS)
self.assertDictEqual(re_list[0], ALL_LUN_RESULT[0])
@mock.patch.object(NaviClient, 'exec')
def test_init_cli(self, mock_exec):
mock_exec.return_value = 'test'
navi_handler = NaviHandler(**ACCESS_INFO)
re = navi_handler.navi_exe('abc')
self.assertEqual(re, 'test')
self.assertEqual(mock_exec.call_count, 1)
@mock.patch.object(NaviClient, 'exec')
def test_remove_cer(self, mock_exec):
navi_handler = NaviHandler(**ACCESS_INFO)
navi_handler.remove_cer()
self.assertEqual(mock_exec.call_count, 1)
def test_err_cli_res_to_dict(self):
with self.assertRaises(Exception) as exc:
navi_handler = NaviHandler(**ACCESS_INFO)
navi_handler.cli_res_to_dict({})
self.assertIn('arrange resource info error', str(exc.exception))
def test_err_cli_res_to_list(self):
with self.assertRaises(Exception) as exc:
navi_handler = NaviHandler(**ACCESS_INFO)
navi_handler.cli_res_to_list({})
self.assertIn('cli resource to list error', str(exc.exception))
@mock.patch.object(time, 'mktime')
def test_time_str_to_timestamp(self, mock_mktime):
tools = Tools()
time_str = '03/26/2021 14:25:36'
mock_mktime.return_value = 1616739936
re = tools.time_str_to_timestamp(time_str, consts.TIME_PATTERN)
self.assertEqual(1616739936000, re)
@mock.patch.object(time, 'strftime')
def test_timestamp_to_time_str(self, mock_strftime):
tools = Tools()
mock_strftime.return_value = '03/26/2021 14:25:36'
timestamp = 1616739936000
re = tools.timestamp_to_time_str(timestamp, consts.TIME_PATTERN)
self.assertEqual('03/26/2021 14:25:36', re)
def test_cli_exec(self):
with self.assertRaises(Exception) as exc:
command_str = 'abc'
NaviClient.exec(command_str)
self.assertIn('Component naviseccli could not be found',
str(exc.exception))
def test_analyse_cer(self):
re_map = {
'1.1.1.1': {
'subject': 'CN=TrustedRoot,C=US,ST=MA,L=Hopkinton,'
'EMAIL=<EMAIL>,OU=CSP,O=RSA',
'issuer': '1.1.1.1',
'serial#': '00d8280b0c863f6d4e',
'valid_from': '20090407135111Z',
'valid_to': '20190405135111Z'
}
}
navi_handler = NaviHandler(**ACCESS_INFO)
cer_map = navi_handler.analyse_cer(CER_INFOS, host_ip='1.1.1.1')
self.assertDictEqual(cer_map, re_map)
def test_analyse_cer_exception(self):
with self.assertRaises(Exception) as exc:
navi_handler = NaviHandler(**ACCESS_INFO)
navi_handler.analyse_cer(CER_INFOS)
self.assertIn('arrange cer info error', str(exc.exception))
def test_get_resources_info_exception(self):
with self.assertRaises(Exception) as exc:
NaviClient.exec = mock.Mock(side_effect=[LUN_INFOS])
navi_handler = NaviHandler(**ACCESS_INFO)
navi_handler.get_resources_info('abc', None)
self.assertIn('object is not callable', str(exc.exception))
def test_parse_alert_exception(self):
with self.assertRaises(Exception) as exc:
AlertHandler.parse_alert(None)
self.assertIn('The results are invalid', str(exc.exception))
def test_clear_alert(self):
self.driver.clear_alert(None, None)
def test_remove_trap_config(self):
self.driver.remove_trap_config(None, None)
def test_get_disks(self):
NaviClient.exec = mock.Mock(return_value=DISK_DATAS)
disks = self.driver.list_disks(context)
self.assertDictEqual(disks[0], DISK_RESULT[0])
def test_get_controllers(self):
NaviClient.exec = mock.Mock(side_effect=[SP_DATAS, RESUME_DATAS])
controllers = self.driver.list_controllers(context)
self.assertDictEqual(controllers[0], SP_RESULT[0])
def test_get_ports(self):
NaviClient.exec = mock.Mock(
side_effect=[IO_PORT_CONFIG_DATAS, ISCSI_PORT_DATAS, PORT_DATAS,
BUS_PORT_DATAS, BUS_PORT_STATE_DATAS])
ports = self.driver.list_ports(context)
self.assertDictEqual(ports[0], PORT_RESULT[0])
def test_get_masking_views(self):
NaviClient.exec = mock.Mock(side_effect=[VIEW_DATAS])
views = self.driver.list_masking_views(context)
self.assertDictEqual(views[0], VIEW_RESULT[0])
def test_get_initiators(self):
NaviClient.exec = mock.Mock(side_effect=[HBA_DATAS,
IO_PORT_CONFIG_DATAS,
ISCSI_PORT_DATAS, PORT_DATAS,
BUS_PORT_DATAS,
BUS_PORT_STATE_DATAS])
initiators = self.driver.list_storage_host_initiators(context)
self.assertDictEqual(initiators[0], INITIATOR_RESULT[0])
def test_get_hosts(self):
NaviClient.exec = mock.Mock(side_effect=[HBA_DATAS])
hosts = self.driver.list_storage_hosts(context)
self.assertDictEqual(hosts[0], HOST_RESULT[0])
```
#### File: hpe/hpe_3par/test_hpe_3parstor.py
```python
import sys
from unittest import TestCase, mock
import paramiko
from delfin.common import constants
sys.modules['delfin.cryptor'] = mock.Mock()
from delfin import exception
from delfin import context
from delfin.drivers.hpe.hpe_3par.hpe_3parstor import Hpe3parStorDriver
from delfin.drivers.hpe.hpe_3par.alert_handler import AlertHandler
from delfin.drivers.hpe.hpe_3par.rest_handler import RestHandler
from delfin.drivers.hpe.hpe_3par.ssh_handler import SSHHandler
from delfin.drivers.utils.rest_client import RestClient
from delfin.drivers.utils.ssh_client import SSHPool
from requests import Session
class Request:
def __init__(self):
self.environ = {'delfin.context': context.RequestContext()}
pass
ACCESS_INFO = {
"storage_id": "12345",
"vendor": "hpe",
"model": "3par",
"rest": {
"host": "10.0.0.1",
"port": 8443,
"username": "user",
"password": "<PASSWORD>="
},
"ssh": {
"host": "192.168.3.11",
"port": 22,
"username": "user",
"password": "<PASSWORD>="
}
}
NODE_DATAS = """
Control Data Cache
Node --Name--- -State-- Master IC SLED LED Mem(MB) Mem(MB) Available(%)
0 1307327-0 Degraded Yes Yes unknown AmberBlnk 4096 6144 0
1 1307327-1 Degraded No Yes unknown AmberBlnk 4096 6144 0
"""
NODE_CPU_DATAS = """
----------------------------CPUs----------------------------
Node CPU -Manufacturer- -Serial- CPUSpeed(MHz) BusSpeed(MHz)
0 0 GenuineIntel -- 2327 1334.57
0 1 GenuineIntel -- 2327 1334.57
0 2 GenuineIntel -- 2327 1334.57
0 3 GenuineIntel -- 2327 1334.57
1 0 GenuineIntel -- 2327 1332.19
1 1 GenuineIntel -- 2327 1332.19
1 2 GenuineIntel -- 2327 1332.19
1 3 GenuineIntel -- 2327 1332.19
"""
NODE_VERSION = """
Node: 0
--------
System serial: 1000183
BIOS version: 4.8.34
OS version: 192.168.3.11
Reset reason: Unknown
Node: 1
--------
BIOS version: 4.8.34
OS version: 192.168.3.11
Reset reason: Unknown
"""
DISK_DATAS = """
---Size(MB)--- ----Ports----
Id CagePos Type RPM State Total Free A B Cap(GB)
0 0:14:0 FC 15 degraded 571904 83968 0:2:2* ----- 600
1 0:1:0 FC 15 degraded 571904 62720 0:2:2* ----- 600
-----------------------------------------------------------------
16 total 9150464 912896
"""
DISK_I_DATAS = """
Id CagePos State Node_WWN MFR Model Serial FW_Rev Protocol MediaType AdminTime
0 0:14:0 degraded WWN11 MFR111 Model11 Serial111 FW_Rev111 Pl MT1 600
1 0:1:0 degraded WWN22 MFR2222 Model22 Serial222 FW_Rev222 P2 MT2 600
"""
PORT_DATAS = """
N:S:P Mode State -Node_WWN- -Port_WWN/HW_Addr- Type Protocol Label Ptner FState
0:0:1 target ready 2FF70002AC001C9F 20010002AC001C9F host FC - 1:0:1 none
0:0:2 target loss_sync 2FF70002AC001C9F 20020002AC001C9F free FC - - -
0:2:2 target loss_sync 2FF70002AC001C9F 20020002AC001C9F free FC - - -
0:6:1 target loss_sync 2FF70002AC001C9F 20020002AC001C9F free FC - - -
--------------------------------------------------------------------------
18
"""
PORT_I_DATAS = """
N:S:P Brand Model Rev Firmware Serial HWType
0:0:1 LSI 9205-8e 01 17.11.00.00 SP12430085 SAS
0:0:2 LSI 9205-8e 01 17.11.00.00 SP12430085 FC
0:1:1 QLOGIC QLE2672 02 8.1.1 RFE1228G50820 FC
0:1:2 QLOGIC QLE2672 02 8.1.1 RFE1228G50820 FC
0:2:1 QLOGIC QLE8242 58 4.15.2 PCGLTX0RC1G3PX CNA
"""
PORT_PER_DATAS = """
N:S:P Connmode ConnType CfgRate MaxRate Class2 UniqNodeWwn VCN Il TMWO SSAN
0:0:1 disk point 6Gbps 6Gbps n/a n/a n/a enabled n/a n/a
0:0:2 disk point 6Gbps 6Gbps n/a n/a n/a enabled n/a n/a
0:1:1 host point auto 16Gbps disabled disabled disabled enabled disabled n/a
0:1:2 host point auto 16Gbps disabled disabled disabled enabled disabled n/a
"""
PORT_ISCSI_DATAS = """
N:S:P State IPAddr Netmask/PrefixLen Gateway TPGT MTU Rate iAddr iPort ST VLAN
0:2:1 ready fc00:db20:35b:7399::5 64 :: 21 1500 10Gbps :: 3205 21 Y
0:2:2 ready 10.99.1.3 255.255.255.0 0.0.0.0 22 1500 10Gbps 0.0.0.0 3205 22 Y
"""
PORT_RCIP_DATAS = """
N:S:P State ---HwAddr--- IPAddr Netmask Gateway MTU Rate Duplex AutoNeg
0:6:1 loss_sync 0002AC684AAD 10.11.35.10 255.255.0.0 10.11.0.1 900 n/a n/a n/a
1:6:1 offline 0002AC6A3A0F - - - - n/a n/a n/a
-----------------------------------------------------------------------------
2
"""
PORT_C_DATAS = """
N:S:P Mode Device Pos Config Topology Rate Cls Mode_change
0:0:1 target RedHat_196 0 valid fabric 8Gbps 3 allowed
RedHat_196 0 valid fabric 8Gbps 3 allowe
0:0:2 target Dorado5000V3_F1 0 valid fabric 8Gbps 3 allowed
Dorado5000V3_F1 0 valid fabric 8Gbps 3 allowed
--------------------------------------------------------------------------
108
"""
POOL_DATAS = ret = {
"total": 12,
"members": [
{
"id": 0,
"uuid": "aa43f218-d3dd-4626-948f-8a160b0eac1d",
"name": "Lcltest333",
"numFPVVs": 21,
"numTPVVs": 25,
"UsrUsage": {
"totalMiB": 1381504,
"rawTotalMiB": 1842004,
"usedMiB": 1376128,
"rawUsedMiB": 712703
},
"SAUsage": {
"totalMiB": 140800,
"rawTotalMiB": 422400,
"usedMiB": 5120,
"rawUsedMiB": 15360
},
"SDUsage": {
"totalMiB": 388736,
"rawTotalMiB": 518315,
"usedMiB": 0,
"rawUsedMiB": 0
},
"SAGrowth": {
"incrementMiB": 8192,
"LDLayout": {
"HA": 3,
"diskPatterns": [
{
"diskType": 1
}
]
}
},
"SDGrowth": {
"incrementMiB": 32768,
"LDLayout": {
"RAIDType": 3,
"HA": 3,
"setSize": 4,
"chunkletPosPref": 1,
"diskPatterns": [
{
"diskType": 1
}
]
}
},
"state": 1,
"failedStates": [],
"degradedStates": [],
"additionalStates": []
},
{
"id": 1,
"uuid": "c392910e-7648-4972-b594-47dd3d28f3ec",
"name": "cpg_Migration1",
"numFPVVs": 14,
"numTPVVs": 319,
"UsrUsage": {
"totalMiB": 1418752,
"rawTotalMiB": 1702500,
"usedMiB": 1417984,
"rawUsedMiB": 568934
},
"SAUsage": {
"totalMiB": 56832,
"rawTotalMiB": 170496,
"usedMiB": 42752,
"rawUsedMiB": 128256
},
"SDUsage": {
"totalMiB": 187648,
"rawTotalMiB": 225179,
"usedMiB": 157184,
"rawUsedMiB": 188620
},
"SAGrowth": {
"incrementMiB": 8192,
"LDLayout": {
"HA": 3,
"diskPatterns": [
{
"diskType": 1
}
]
}
},
"SDGrowth": {
"incrementMiB": 32768,
"LDLayout": {
"RAIDType": 3,
"HA": 3,
"setSize": 6,
"chunkletPosPref": 1,
"diskPatterns": [
{
"diskType": 1
}
]
}
},
"state": 1,
"failedStates": [],
"degradedStates": [],
"additionalStates": []
},
{
"id": 2,
"uuid": "c392910e-7648-4972-b594-47dd3d28f3ec",
"name": "cpg_Oracle",
"numFPVVs": 14,
"numTPVVs": 319,
"UsrUsage": {
"totalMiB": 1418752,
"rawTotalMiB": 1702500,
"usedMiB": 1417984,
"rawUsedMiB": 568934
},
"SAUsage": {
"totalMiB": 56832,
"rawTotalMiB": 170496,
"usedMiB": 42752,
"rawUsedMiB": 128256
},
"SDUsage": {
"totalMiB": 187648,
"rawTotalMiB": 225179,
"usedMiB": 157184,
"rawUsedMiB": 188620
},
"SAGrowth": {
"incrementMiB": 8192,
"LDLayout": {
"HA": 3,
"diskPatterns": [
{
"diskType": 1
}
]
}
},
"SDGrowth": {
"incrementMiB": 32768,
"LDLayout": {
"RAIDType": 3,
"HA": 3,
"setSize": 6,
"chunkletPosPref": 1,
"diskPatterns": [
{
"diskType": 1
}
]
}
},
"state": 1,
"failedStates": [],
"degradedStates": [],
"additionalStates": []
},
{
"id": 3,
"uuid": "c392910e-7648-4972-b594-47dd3d28f3ec",
"name": "cpg_filesystem",
"numFPVVs": 14,
"numTPVVs": 319,
"UsrUsage": {
"totalMiB": 1418752,
"rawTotalMiB": 1702500,
"usedMiB": 1417984,
"rawUsedMiB": 568934
},
"SAUsage": {
"totalMiB": 56832,
"rawTotalMiB": 170496,
"usedMiB": 42752,
"rawUsedMiB": 128256
},
"SDUsage": {
"totalMiB": 187648,
"rawTotalMiB": 225179,
"usedMiB": 157184,
"rawUsedMiB": 188620
},
"SAGrowth": {
"incrementMiB": 8192,
"LDLayout": {
"HA": 3,
"diskPatterns": [
{
"diskType": 1
}
]
}
},
"SDGrowth": {
"incrementMiB": 32768,
"LDLayout": {
"RAIDType": 3,
"HA": 3,
"setSize": 6,
"chunkletPosPref": 1,
"diskPatterns": [
{
"diskType": 1
}
]
}
},
"state": 1,
"failedStates": [],
"degradedStates": [],
"additionalStates": []
},
{
"id": 4,
"uuid": "c392910e-7648-4972-b594-47dd3d28f3ec",
"name": "cpg_test",
"numFPVVs": 14,
"numTPVVs": 319,
"UsrUsage": {
"totalMiB": 1418752,
"rawTotalMiB": 1702500,
"usedMiB": 1417984,
"rawUsedMiB": 568934
},
"SAUsage": {
"totalMiB": 56832,
"rawTotalMiB": 170496,
"usedMiB": 42752,
"rawUsedMiB": 128256
},
"SDUsage": {
"totalMiB": 187648,
"rawTotalMiB": 225179,
"usedMiB": 157184,
"rawUsedMiB": 188620
},
"SAGrowth": {
"incrementMiB": 8192,
"LDLayout": {
"HA": 3,
"diskPatterns": [
{
"diskType": 1
}
]
}
},
"SDGrowth": {
"incrementMiB": 32768,
"LDLayout": {
"RAIDType": 3,
"HA": 3,
"setSize": 6,
"chunkletPosPref": 1,
"diskPatterns": [
{
"diskType": 1
}
]
}
},
"state": 1,
"failedStates": [],
"degradedStates": [],
"additionalStates": []
},
{
"id": 5,
"uuid": "c392910e-7648-4972-b594-47dd3d28f3ec",
"name": "fs_cpg",
"numFPVVs": 14,
"numTPVVs": 319,
"UsrUsage": {
"totalMiB": 1418752,
"rawTotalMiB": 1702500,
"usedMiB": 1417984,
"rawUsedMiB": 568934
},
"SAUsage": {
"totalMiB": 56832,
"rawTotalMiB": 170496,
"usedMiB": 42752,
"rawUsedMiB": 128256
},
"SDUsage": {
"totalMiB": 187648,
"rawTotalMiB": 225179,
"usedMiB": 157184,
"rawUsedMiB": 188620
},
"SAGrowth": {
"incrementMiB": 8192,
"LDLayout": {
"HA": 3,
"diskPatterns": [
{
"diskType": 1
}
]
}
},
"SDGrowth": {
"incrementMiB": 32768,
"LDLayout": {
"RAIDType": 3,
"HA": 3,
"setSize": 6,
"chunkletPosPref": 1,
"diskPatterns": [
{
"diskType": 1
}
]
}
},
"state": 1,
"failedStates": [],
"degradedStates": [],
"additionalStates": []
},
{
"id": 6,
"uuid": "c392910e-7648-4972-b594-47dd3d28f3ec",
"name": "ljn2",
"numFPVVs": 14,
"numTPVVs": 319,
"UsrUsage": {
"totalMiB": 1418752,
"rawTotalMiB": 1702500,
"usedMiB": 1417984,
"rawUsedMiB": 568934
},
"SAUsage": {
"totalMiB": 56832,
"rawTotalMiB": 170496,
"usedMiB": 42752,
"rawUsedMiB": 128256
},
"SDUsage": {
"totalMiB": 187648,
"rawTotalMiB": 225179,
"usedMiB": 157184,
"rawUsedMiB": 188620
},
"SAGrowth": {
"incrementMiB": 8192,
"LDLayout": {
"HA": 3,
"diskPatterns": [
{
"diskType": 1
}
]
}
},
"SDGrowth": {
"incrementMiB": 32768,
"LDLayout": {
"RAIDType": 3,
"HA": 3,
"setSize": 6,
"chunkletPosPref": 1,
"diskPatterns": [
{
"diskType": 1
}
]
}
},
"state": 1,
"failedStates": [],
"degradedStates": [],
"additionalStates": []
},
{
"id": 7,
"uuid": "c392910e-7648-4972-b594-47dd3d28f3ec",
"name": "ljn4_xiuGai",
"numFPVVs": 14,
"numTPVVs": 319,
"UsrUsage": {
"totalMiB": 1418752,
"rawTotalMiB": 1702500,
"usedMiB": 1417984,
"rawUsedMiB": 568934
},
"SAUsage": {
"totalMiB": 56832,
"rawTotalMiB": 170496,
"usedMiB": 42752,
"rawUsedMiB": 128256
},
"SDUsage": {
"totalMiB": 187648,
"rawTotalMiB": 225179,
"usedMiB": 157184,
"rawUsedMiB": 188620
},
"SAGrowth": {
"incrementMiB": 8192,
"LDLayout": {
"HA": 3,
"diskPatterns": [
{
"diskType": 1
}
]
}
},
"SDGrowth": {
"incrementMiB": 32768,
"LDLayout": {
"RAIDType": 3,
"HA": 3,
"setSize": 6,
"chunkletPosPref": 1,
"diskPatterns": [
{
"diskType": 1
}
]
}
},
"state": 1,
"failedStates": [],
"degradedStates": [],
"additionalStates": []
},
{
"id": 8,
"uuid": "c392910e-7648-4972-b594-47dd3d28f3ec",
"name": "ljn_330",
"numFPVVs": 14,
"numTPVVs": 319,
"UsrUsage": {
"totalMiB": 1418752,
"rawTotalMiB": 1702500,
"usedMiB": 1417984,
"rawUsedMiB": 568934
},
"SAUsage": {
"totalMiB": 56832,
"rawTotalMiB": 170496,
"usedMiB": 42752,
"rawUsedMiB": 128256
},
"SDUsage": {
"totalMiB": 187648,
"rawTotalMiB": 225179,
"usedMiB": 157184,
"rawUsedMiB": 188620
},
"SAGrowth": {
"incrementMiB": 8192,
"LDLayout": {
"HA": 3,
"diskPatterns": [
{
"diskType": 1
}
]
}
},
"SDGrowth": {
"incrementMiB": 32768,
"LDLayout": {
"RAIDType": 3,
"HA": 3,
"setSize": 6,
"chunkletPosPref": 1,
"diskPatterns": [
{
"diskType": 1
}
]
}
},
"state": 1,
"failedStates": [],
"degradedStates": [],
"additionalStates": []
},
{
"id": 9,
"uuid": "c392910e-7648-4972-b594-47dd3d28f3ec",
"name": "xulin_cpg1",
"numFPVVs": 14,
"numTPVVs": 319,
"UsrUsage": {
"totalMiB": 1418752,
"rawTotalMiB": 1702500,
"usedMiB": 1417984,
"rawUsedMiB": 568934
},
"SAUsage": {
"totalMiB": 56832,
"rawTotalMiB": 170496,
"usedMiB": 42752,
"rawUsedMiB": 128256
},
"SDUsage": {
"totalMiB": 187648,
"rawTotalMiB": 225179,
"usedMiB": 157184,
"rawUsedMiB": 188620
},
"SAGrowth": {
"incrementMiB": 8192,
"LDLayout": {
"HA": 3,
"diskPatterns": [
{
"diskType": 1
}
]
}
},
"SDGrowth": {
"incrementMiB": 32768,
"LDLayout": {
"RAIDType": 3,
"HA": 3,
"setSize": 6,
"chunkletPosPref": 1,
"diskPatterns": [
{
"diskType": 1
}
]
}
},
"state": 1,
"failedStates": [],
"degradedStates": [],
"additionalStates": []
},
{
"id": 10,
"uuid": "c392910e-7648-4972-b594-47dd3d28f3ec",
"name": "zyz",
"numFPVVs": 14,
"numTPVVs": 319,
"UsrUsage": {
"totalMiB": 1418752,
"rawTotalMiB": 1702500,
"usedMiB": 1417984,
"rawUsedMiB": 568934
},
"SAUsage": {
"totalMiB": 56832,
"rawTotalMiB": 170496,
"usedMiB": 42752,
"rawUsedMiB": 128256
},
"SDUsage": {
"totalMiB": 187648,
"rawTotalMiB": 225179,
"usedMiB": 157184,
"rawUsedMiB": 188620
},
"SAGrowth": {
"incrementMiB": 8192,
"LDLayout": {
"HA": 3,
"diskPatterns": [
{
"diskType": 1
}
]
}
},
"SDGrowth": {
"incrementMiB": 32768,
"LDLayout": {
"RAIDType": 3,
"HA": 3,
"setSize": 6,
"chunkletPosPref": 1,
"diskPatterns": [
{
"diskType": 1
}
]
}
},
"state": 1,
"failedStates": [],
"degradedStates": [],
"additionalStates": []
},
{
"id": 11,
"uuid": "c392910e-7648-4972-b594-47dd3d28f3ec",
"name": "22",
"numFPVVs": 14,
"numTPVVs": 319,
"UsrUsage": {
"totalMiB": 1418752,
"rawTotalMiB": 1702500,
"usedMiB": 1417984,
"rawUsedMiB": 568934
},
"SAUsage": {
"totalMiB": 56832,
"rawTotalMiB": 170496,
"usedMiB": 42752,
"rawUsedMiB": 128256
},
"SDUsage": {
"totalMiB": 187648,
"rawTotalMiB": 225179,
"usedMiB": 157184,
"rawUsedMiB": 188620
},
"SAGrowth": {
"incrementMiB": 8192,
"LDLayout": {
"HA": 3,
"diskPatterns": [
{
"diskType": 1
}
]
}
},
"SDGrowth": {
"incrementMiB": 32768,
"LDLayout": {
"RAIDType": 3,
"HA": 3,
"setSize": 6,
"chunkletPosPref": 1,
"diskPatterns": [
{
"diskType": 1
}
]
}
},
"state": 1,
"failedStates": [],
"degradedStates": [],
"additionalStates": []
}
]
}
POOL_METRICS_DATAS = {
"sampleTime": "2020-03-01T03:50:00+08:00",
"sampleTimeSec": 1583005800,
"total": 2,
"members": [
{
"name": "22",
"IO": {
"read": 0,
"write": 0,
"total": 10
},
"KBytes": {
"read": 0,
"write": 0,
"total": 0
},
"serviceTimeMS": {
"read": 0,
"write": 0,
"total": 0
},
"IOSizeKB": {
"read": 0,
"write": 0,
"total": 0
},
"queueLength": 0,
"busyPct": 0
},
{
"name": "Lcltest333",
"IO": {
"read": 0,
"write": 0,
"total": 20
},
"KBytes": {
"read": 0,
"write": 0,
"total": 0
},
"serviceTimeMS": {
"read": 0,
"write": 0,
"total": 0
},
"IOSizeKB": {
"read": 0,
"write": 0,
"total": 0
},
"queueLength": 0,
"busyPct": 0
}
]
}
PORT_METRICS_DATAS = """
Time: 2021-07-14 14:10:00 CST (1626243000)
----IO/s----- ---KBytes/s---- ----Svct ms----- -IOSz KBytes-
PORT_N PORT_S PORT_P Rd Wr Tot Rd Wr Tot Rd Wr Tot Rd Wr Tot QLen AvgBusy%
0 0 1 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0.00 0.0 0.0 0.0 0 0.0
0 1 1 0.0 14.3 14.3 0.0 86.4 86.4 0.00 11.52 11.52 0.0 6.1 6.1 1 11.9
----------------------------------------------------------------------------
2 7.6 31.4 39.0 0.6 192.0 192.6 0.00 12.34 9.93 0.1 6.2 5.0 1 3.0
"""
DISK_METRICS_DATAS = """
Time: 2021-07-14 15:35:00 CST (1626248100)
----IO/s----- ---KBytes/s---- ----Svct ms----- -IOSz KBytes-
PDID Rd Wr Tot Rd Wr Tot Rd Wr Tot Rd Wr Tot QLen AvgBusy%
0 0.0 0.5 0.5 0.0 4.9 4.9 0.00 3.04 3.04 0.0 10.0 10.0 0 0.1
1 0.0 1.6 1.6 0.0 10.2 10.2 0.00 0.89 0.89 0.0 6.3 6.3 0 0.1
-------------------------------------------------------------------------------
2 0.0 31.4 31.4 0.0 191.4 191.4 0.00 11.98 11.98 0.0 6.2 6.2 0 1.5
"""
VOLUME_METRICS_DATAS = """
Time: 2021-07-14 14:10:00 CST (1626243000)
----IO/s----- ---KBytes/s---- ----Svct ms----- -IOSz KBytes-
VVID VV_NAME Rd Wr Tot Rd Wr Tot Rd Wr Tot Rd Wr Tot QLen AvgBusy%
0 srdata 0.0 1.0 2.0 3.0 11.0 22.0 33.00 111.00 222.00 333.0 0.0 0.0 0 0.0
1 admin 0.0 14.3 14.3 0.0 86.4 86.4 0.00 11.52 11.52 0.0 6.1 6.1 1 11.9
----------------------------------------------------------------------------
2 7.6 31.4 39.0 0.6 192.0 192.6 0.00 12.34 9.93 0.1 6.2 5.0 1 3.0
"""
HOST_GROUP_DATAS = """
Id Name Members Comment
194 HostSet_VMware Host_ESXi6.5_125 --
229 HostSet_Suse11_Oracle Host_Suse11_172.16.17.32 --
257 HostGroup_ESX6.0 ESX6.0_192.168.127.12 --
ESX6.0_172.16.58.3
264 HostSet_Win2016_WSFC RH2288V5_Win2016_node2 --
RH2288V5_Win2016_node1
266 HostSet_Win2012_WSFC RH2285_Win2012_wsfc1 --
Rh2285_Win2012_wsfc2
268 HostSet_AIX Host_AIX_172.16.58.3 --
270 HostSet_Suse11 Host_Suse11_172.16.31.10 --
274 Suse11sp4_150 litng138.150 --
-----------------------------------------------------------
32 total 28
"""
HOST_ID_DATAS = """
Id Name Persona -WWN/iSCSI_Name- Port IP_addr
175 Host_ESXi6.5_125 Generic 2408244427906812 --- n/a
54 Doradov3_lm Generic 2418244427906812 --- n/a
57 AIX_wenbin AIX-legacy 10000000C9E74BCC --- n/a
65 SKY-ESXI60 Generic 2100001B321BE0FF --- n/a
65 SKY-ESXI60 Generic 2101001B323BE0FF --- n/a
67 zouming Generic 2012E4A8B6B0A1CC --- n/a
67 zouming Generic 2002E4A8B6B0A1CC --- n/a
68 powerpath Generic 21000024FF36D406 --- n/a
68 powerpath Generic 21000024FF36D407 --- n/a
69 power_v3 Generic 20809CE37435D845 --- n/a
69 power_v3 Generic 20909CE37435D845 --- n/a
89 vplex_meta_important Generic 5000144280292012 0:1:2 n/a
89 vplex_meta_important Generic 5000144280292010 0:1:2 n/a
89 vplex_meta_important Generic 5000144290292012 1:1:2 n/a
89 vplex_meta_important Generic 500014429029E910 1:1:2 n/a
89 vplex_meta_important Generic 500014429029E912 1:1:2 n/a
89 vplex_meta_important Generic 500014428029E912 1:1:2 n/a
89 vplex_meta_important Generic 500014428029E910 1:1:2 n/a
89 vplex_meta_important Generic 5000144290292010 1:1:2 n/a
89 vplex_meta_important Generic 5000144290292012 0:1:2 n/a
89 vplex_meta_important Generic 5000144290292010 0:1:2 n/a
89 vplex_meta_important Generic 500014429029E912 0:1:2 n/a
89 vplex_meta_important Generic 500014429029E910 0:1:2 n/a
89 vplex_meta_important Generic 5000144280292012 1:1:2 n/a
89 vplex_meta_important Generic 5000144280292010 1:1:2 n/a
89 vplex_meta_important Generic 500014428029E912 0:1:2 n/a
89 vplex_meta_important Generic 500014428029E910 0:1:2 n/a
91 Dorado5000_51.45 Generic 200080D4A58EA53A --- n/a
91 Dorado5000_51.45 Generic 201080D4A58EA53A --- n/a
98 AIX6.1_LN AIX-legacy 10000000C9781C57 --- n/a
98 AIX6.1_LN AIX-legacy 10000000C9781853 --- n/a
115 huhuihost Generic 2100000E1E1A9B30 --- n/a
121 Dorado5000V3_F3 Generic 201880D4A58EA53A --- n/a
160 host002 Generic 21000024FF41DCF8 --- n/a
-- -- -- 21000024FF41DCF7 1:0:2 n/a
-- -- -- 21000024FF41DCF6 1:0:2 n/a
-- -- -- 21000024FF0CC6CA 0:1:2 n/a
-- -- -- 21000024FF0CC6CA 1:1:2 n/a
-- -- -- 21000024FF0CBF47 0:1:2 n/a
-- -- -- 21000024FF0CBF47 1:1:2 n/a
"""
VOLUME_GROUP_DATAS = """
Id Name Members Comment
91 wcj_2 wcj_2.0 --
wcj_2.1
wcj_2.2
wcj_2.3
110 HP-Esxi-LUNSet -- --
124 zhangjun -- --
126 wcj_1 wcj_1.1 --
127 wcj_3 wcj_3.0 --
wcj_3.1
128 IBM_SVC -- --
129 zyz_3parF200_ zyz_3parF200.0 --
zyz_3parF200.1
zyz_3parF200.2
zyz_3parF200.3
130 zyz zyz_2 --
131 tx -- --
132 tx9 -- --
133 wcj_hp_1 -- --
136 AIX_YG_WYK_LUN AIX_YG_WYK_LUN.0 --
AIX_YG_WYK_LUN.1
AIX_YG_WYK_LUN.2
AIX_YG_WYK_LUN.3
140 st11 -- --
146 Solaris_lun_group Solaris_LUN1_13G --
solaris_LUN_2_33G
147 wcj_vplex wcj_vplex.0 --
-----------------------------------------------------------
32 total 28
"""
VOLUME_ID_DATAS = """
Id Name Prov Type CopyOf BsId Rd -Detailed_State- Adm Snp Usr VSize
4836 wcj_2.0 tpvv base --- 4836 RW normal 256 512 512 5120
4798 zyz_2 tpvv base --- 4836 RW normal 256 512 512 5120
4797 wcj_3.1 tpvv base --- 4836 RW normal 256 512 512 5120
666 yytest_vv_001 tpvv base --- 4836 RW normal 256 512 512 5120
------------------------------------------------------------------------
409 total 51072 158720 3279488 18186240
"""
HOST_DATAS = [
{
"total": 38,
"members": [
{
"id": 54,
"name": "Doradov3_lm",
"descriptors": {
"location": "U9-3-B17R_B7",
"IPAddr": "192.168.127.12",
"os": "ESXI6.0",
"model": "RH2288H V3"
},
"FCPaths": [
{
"wwn": "2408244427906812",
"hostSpeed": 0
},
{
"wwn": "2418244427906812",
"hostSpeed": 0
}
],
"iSCSIPaths": [],
"persona": 1,
"initiatorChapEnabled": False,
"targetChapEnabled": False
},
{
"id": 57,
"name": "AIX_wenbin",
"FCPaths": [
{
"wwn": "10000000C9E74BCC",
"hostSpeed": 0
}
],
"iSCSIPaths": [],
"persona": 5,
"initiatorChapEnabled": False,
"targetChapEnabled": False
},
{
"id": 65,
"name": "SKY-ESXI60",
"descriptors": {
"location": "U9-3-B17R_B7",
"IPAddr": "192.168.127.12",
"os": "ESXI6.0",
"model": "RH2288H V3"
},
"FCPaths": [
{
"wwn": "2100001B321BE0FF",
"hostSpeed": 0
},
{
"wwn": "2101001B323BE0FF",
"hostSpeed": 0
}
],
"iSCSIPaths": [],
"persona": 1,
"initiatorChapEnabled": False,
"targetChapEnabled": False
},
{
"id": 67,
"name": "zouming",
"FCPaths": [
{
"wwn": "2012E4A8B6B0A1CC",
"hostSpeed": 0
},
{
"wwn": "2002E4A8B6B0A1CC",
"hostSpeed": 0
}
],
"iSCSIPaths": [],
"persona": 1,
"initiatorChapEnabled": False,
"targetChapEnabled": False
},
{
"id": 68,
"name": "powerpath",
"FCPaths": [
{
"wwn": "21000024FF36D406",
"hostSpeed": 0
},
{
"wwn": "21000024FF36D407",
"hostSpeed": 0
}
],
"iSCSIPaths": [],
"persona": 1,
"initiatorChapEnabled": False,
"targetChapEnabled": False
},
{
"id": 69,
"name": "power_v3",
"FCPaths": [
{
"wwn": "20809CE37435D845",
"hostSpeed": 0
},
{
"wwn": "20909CE37435D845",
"hostSpeed": 0
}
],
"iSCSIPaths": [],
"persona": 1,
"initiatorChapEnabled": False,
"targetChapEnabled": False
},
{
"id": 89,
"name": "vplex_meta_important",
"FCPaths": [
{
"wwn": "5000144280292012",
"portPos": {
"node": 0,
"slot": 1,
"cardPort": 2
},
"hostSpeed": 0
},
{
"wwn": "5000144280292010",
"portPos": {
"node": 0,
"slot": 1,
"cardPort": 2
},
"hostSpeed": 0
},
{
"wwn": "5000144290292012",
"portPos": {
"node": 1,
"slot": 1,
"cardPort": 2
},
"hostSpeed": 0
},
{
"wwn": "500014429029E910",
"portPos": {
"node": 1,
"slot": 1,
"cardPort": 2
},
"hostSpeed": 0
},
{
"wwn": "500014429029E912",
"portPos": {
"node": 1,
"slot": 1,
"cardPort": 2
},
"hostSpeed": 0
},
{
"wwn": "500014428029E912",
"portPos": {
"node": 1,
"slot": 1,
"cardPort": 2
},
"hostSpeed": 0
},
{
"wwn": "500014428029E910",
"portPos": {
"node": 1,
"slot": 1,
"cardPort": 2
},
"hostSpeed": 0
},
{
"wwn": "5000144290292010",
"portPos": {
"node": 1,
"slot": 1,
"cardPort": 2
},
"hostSpeed": 0
},
{
"wwn": "5000144290292012",
"portPos": {
"node": 0,
"slot": 1,
"cardPort": 2
},
"hostSpeed": 0
},
{
"wwn": "5000144290292010",
"portPos": {
"node": 0,
"slot": 1,
"cardPort": 2
},
"hostSpeed": 0
},
{
"wwn": "500014429029E912",
"portPos": {
"node": 0,
"slot": 1,
"cardPort": 2
},
"hostSpeed": 0
},
{
"wwn": "500014429029E910",
"portPos": {
"node": 0,
"slot": 1,
"cardPort": 2
},
"hostSpeed": 0
},
{
"wwn": "5000144280292012",
"portPos": {
"node": 1,
"slot": 1,
"cardPort": 2
},
"hostSpeed": 0
},
{
"wwn": "5000144280292010",
"portPos": {
"node": 1,
"slot": 1,
"cardPort": 2
},
"hostSpeed": 0
},
{
"wwn": "500014428029E912",
"portPos": {
"node": 0,
"slot": 1,
"cardPort": 2
},
"hostSpeed": 0
},
{
"wwn": "500014428029E910",
"portPos": {
"node": 0,
"slot": 1,
"cardPort": 2
},
"hostSpeed": 0
}
],
"iSCSIPaths": [],
"persona": 1,
"initiatorChapEnabled": False,
"targetChapEnabled": False
},
{
"id": 91,
"name": "Dorado5000_51.45",
"FCPaths": [
{
"wwn": "200080D4A58EA53A",
"hostSpeed": 0
},
{
"wwn": "201080D4A58EA53A",
"hostSpeed": 0
}
],
"iSCSIPaths": [],
"persona": 1,
"initiatorChapEnabled": False,
"targetChapEnabled": False
},
{
"id": 98,
"name": "AIX6.1_LN",
"descriptors": {
"os": "AIX"
},
"FCPaths": [
{
"wwn": "10000000C9781C57",
"hostSpeed": 0
},
{
"wwn": "10000000C9781853",
"hostSpeed": 0
}
],
"iSCSIPaths": [],
"persona": 5,
"initiatorChapEnabled": False,
"targetChapEnabled": False
},
{
"id": 115,
"name": "huhuihost",
"descriptors": {
"os": "SuSE"
},
"FCPaths": [
{
"wwn": "2100000E1E1A9B30",
"hostSpeed": 0
}
],
"iSCSIPaths": [],
"persona": 1,
"initiatorChapEnabled": False,
"targetChapEnabled": False
},
{
"id": 121,
"name": "Dorado5000V3_F3",
"descriptors": {
"os": "Red Hat Enterprise Linux"
},
"FCPaths": [
{
"wwn": "201880D4A58EA53A",
"hostSpeed": 0
},
{
"wwn": "200380D4A58EA53A",
"hostSpeed": 0
}
],
"iSCSIPaths": [],
"persona": 1,
"initiatorChapEnabled": False,
"targetChapEnabled": False
},
{
"id": 122,
"name": "DYP_RHEL",
"descriptors": {
"IPAddr": "172.16.31.10",
"os": "Red Hat Enterprise Linux"
},
"FCPaths": [
{
"wwn": "10000090FA76D446",
"hostSpeed": 0
},
{
"wwn": "10000090FA76D447",
"hostSpeed": 0
}
],
"iSCSIPaths": [],
"persona": 1,
"initiatorChapEnabled": False,
"targetChapEnabled": False
},
{
"id": 123,
"name": "DYP_Dorado6000",
"FCPaths": [
{
"wwn": "2618346AC212FB94",
"hostSpeed": 0
}
],
"iSCSIPaths": [],
"persona": 1,
"initiatorChapEnabled": False,
"targetChapEnabled": False
},
{
"id": 124,
"name": "tool_rhel6.8",
"FCPaths": [
{
"wwn": "21000024FF543687",
"hostSpeed": 0
},
{
"wwn": "21000024FF543686",
"hostSpeed": 0
}
],
"iSCSIPaths": [],
"persona": 1,
"initiatorChapEnabled": False,
"targetChapEnabled": False
},
{
"id": 125,
"name": "OceanStor6800",
"FCPaths": [
{
"wwn": "2430E0979656725A",
"hostSpeed": 0
},
{
"wwn": "2208E0979656725A",
"hostSpeed": 0
},
{
"wwn": "2218E0979656725A",
"hostSpeed": 0
},
{
"wwn": "2428E0979656725A",
"hostSpeed": 0
}
],
"iSCSIPaths": [],
"persona": 1,
"initiatorChapEnabled": False,
"targetChapEnabled": False
},
{
"id": 126,
"name": "fyc_test",
"FCPaths": [
{
"wwn": "21000024FF41DE7E",
"hostSpeed": 0
}
],
"iSCSIPaths": [],
"persona": 1,
"initiatorChapEnabled": False,
"targetChapEnabled": False
},
{
"id": 127,
"name": "huhui",
"descriptors": {
"os": "SuSE"
},
"FCPaths": [
{
"wwn": "500601610864241E",
"hostSpeed": 0
}
],
"iSCSIPaths": [],
"persona": 1,
"initiatorChapEnabled": False,
"targetChapEnabled": False
},
{
"id": 132,
"name": "ESX172.16.58.3",
"descriptors": {
"os": "ESX 4.x/5.x"
},
"FCPaths": [
{
"wwn": "21000024FF2F3266",
"hostSpeed": 0
},
{
"wwn": "21000024FF2F3267",
"hostSpeed": 0
}
],
"iSCSIPaths": [],
"persona": 8,
"initiatorChapEnabled": False,
"targetChapEnabled": False
},
{
"id": 133,
"name": "ESX89PT_suse_172.16.31.10",
"descriptors": {
"os": "SuSE"
},
"FCPaths": [
{
"wwn": "21000024FF36F1ED",
"hostSpeed": 0
}
],
"iSCSIPaths": [],
"persona": 1,
"initiatorChapEnabled": False,
"targetChapEnabled": False
},
{
"id": 134,
"name": "SVC",
"descriptors": {
"os": "Exanet"
},
"FCPaths": [
{
"wwn": "500507680110EF7C",
"hostSpeed": 0
},
{
"wwn": "500507680120EF7C",
"hostSpeed": 0
},
{
"wwn": "500507680120EF3E",
"hostSpeed": 0
},
{
"wwn": "500507680110EF3E",
"hostSpeed": 0
}
],
"iSCSIPaths": [],
"persona": 3,
"initiatorChapEnabled": False,
"targetChapEnabled": False
},
{
"id": 135,
"name": "NSS_172.16.31.10",
"descriptors": {
"os": "Red Hat Enterprise Linux"
},
"FCPaths": [
{
"wwn": "21000024FF0DC381",
"hostSpeed": 0
}
],
"iSCSIPaths": [],
"persona": 1,
"initiatorChapEnabled": False,
"targetChapEnabled": False
},
{
"id": 137,
"name": "D185_172.16.31.10",
"descriptors": {
"os": "Red Hat Enterprise Linux"
},
"FCPaths": [
{
"wwn": "29A11603042D0306",
"hostSpeed": 0
},
{
"wwn": "28D01603042D0306",
"hostSpeed": 0
},
{
"wwn": "2903010203040509",
"hostSpeed": 0
},
{
"wwn": "2802010203040509",
"hostSpeed": 0
}
],
"iSCSIPaths": [],
"persona": 1,
"initiatorChapEnabled": False,
"targetChapEnabled": False
},
{
"id": 139,
"name": "Dorado3000V6",
"FCPaths": [
{
"wwn": "2019CC64A68314D3",
"hostSpeed": 0
},
{
"wwn": "2009CC64A68314D3",
"hostSpeed": 0
}
],
"iSCSIPaths": [],
"persona": 1,
"initiatorChapEnabled": False,
"targetChapEnabled": False
},
{
"id": 141,
"name": "172.16.31.10T2",
"FCPaths": [
{
"wwn": "10000090FA50C4DF",
"hostSpeed": 0
},
{
"wwn": "10000090FA50C4DE",
"hostSpeed": 0
}
],
"iSCSIPaths": [],
"persona": 1,
"initiatorChapEnabled": False,
"targetChapEnabled": False
},
{
"id": 142,
"name": "172.16.31.10T1",
"FCPaths": [],
"iSCSIPaths": [],
"persona": 1,
"initiatorChapEnabled": False,
"targetChapEnabled": False
},
{
"id": 144,
"name": "C61_51.10.58.190",
"descriptors": {
"os": "Red Hat Enterprise Linux"
},
"FCPaths": [
{
"wwn": "2210112224901223",
"hostSpeed": 0
},
{
"wwn": "2200112224901223",
"hostSpeed": 0
},
{
"wwn": "2230112224901223",
"hostSpeed": 0
},
{
"wwn": "2220112224901223",
"hostSpeed": 0
}
],
"iSCSIPaths": [],
"persona": 1,
"initiatorChapEnabled": False,
"targetChapEnabled": False
},
{
"id": 145,
"name": "172.16.31.10",
"FCPaths": [
{
"wwn": "21000024FF754606",
"hostSpeed": 0
},
{
"wwn": "21000024FF1A99E1",
"hostSpeed": 0
}
],
"iSCSIPaths": [],
"persona": 1,
"initiatorChapEnabled": False,
"targetChapEnabled": False
},
{
"id": 146,
"name": "ZTY_win2012",
"descriptors": {
"os": "Windows 2012"
},
"FCPaths": [
{
"wwn": "21000024FF40272B",
"portPos": {
"node": 1,
"slot": 1,
"cardPort": 2
},
"hostSpeed": 0
},
{
"wwn": "21000024FF40272A",
"hostSpeed": 0
}
],
"iSCSIPaths": [],
"persona": 2,
"initiatorChapEnabled": False,
"targetChapEnabled": False
},
{
"id": 147,
"name": "DoradoV6_183",
"FCPaths": [
{
"wwn": "240B121314151617",
"hostSpeed": 0
},
{
"wwn": "2409121314151617",
"hostSpeed": 0
}
],
"iSCSIPaths": [],
"persona": 1,
"initiatorChapEnabled": False,
"targetChapEnabled": False
},
{
"id": 148,
"name": "rhev_125",
"descriptors": {
"os": "Windows 2012"
},
"FCPaths": [
{
"wwn": "21000024FF4BC1B7",
"hostSpeed": 0
},
{
"wwn": "21000024FF4BC1B6",
"hostSpeed": 0
}
],
"iSCSIPaths": [],
"persona": 2,
"initiatorChapEnabled": False,
"targetChapEnabled": False
},
{
"id": 150,
"name": "windows2012_68",
"descriptors": {
"os": "Windows 2012"
},
"FCPaths": [
{
"wwn": "2101001B32B0667A",
"hostSpeed": 0
},
{
"wwn": "2100001B3290667A",
"hostSpeed": 0
}
],
"iSCSIPaths": [],
"persona": 2,
"initiatorChapEnabled": False,
"targetChapEnabled": False
},
{
"id": 151,
"name": "Dorado5000V6_80",
"FCPaths": [
{
"wwn": "2001183D5E0F5131",
"portPos": {
"node": 1,
"slot": 0,
"cardPort": 2
},
"hostSpeed": 0
},
{
"wwn": "2011183D5E0F5131",
"portPos": {
"node": 1,
"slot": 0,
"cardPort": 2
},
"hostSpeed": 0
}
],
"iSCSIPaths": [],
"persona": 1,
"initiatorChapEnabled": False,
"targetChapEnabled": False
},
{
"id": 152,
"name": "windows2012_60",
"descriptors": {
"os": "Windows 2012"
},
"FCPaths": [
{
"wwn": "21000024FF53B4BC",
"hostSpeed": 0
},
{
"wwn": "21000024FF53B4BD",
"hostSpeed": 0
}
],
"iSCSIPaths": [],
"persona": 2,
"initiatorChapEnabled": False,
"targetChapEnabled": False
},
{
"id": 153,
"name": "aix_192.168.3.11",
"descriptors": {
"os": "AIX"
},
"FCPaths": [
{
"wwn": "10000000C975804C",
"portPos": {
"node": 1,
"slot": 0,
"cardPort": 2
},
"hostSpeed": 0
},
{
"wwn": "10000000C9765E79",
"portPos": {
"node": 1,
"slot": 0,
"cardPort": 2
},
"hostSpeed": 0
}
],
"iSCSIPaths": [],
"persona": 5,
"initiatorChapEnabled": False,
"targetChapEnabled": False
},
{
"id": 154,
"name": "Dorado5500_V6_109",
"descriptors": {
"IPAddr": "192.168.127.12",
"os": "Windows 2012"
},
"FCPaths": [
{
"wwn": "221818022D189653",
"portPos": {
"node": 1,
"slot": 0,
"cardPort": 2
},
"hostSpeed": 0
},
{
"wwn": "220818022D189653",
"portPos": {
"node": 1,
"slot": 0,
"cardPort": 2
},
"hostSpeed": 0
}
],
"iSCSIPaths": [],
"persona": 2,
"initiatorChapEnabled": False,
"targetChapEnabled": False
},
{
"id": 155,
"name": "aix134.205",
"descriptors": {
"IPAddr": "192.168.127.12",
"os": "AIX"
},
"FCPaths": [
{
"wwn": "20000000C9781C81",
"hostSpeed": 0
},
{
"wwn": "10000000C9781C0C",
"portPos": {
"node": 1,
"slot": 0,
"cardPort": 2
},
"hostSpeed": 0
}
],
"iSCSIPaths": [],
"persona": 5,
"initiatorChapEnabled": False,
"targetChapEnabled": False
},
{
"id": 158,
"name": "hsv6",
"FCPaths": [
{
"wwn": "28130A2B304438A8",
"hostSpeed": 0
},
{
"wwn": "28120A2B304438A8",
"hostSpeed": 0
},
{
"wwn": "28F20A2B304438A8",
"hostSpeed": 0
},
{
"wwn": "28F30A2B304438A8",
"hostSpeed": 0
}
],
"iSCSIPaths": [],
"persona": 1,
"initiatorChapEnabled": False,
"targetChapEnabled": False
},
{
"FCPaths": [
{
"wwn": "21000024FF41DCF7",
"portPos": {
"node": 1,
"slot": 0,
"cardPort": 2
},
"hostSpeed": 0
},
{
"wwn": "21000024FF41DCF6",
"portPos": {
"node": 1,
"slot": 0,
"cardPort": 2
},
"hostSpeed": 0
},
{
"wwn": "21000024FF0CC6CA",
"portPos": {
"node": 0,
"slot": 1,
"cardPort": 2
},
"hostSpeed": 0
},
{
"wwn": "21000024FF0CC6CA",
"portPos": {
"node": 1,
"slot": 1,
"cardPort": 2
},
"hostSpeed": 0
},
{
"wwn": "21000024FF0CBF47",
"portPos": {
"node": 0,
"slot": 1,
"cardPort": 2
},
"hostSpeed": 0
},
{
"wwn": "21000024FF0CBF47",
"portPos": {
"node": 1,
"slot": 1,
"cardPort": 2
},
"hostSpeed": 0
}
],
"iSCSIPaths": [],
"initiatorChapEnabled": False,
"targetChapEnabled": False
}
]
}
]
VIEW_DATAS = """
Lun VVName HostName -Host_WWN/iSCSI_Name- Port Type
2 yytest_vv_001 host002 ---------------- 0:2:1 host
0 set:vvset001 set:hostset111 ---------------- 1:2:1 host set
--------------------------------------------------------------------
2 total
"""
CONTROLLER_RESULT = [
{
'name': '1307327-0',
'storage_id': '12345',
'native_controller_id': '0',
'status': 'degraded',
'location': None,
'soft_version': '3.2.2.204',
'cpu_info': '4 * 2327 MHz',
'memory_size': '10737418240'
}]
DISK_RESULT = [
{
'name': '0:14:0',
'storage_id': '12345',
'native_disk_id': '0',
'serial_number': 'Serial111',
'manufacturer': 'MFR111',
'model': 'Model11',
'firmware': 'FW_Rev111',
'speed': 15000,
'capacity': 599684808704,
'status': 'degraded',
'physical_type': 'fc',
'logical_type': None,
'health_score': None,
'native_disk_group_id': None,
'location': '0:14:0'
}]
PORT_RESULT = [
{
'name': '0:0:1',
'storage_id': '12345',
'native_port_id': '0:0:1',
'location': '0:0:1',
'connection_status': 'connected',
'health_status': 'normal',
'type': 'sas',
'logical_type': None,
'speed': 8000000000,
'max_speed': 6000000000,
'native_parent_id': None,
'wwn': '20010002AC001C9F',
'mac_address': None,
'ipv4': None,
'ipv4_mask': None,
'ipv6': None,
'ipv6_mask': None
}]
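# The keys in each 'values' dict below are the sample timestamps taken from the
# mock performance data above (sampleTimeSec, or the bracketed epoch seconds in
# the CLI output), converted to epoch milliseconds.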
METRICS_RESULT = [
constants.metric_struct(name='iops',
labels={
'storage_id': '12345',
'resource_type': 'storagePool',
'resource_id': '11',
'type': 'RAW',
'unit': 'IOPS'},
values={1583005800000: 10}
),
constants.metric_struct(name='iops',
labels={
'storage_id': '12345',
'resource_type': 'volume',
'resource_id': '0',
'type': 'RAW',
'unit': 'IOPS'},
values={1626243000000: 2.0}
),
constants.metric_struct(name='iops',
labels={
'storage_id': '12345',
'resource_type': 'port',
'resource_id': '0:0:1',
'type': 'RAW',
'unit': 'IOPS'
},
values={1626243000000: 0.0}
),
constants.metric_struct(name='iops',
labels={
'storage_id': '12345',
'resource_type': 'disk',
'resource_id': '0',
'type': 'RAW',
'unit': 'IOPS'
},
values={1626248100000: 0.5}
),
]
HOST_GROUP_RESULT = [
{
'name': 'HostSet_VMware',
'description': '',
'storage_id': '12345',
'native_storage_host_group_id': '194'
}]
VOLUME_GROUP_RESULT = [
{
'name': 'wcj_2',
'description': '',
'storage_id': '12345',
'native_volume_group_id': '91'
}]
PORT_GROUP_RESULT = [
{
'name': 'port_group_0:2:1',
'description': 'port_group_0:2:1',
'storage_id': '12345',
'native_port_group_id': 'port_group_0:2:1'
}]
HOST_RESULT = [
{
'name': 'Doradov3_lm',
'description': None,
'storage_id': '12345',
'native_storage_host_id': 54,
'os_type': 'VMware ESX',
'status': 'normal',
'ip_address': '192.168.127.12'
}]
INITIATOR_RESULT = [
{
'name': '2408244427906812',
'storage_id': '12345',
'native_storage_host_initiator_id': '2408244427906812',
'wwn': '2408244427906812',
'type': 'fc',
'status': 'online',
'native_storage_host_id': '175'
}]
VIEW_RESULT = [
{
'native_masking_view_id': '2_0:2:1_host002_yytest_vv_001',
'name': '2',
'storage_id': '12345',
'native_port_group_id': 'port_group_0:2:1',
'native_volume_id': '666',
'native_storage_host_id': '160'
}]
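# create_driver() builds an Hpe3parStorDriver against mocked transports:
# SSHHandler.login is stubbed out and Session.post is patched to return a fake
# 201 response carrying a session key, so no real array is contacted.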
def create_driver():
kwargs = ACCESS_INFO
SSHHandler.login = mock.Mock(
return_value={"result": "success", "reason": "null"})
m = mock.MagicMock(status_code=201)
with mock.patch.object(Session, 'post', return_value=m):
m.raise_for_status.return_value = 201
m.json.return_value = {
'key': 'deviceid123ABC456'
}
return Hpe3parStorDriver(**kwargs)
class TestHpe3parStorageDriver(TestCase):
def test_a_init(self):
kwargs = ACCESS_INFO
SSHHandler.login = mock.Mock(
return_value={""})
RestHandler.login = mock.Mock(
return_value={""})
Hpe3parStorDriver(**kwargs)
def test_b_initrest(self):
m = mock.MagicMock()
with mock.patch.object(Session, 'post', return_value=m):
m.raise_for_status.return_value = 201
m.json.return_value = {
'key': '1&2F28CA9FC1EA0B8EAB80E9D8FD'
}
kwargs = ACCESS_INFO
rc = RestClient(**kwargs)
RestHandler(rc)
def test_d_get_storage(self):
driver = create_driver()
expected = {
'name': 'hp3parf200',
'vendor': 'HPE',
'model': 'InServ F200',
'status': 'abnormal',
'serial_number': '1307327',
'firmware_version': '3.1.2.484',
'location': None,
'total_capacity': 7793486594048,
'raw_capacity': 9594956939264,
'used_capacity': 6087847706624,
'free_capacity': 1705638887424
}
ret = {
"id": 7327,
"name": "hp3parf200",
"systemVersion": "3.1.2.484",
"IPv4Addr": "172.16.17.32",
"model": "InServ F200",
"serialNumber": "1307327",
"totalNodes": 2,
"masterNode": 0,
"onlineNodes": [
0,
1
],
"clusterNodes": [
0,
1
],
"chunkletSizeMiB": 256,
"totalCapacityMiB": 9150464,
"allocatedCapacityMiB": 5805824,
"freeCapacityMiB": 1626624,
"failedCapacityMiB": 1718016,
"timeZone": "Asia/Shanghai"
}
RestHandler.get_capacity = mock.Mock(
return_value={
"allCapacity": {
"totalMiB": 9150464,
"allocated": {
"system": {
"totalSystemMiB": 1232384,
"internalMiB": 303104,
"spareMiB": 929280,
"spareUsedMiB": 307456,
"spareUnusedMiB": 621824
}
}
}
}
)
health_state = 'PDs that are degraded'
SSHHandler.get_health_state = mock.Mock(return_value=health_state)
m = mock.MagicMock(status_code=200)
with mock.patch.object(RestHandler, 'call', return_value=m):
m.raise_for_status.return_value = 200
m.json.return_value = ret
storage = driver.get_storage(context)
self.assertDictEqual(storage, expected)
def test_e_list_storage_pools(self):
driver = create_driver()
expected = [
{
'name': 'test',
'storage_id': '12345',
'native_storage_pool_id': '0',
'description': 'Hpe 3par CPG:test',
'status': 'normal',
'storage_type': 'block',
'total_capacity': 2003870679040,
'subscribed_capacity': 2917892358144,
'used_capacity': 1448343502848,
'free_capacity': 555527176192
}, {
'name': 'cxd',
'storage_id': '12345',
'native_storage_pool_id': '1',
'description': 'Hpe 3par CPG:cxd',
'status': 'normal',
'storage_type': 'block',
'total_capacity': 1744025157632,
'subscribed_capacity': 2200095948800,
'used_capacity': 1696512081920,
'free_capacity': 47513075712
}
]
ret = [
{
"total": 2,
"members": [
{
"id": 0,
"uuid": "aa43f218-d3dd-4626-948f-8a160b0eac1d",
"name": "test",
"numFPVVs": 21,
"numTPVVs": 25,
"UsrUsage": {
"totalMiB": 1381504,
"rawTotalMiB": 1842004,
"usedMiB": 1376128,
"rawUsedMiB": 712703
},
"SAUsage": {
"totalMiB": 140800,
"rawTotalMiB": 422400,
"usedMiB": 5120,
"rawUsedMiB": 15360
},
"SDUsage": {
"totalMiB": 388736,
"rawTotalMiB": 518315,
"usedMiB": 0,
"rawUsedMiB": 0
},
"SAGrowth": {
"incrementMiB": 8192,
"LDLayout": {
"HA": 3,
"diskPatterns": [
{
"diskType": 1
}
]
}
},
"SDGrowth": {
"incrementMiB": 32768,
"LDLayout": {
"RAIDType": 3,
"HA": 3,
"setSize": 4,
"chunkletPosPref": 1,
"diskPatterns": [
{
"diskType": 1
}
]
}
},
"state": 1,
"failedStates": [],
"degradedStates": [],
"additionalStates": []
},
{
"id": 1,
"uuid": "c392910e-7648-4972-b594-47dd3d28f3ec",
"name": "cxd",
"numFPVVs": 14,
"numTPVVs": 319,
"UsrUsage": {
"totalMiB": 1418752,
"rawTotalMiB": 1702500,
"usedMiB": 1417984,
"rawUsedMiB": 568934
},
"SAUsage": {
"totalMiB": 56832,
"rawTotalMiB": 170496,
"usedMiB": 42752,
"rawUsedMiB": 128256
},
"SDUsage": {
"totalMiB": 187648,
"rawTotalMiB": 225179,
"usedMiB": 157184,
"rawUsedMiB": 188620
},
"SAGrowth": {
"incrementMiB": 8192,
"LDLayout": {
"HA": 3,
"diskPatterns": [
{
"diskType": 1
}
]
}
},
"SDGrowth": {
"incrementMiB": 32768,
"LDLayout": {
"RAIDType": 3,
"HA": 3,
"setSize": 6,
"chunkletPosPref": 1,
"diskPatterns": [
{
"diskType": 1
}
]
}
},
"state": 1,
"failedStates": [],
"degradedStates": [],
"additionalStates": []
}
]
}
]
with mock.patch.object(RestHandler, 'get_resinfo_call',
side_effect=ret):
pools = driver.list_storage_pools(context)
self.assertDictEqual(pools[0], expected[0])
self.assertDictEqual(pools[1], expected[1])
with mock.patch.object(RestHandler, 'get_all_pools',
side_effect=exception.DelfinException):
with self.assertRaises(Exception) as exc:
driver.list_storage_pools(context)
self.assertIn('An unknown exception occurred',
str(exc.exception))
def test_f_list_volumes(self):
driver = create_driver()
expected = [{
'name': 'admin',
'storage_id': '12345',
'description': None,
'status': 'normal',
'native_volume_id': '0',
'native_storage_pool_id': '',
'wwn': '50002AC000001C9F',
'type': 'thick',
'total_capacity': 10737418240,
'used_capacity': 10737418240,
'free_capacity': 0,
'compressed': True,
'deduplicated': True
}]
ret = [{
"members": [{
"id": 0,
"name": "admin",
"provisioningType": 1,
"copyType": 1,
"baseId": 0,
"readOnly": False,
"state": 1,
"userSpace": {
"reservedMiB": 10240,
"rawReservedMiB": 20480,
"usedMiB": 10240,
"freeMiB": 0
},
"sizeMiB": 10240,
"wwn": "50002AC000001C9F"
}]
}]
pool_ret = {
"members": [{
"id": 0,
"uuid": "aa43f218-d3dd-4626-948f-8a160b0eac1d",
"name": "test"
}]
}
RestHandler.get_all_pools = mock.Mock(return_value=pool_ret)
with mock.patch.object(RestHandler, 'get_resinfo_call',
side_effect=ret):
volumes = driver.list_volumes(context)
self.assertDictEqual(volumes[0], expected[0])
def test_h_parse_alert(self):
""" Success flow with all necessary parameters"""
driver = create_driver()
alert = {
'sysUpTime': '1399844806',
'snmpTrapOID': 'alertNotify',
'1.3.6.1.4.1.12925.1.7.1.5.1': 'test_trap',
'1.3.6.1.4.1.12925.1.7.1.6.1': 'This is a test trap',
'nodeID': '0',
'1.3.6.1.4.1.12925.1.7.1.2.1': '6',
'1.3.6.1.4.1.12925.1.7.1.3.1': 'test time',
'1.3.6.1.4.1.12925.1.7.1.7.1': '89',
'1.3.6.1.4.1.12925.1.7.1.8.1': '2555934',
'1.3.6.1.4.1.12925.1.7.1.9.1': '5',
'serialNumber': '1307327',
'transport_address': '172.16.17.32',
'storage_id': '1c094309-70f2-4da3-ac47-e87cc1492ad5'
}
expected_alert_model = {
'alert_id': '0x027001e',
'alert_name': 'CPG growth non admin limit',
'severity': 'NotSpecified',
'category': 'Recovery',
'type': 'EquipmentAlarm',
'sequence_number': '89',
'description': 'This is a test trap',
'resource_type': 'Storage',
'location': 'test_trap',
'occur_time': '',
'clear_category': 'Automatic'
}
context = {}
alert_model = driver.parse_alert(context, alert)
# Verify that all other fields are matching
self.assertDictEqual(expected_alert_model, alert_model)
def test_list_alert(self):
""" Success flow with all necessary parameters"""
driver = create_driver()
alert = """
Id : 1
State : New
MessageCode : 0x2200de
Time : 2015-07-17 20:14:29 PDT
Severity : Degraded
Type : Component state change
Message : Node 0, Power Supply 1, Battery 0 Degraded
Component: 172.16.17.32
"""
expected_alert = [{
'alert_id': '0x2200de',
'alert_name': 'Component state change',
'severity': 'Warning',
'category': 'Fault',
'type': 'EquipmentAlarm',
'sequence_number': '1',
'occur_time': 1437135269000,
'description': 'Node 0, Power Supply 1, Battery 0 Degraded',
'resource_type': 'Storage',
'location': '172.16.17.32'
}]
SSHHandler.get_all_alerts = mock.Mock(return_value=alert)
alert_list = driver.list_alerts(context, None)
expected_alert[0]['occur_time'] = alert_list[0]['occur_time']
self.assertDictEqual(alert_list[0], expected_alert[0])
@mock.patch.object(AlertHandler, 'clear_alert')
def test_clear_alert(self, mock_clear_alert):
driver = create_driver()
alert_id = '230584300921369'
driver.clear_alert(context, alert_id)
self.assertEqual(mock_clear_alert.call_count, 1)
def test_get_controllers(self):
driver = create_driver()
SSHPool.get = mock.Mock(return_value={paramiko.SSHClient()})
SSHPool.do_exec = mock.Mock(
side_effect=[NODE_DATAS, NODE_CPU_DATAS, NODE_VERSION])
controllers = driver.list_controllers(context)
self.assertDictEqual(controllers[0], CONTROLLER_RESULT[0])
def test_get_disks(self):
driver = create_driver()
SSHPool.do_exec = mock.Mock(side_effect=[DISK_DATAS, DISK_I_DATAS])
disks = driver.list_disks(context)
self.assertDictEqual(disks[0], DISK_RESULT[0])
def test_get_ports(self):
driver = create_driver()
SSHPool.do_exec = mock.Mock(
side_effect=[PORT_DATAS, PORT_I_DATAS, PORT_PER_DATAS,
PORT_ISCSI_DATAS, PORT_RCIP_DATAS, PORT_C_DATAS,
PORT_RCIP_DATAS, PORT_RCIP_DATAS])
ports = driver.list_ports(context)
self.assertDictEqual(ports[0], PORT_RESULT[0])
@mock.patch.object(RestHandler, 'get_pool_metrics')
@mock.patch.object(SSHPool, 'do_exec')
def test_get_perf_metrics(self, mock_exec, mock_pool):
driver = create_driver()
resource_metrics = {
'storagePool': [
'iops', 'readIops', 'writeIops',
'throughput', 'readThroughput', 'writeThroughput',
'responseTime'
],
'volume': [
'iops', 'readIops', 'writeIops',
'throughput', 'readThroughput', 'writeThroughput',
'responseTime',
'ioSize', 'readIoSize', 'writeIoSize',
],
'port': [
'iops', 'readIops', 'writeIops',
'throughput', 'readThroughput', 'writeThroughput',
'responseTime'
],
'disk': [
'iops', 'readIops', 'writeIops',
'throughput', 'readThroughput', 'writeThroughput',
'responseTime'
],
'filesystem': [
'iops', 'readIops', 'writeIops',
'throughput', 'readThroughput', 'writeThroughput',
'readResponseTime', 'writeResponseTime',
'readIoSize', 'writeIoSize'
]
}
start_time = 1628472280000
end_time = 1628472900000
RestHandler.get_all_pools = mock.Mock(return_value=POOL_DATAS)
mock_pool.return_value = POOL_METRICS_DATAS
mock_exec.side_effect = [VOLUME_METRICS_DATAS, PORT_METRICS_DATAS,
DISK_METRICS_DATAS]
metrics = driver.collect_perf_metrics(context, '12345',
resource_metrics, start_time,
end_time)
self.assertEqual(metrics[0], METRICS_RESULT[0])
self.assertEqual(metrics[14], METRICS_RESULT[1])
self.assertEqual(metrics[34], METRICS_RESULT[2])
self.assertEqual(metrics[48], METRICS_RESULT[3])
def test_get_capabilities(self):
driver = create_driver()
cap = driver.get_capabilities(context)
self.assertIsNotNone(cap.get('resource_metrics'))
self.assertIsNotNone(cap.get('resource_metrics').get('storagePool'))
self.assertIsNotNone(cap.get('resource_metrics').get('volume'))
self.assertIsNotNone(cap.get('resource_metrics').get('port'))
self.assertIsNotNone(cap.get('resource_metrics').get('disk'))
def test_get_storage_host_groups(self):
driver = create_driver()
SSHPool.do_exec = mock.Mock(side_effect=[HOST_GROUP_DATAS,
HOST_ID_DATAS])
host_groups = driver.list_storage_host_groups(context)
self.assertDictEqual(host_groups.get('storage_host_groups')[0],
HOST_GROUP_RESULT[0])
def test_get_volume_groups(self):
driver = create_driver()
SSHPool.do_exec = mock.Mock(side_effect=[VOLUME_GROUP_DATAS,
VOLUME_ID_DATAS])
volume_groups = driver.list_volume_groups(context)
self.assertDictEqual(volume_groups.get('volume_groups')[0],
VOLUME_GROUP_RESULT[0])
def test_storage_hosts(self):
driver = create_driver()
with mock.patch.object(RestHandler, 'get_resinfo_call',
side_effect=HOST_DATAS):
storage_hosts = driver.list_storage_hosts(context)
self.assertDictEqual(storage_hosts[0], HOST_RESULT[0])
def test_get_storage_host_initiators(self):
driver = create_driver()
SSHPool.do_exec = mock.Mock(side_effect=[HOST_ID_DATAS])
initiators = driver.list_storage_host_initiators(context)
self.assertDictEqual(initiators[0], INITIATOR_RESULT[0])
def test_get_masking_views(self):
driver = create_driver()
SSHPool.do_exec = mock.Mock(
side_effect=[VIEW_DATAS, HOST_ID_DATAS, HOST_GROUP_DATAS,
VOLUME_ID_DATAS, VOLUME_GROUP_DATAS])
views = driver.list_masking_views(context)
self.assertDictEqual(views[0], VIEW_RESULT[0])
def test_get_port_groups(self):
driver = create_driver()
SSHPool.do_exec = mock.Mock(side_effect=[VIEW_DATAS])
port_groups = driver.list_port_groups(context)
self.assertDictEqual(port_groups.get('port_groups')[0],
PORT_GROUP_RESULT[0])
```
#### File: netapp/netapp_ontap/test_netapp.py
```python
from unittest import TestCase, mock
import paramiko
from delfin.tests.unit.drivers.netapp.netapp_ontap import test_constans
from delfin import context
from delfin.drivers.netapp.dataontap.netapp_handler import NetAppHandler
from delfin.drivers.netapp.dataontap.cluster_mode import NetAppCmodeDriver
from delfin.drivers.utils.ssh_client import SSHPool
class Request:
def __init__(self):
self.environ = {'delfin.context': context.RequestContext()}
class TestNetAppCmodeDriver(TestCase):
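    # SSHPool.get, NetAppHandler.login and do_rest_call are mocked at class
    # level so the single netapp_client instance below can be shared by every
    # test without contacting a live ONTAP system.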
SSHPool.get = mock.Mock(
return_value={paramiko.SSHClient()})
NetAppHandler.login = mock.Mock()
NetAppHandler.do_rest_call = mock.Mock()
netapp_client = NetAppCmodeDriver(**test_constans.ACCESS_INFO)
def test_reset_connection(self):
kwargs = test_constans.ACCESS_INFO
NetAppHandler.login = mock.Mock()
netapp_client = NetAppCmodeDriver(**kwargs)
netapp_client.reset_connection(context, **kwargs)
netapp_client.netapp_handler.do_rest_call = mock.Mock()
self.assertEqual(netapp_client.netapp_handler.ssh_pool.ssh_host,
"192.168.159.130")
self.assertEqual(netapp_client.netapp_handler.ssh_pool.ssh_port, 22)
def test_get_storage(self):
SSHPool.do_exec = mock.Mock(
side_effect=[test_constans.SYSTEM_INFO,
test_constans.VERSION,
test_constans.SYSTEM_STATUS,
test_constans.CONTROLLER_INFO,
test_constans.CONTROLLER_IP_INFO,
test_constans.DISKS_INFO,
test_constans.PHYSICAL_INFO,
test_constans.ERROR_DISK_INFO,
test_constans.POOLS_INFO,
test_constans.AGGREGATE_DETAIL_INFO])
data = self.netapp_client.get_storage(context)
self.assertEqual(data['vendor'], 'NetApp')
def test_list_storage_pools(self):
SSHPool.do_exec = mock.Mock(
side_effect=[test_constans.POOLS_INFO,
test_constans.AGGREGATE_DETAIL_INFO])
data = self.netapp_client.list_storage_pools(context)
self.assertEqual(data[0]['name'], 'aggr0')
def test_list_volumes(self):
SSHPool.do_exec = mock.Mock(
side_effect=[test_constans.LUN_INFO,
test_constans.FS_INFO,
test_constans.THIN_FS_INFO,
test_constans.POOLS_INFO,
test_constans.AGGREGATE_DETAIL_INFO])
data = self.netapp_client.list_volumes(context)
self.assertEqual(data[0]['name'], 'lun_0')
def test_list_alerts(self):
SSHPool.do_exec = mock.Mock(
side_effect=[test_constans.ALERT_INFO])
data = self.netapp_client.list_alerts(context)
self.assertEqual(data[0]['alert_name'],
'DualPathToDiskShelf_Alert')
def test_clear_alters(self):
alert = {'alert_id': '123'}
SSHPool.do_exec = mock.Mock()
self.netapp_client.clear_alert(context, alert)
def test_parse_alert(self):
data = self.netapp_client.parse_alert(context, test_constans.TRAP_MAP)
self.assertEqual(data['alert_name'], 'DisabledInuseSASPort_Alert')
def test_list_controllers(self):
SSHPool.do_exec = mock.Mock(
side_effect=[test_constans.CONTROLLER_INFO,
test_constans.CONTROLLER_IP_INFO])
data = self.netapp_client.list_controllers(context)
self.assertEqual(data[0]['name'], 'cl-01')
def test_list_ports(self):
SSHPool.do_exec = mock.Mock(
side_effect=[test_constans.FC_PORT_INFO,
test_constans.PORTS_INFO])
data = self.netapp_client.list_ports(context)
self.assertEqual(data[0]['name'], 'cl-01:0a')
def test_list_disks(self):
SSHPool.do_exec = mock.Mock(
side_effect=[test_constans.DISKS_INFO,
test_constans.PHYSICAL_INFO,
test_constans.ERROR_DISK_INFO])
data = self.netapp_client.list_disks(context)
self.assertEqual(data[0]['name'], 'NET-1.1')
def test_list_qtrees(self):
SSHPool.do_exec = mock.Mock(side_effect=[
test_constans.QTREES_INFO, test_constans.FS_INFO])
data = self.netapp_client.list_qtrees(context)
self.assertEqual(data[0]['security_mode'], 'ntfs')
def test_list_shares(self):
SSHPool.do_exec = mock.Mock(
side_effect=[test_constans.QTREES_INFO,
test_constans.FS_INFO,
test_constans.SHARES_AGREEMENT_INFO,
test_constans.SHARE_VSERVER_INFO,
test_constans.SHARES_INFO,
test_constans.NFS_SHARE_INFO])
data = self.netapp_client.list_shares(context)
self.assertEqual(data[0]['name'], 'admin$')
def test_list_filesystems(self):
SSHPool.do_exec = mock.Mock(
side_effect=[test_constans.FS_INFO,
test_constans.THIN_FS_INFO,
test_constans.POOLS_INFO,
test_constans.AGGREGATE_DETAIL_INFO])
data = self.netapp_client.list_filesystems(context)
self.assertEqual(data[0]['name'], 'vol0')
def test_list_quotas(self):
SSHPool.do_exec = mock.Mock(
side_effect=[test_constans.QUOTAS_INFO])
data = self.netapp_client.list_quotas(context)
self.assertEqual(data[0]['file_soft_limit'], 1000)
def test_ge_alert_sources(self):
SSHPool.do_exec = mock.Mock(
side_effect=[test_constans.CLUSTER_IPS_INFO,
test_constans.CONTROLLER_INFO,
test_constans.CONTROLLER_IP_INFO])
data = self.netapp_client.get_alert_sources(context)
self.assertEqual(data[0]['host'], '172.16.31.10')
def test_get_storage_performance(self):
SSHPool.do_exec = mock.Mock(
side_effect=[
# storage
test_constans.SYSTEM_INFO,
# pool
test_constans.AGGREGATE_DETAIL_INFO,
# volume
test_constans.LUN_INFO,
])
self.netapp_client.netapp_handler.do_rest_call = mock.Mock(
side_effect=[ # storage
test_constans.CLUSTER_PER_INFO,
# pool
test_constans.POOL_PER_INFO,
test_constans.POOL_PER_INFO,
test_constans.POOL_PER_INFO,
# volume
test_constans.LUN_PER_INFO,
# port
test_constans.PORT_REST_INFO,
test_constans.FC_PER_INFO,
test_constans.PORT_REST_INFO,
test_constans.ETH_PER_INFO,
# fs
test_constans.FS_REST_INFO,
test_constans.FS_PER_INFO,
])
data = self.netapp_client.collect_perf_metrics(
context, test_constans.ACCESS_INFO['storage_id'],
test_constans.RESOURCE_METRICS,
start_time=str(1435214300000),
end_time=str(1495315500000))
self.assertEqual(data[0][2][1485343200000], 1000)
def test_get_capabilities_is_None(self):
data = self.netapp_client.get_capabilities(context, None)
self.assertEqual(data[9.8]['resource_metrics']['storage']
['throughput']['unit'], 'MB/s')
def test_get_capabilities(self):
data = self.netapp_client.\
get_capabilities(context,
{'firmware_version': 'NetApp Release 9.8R15'})
self.assertEqual(data['resource_metrics']['storage']
['throughput']['unit'], 'MB/s')
def test_list_storage_host_initiators(self):
SSHPool.do_exec = mock.Mock(
side_effect=[test_constans.ISCSI_INITIATOR_INFO,
test_constans.FC_INITIATOR_INFO,
test_constans.HOSTS_INFO])
data = self.netapp_client.list_storage_host_initiators(context)
self.assertEqual(data[0]['name'], 'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b')
def test_list_port_groups(self):
SSHPool.do_exec = mock.Mock(
side_effect=[test_constans.PORT_SET_INFO,
test_constans.LIF_INFO])
data = self.netapp_client.list_port_groups(context)
self.assertEqual(data['port_groups'][0]['name'], 'portgroup')
def test_list_storage_hosts(self):
SSHPool.do_exec = mock.Mock(
side_effect=[test_constans.HOSTS_INFO])
data = self.netapp_client.list_storage_hosts(context)
self.assertEqual(data[0]['name'], 'fcstart1')
def test_list_masking_views(self):
SSHPool.do_exec = mock.Mock(
side_effect=[test_constans.LUN_MAPPING_INFO,
test_constans.MAPPING_LUN_INFO,
test_constans.HOSTS_INFO])
data = self.netapp_client.list_masking_views(context)
self.assertEqual(data[0]['name'], 'fcstart1_lun_1')
def test_get_latest_perf_timestamp(self):
self.netapp_client.netapp_handler.do_rest_call = mock.Mock(
side_effect=[test_constans.CLUSTER_PER_INFO])
data = self.netapp_client.get_latest_perf_timestamp(context)
self.assertEqual(data, 1485343200000)
``` |
{
"source": "JosephVSN/cmus-scrobbler",
"score": 2
} |
#### File: cmus-scrobbler/cmus_scrobbler/config.py
```python
import os
import json
import lastfm
CONFIG_DIR = os.path.join(os.path.expanduser("~"), ".config", "cmus-scrobbler")
CONFIG_JSON = os.path.join(CONFIG_DIR, "cmus_scrobbler_config.json")
def _setup_config() -> bool:
"""Creates the API config directory and file"""
try:
os.mkdir(CONFIG_DIR)
except (IOError, PermissionError) as e:
print(f"DEBUG: Failed to create config directory due to '{e}'")
return False
try:
with open (CONFIG_JSON, "w") as cfg_f:
json.dump({"api_key": "", "secret_key": "", "session_key": "", "api_token": ""}, cfg_f)
except (IOError, PermissionError) as e:
print(f"DEBUG: Failed to create config file due to '{e}'")
return False
return True
def read_config() -> dict:
"""Wrapper for opening CONFIG_JSON and returning it as a dictionary"""
try:
with open (CONFIG_JSON) as cfg:
cfg_json = json.load(cfg)
except (json.decoder.JSONDecodeError, PermissionError, IOError) as e:
print(f"DEBUG: Refusing to read config, encountered '{e}'")
return None
return cfg_json
def update_config(api_key: str = None, secret_key: str = None, session_key: str = None, api_token: str = None) -> bool:
"""Updates the values in the API config file"""
if not os.path.exists(CONFIG_JSON):
if not _setup_config():
print("DEBUG: Refusing to update config, file/directory do not exist and were unable to be created")
return False
if not (cfg_json := read_config()):
return False
if api_key:
cfg_json["api_key"] = api_key
if secret_key:
cfg_json["secret_key"] = secret_key
if session_key:
cfg_json["session_key"] = session_key
if api_token:
cfg_json["api_token"] = api_token
try:
with open(CONFIG_JSON, 'w') as cfg:
json.dump(cfg_json, cfg)
except PermissionError as e:
print(f"DEBUG: Refusing to update config, encountered '{e}'")
return False
return True
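# Illustrative usage of the helpers above (hypothetical key values, not part of
# the original module):
#   update_config(api_key="abc123", secret_key="def456")
#   cfg = read_config()
#   cfg["api_key"]   # -> "abc123"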
``` |
{
"source": "JosephVSN/maze-gen",
"score": 4
} |
#### File: JosephVSN/maze-gen/Solver.py
```python
import copy
# Constants
WALL_V = "|"
WALL_H = "-"
STEP = "O"
PATH = "X"
class Solver:
maze = []
path = []
maze_exit = []
def __init__(self, maze, maze_exit):
self.maze = maze
self.maze_exit = maze_exit
def solve(self, cur_step, steps_taken):
paths = 0
# Check if finished
if self.is_solved(cur_step):
steps_taken.append(cur_step)
self.path = steps_taken
return True
else:
# Count paths
paths = self.count_paths(cur_step)
# Dead end
if paths == 0:
return None
# Single Path
elif paths == 1:
next_steps = copy.deepcopy(steps_taken)
next_steps.append(cur_step)
# North
if self.maze[cur_step[0]-1][cur_step[1]] == STEP and not self.traveled_to([cur_step[0],cur_step[1]], steps_taken):
new_move = self.solve([cur_step[0]-1, cur_step[1]], next_steps)
if new_move is not None:
return new_move
# East
elif self.maze[cur_step[0]][cur_step[1]+1] == STEP and not self.traveled_to([cur_step[0],cur_step[1]], steps_taken):
new_move = self.solve([cur_step[0], cur_step[1]+1], next_steps)
if new_move is not None:
return new_move
# West
elif self.maze[cur_step[0]][cur_step[1]-1] == STEP and not self.traveled_to([cur_step[0],cur_step[1]], steps_taken):
new_move = self.solve([cur_step[0], cur_step[1]-1], next_steps)
if new_move is not None:
return new_move
# South
elif self.maze[cur_step[0]+1][cur_step[1]] == STEP and not self.traveled_to([cur_step[0],cur_step[1]], steps_taken):
new_move = self.solve([cur_step[0]+1, cur_step[1]], next_steps)
if new_move is not None:
return new_move
else:
return None
# Multiple Paths
elif paths > 1:
# North
if self.maze[cur_step[0]-1][cur_step[1]] == STEP and not self.traveled_to([cur_step[0],cur_step[1]], steps_taken):
next_steps = copy.deepcopy(steps_taken)
next_steps.append(cur_step)
new_move = self.solve([cur_step[0]-1, cur_step[1]], next_steps)
if new_move is not None:
return new_move
# East
if self.maze[cur_step[0]][cur_step[1]+1] == STEP and not self.traveled_to([cur_step[0],cur_step[1]], steps_taken):
next_steps = copy.deepcopy(steps_taken)
next_steps.append(cur_step)
new_move = self.solve([cur_step[0], cur_step[1]+1], next_steps)
if new_move is not None:
return new_move
# West
if self.maze[cur_step[0]][cur_step[1]-1] == STEP and not self.traveled_to([cur_step[0],cur_step[1]], steps_taken):
next_steps = copy.deepcopy(steps_taken)
next_steps.append(cur_step)
new_move = self.solve([cur_step[0], cur_step[1]-1], next_steps)
if new_move is not None:
return new_move
# South
if self.maze[cur_step[0]+1][cur_step[1]] == STEP and not self.traveled_to([cur_step[0],cur_step[1]], steps_taken):
next_steps = copy.deepcopy(steps_taken)
next_steps.append(cur_step)
new_move = self.solve([cur_step[0]+1, cur_step[1]], next_steps)
if new_move is not None:
return new_move
else:
return None
def count_paths(self, cur_step):
path_count = 0
# North
if self.maze[cur_step[0]-1][cur_step[1]] == STEP:
path_count += 1
# East
if self.maze[cur_step[0]][cur_step[1]+1] == STEP:
path_count += 1
# South
if self.maze[cur_step[0]+1][cur_step[1]] == STEP:
path_count += 1
# West
if self.maze[cur_step[0]][cur_step[1]-1] == STEP:
path_count += 1
return path_count
def traveled_to(self, cur_step, steps_taken):
if len(steps_taken) != 0:
if cur_step in steps_taken:
return True
return False
def is_solved(self, cur_step):
if cur_step[0] == self.maze_exit[0] and cur_step[1] == self.maze_exit[1]:
return True
return False
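# Minimal usage sketch (hypothetical 3x3 grid; a real maze would come from the
# generator elsewhere in this repo):
#   grid = [[WALL_H, WALL_H, WALL_H],
#           [WALL_V, STEP,   STEP],
#           [WALL_H, WALL_H, WALL_H]]
#   solver = Solver(grid, [1, 2])
#   solver.solve([1, 1], [])   # populates solver.path on success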
``` |
{
"source": "Joseph-Wairimu/news-API",
"score": 3
} |
#### File: news-API/app/request.py
```python
import urllib.request, json
from .models import News, Articles
# News = news.News
# News API key, set by configure_request()
api_key = None
# News API base url, set by configure_request()
base_url = None
def configure_request(app):
global api_key,base_url
api_key = app.config['NEWS_API_KEY']
base_url = app.config['NEWS_API_BASE_URL']
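# Note: configure_request() must run (e.g. during app creation) before the
# request helpers below are used, since api_key and base_url start out as None.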
def get_news():
'''
Function that gets the json response to our url request
'''
get_news_url = base_url.format(api_key)
with urllib.request.urlopen(get_news_url) as url:
get_news_data = url.read()
get_news_response = json.loads(get_news_data)
news_results = None
if get_news_response['sources']:
news_results_list = get_news_response['sources']
news_results = process_results(news_results_list)
return news_results
def process_results(news_list):
'''
    Function that processes the news results and transforms them into a list of News objects
    Args:
        news_list: A list of dictionaries that contain news source details
    Returns:
        news_results: A list of News objects
'''
news_results = []
for news_item in news_list:
id = news_item.get('id')
name = news_item.get('name')
description = news_item.get('description')
url = news_item.get('url')
urlToImage = news_item.get('urlToImage')
language= news_item.get('language')
country = news_item.get('country')
category = news_item.get('category')
if url:
news_object = News(id,name,description,url, urlToImage ,language,country,category)
news_results.append(news_object)
return news_results
def get_new(id):
get_new_details_url = base_url.format(id,api_key)
with urllib.request.urlopen(get_new_details_url) as url:
new_details_data = url.read()
new_details_response = json.loads( new_details_data)
new_object = None
if new_details_response:
id = new_details_response.get('id')
name = new_details_response.get('name')
description = new_details_response.get('description')
url = new_details_response.get('url')
urlToImage = new_details_response.get('urlToImage')
language= new_details_response.get('language')
country = new_details_response.get('country')
category = new_details_response.get('category')
new_object = News(id,name,description,url,urlToImage,language,country,category)
return new_object
def get_article(id):
article_source_url = 'https://newsapi.org/v2/top-headlines?sources={}&apiKey=<KEY>'.format(id)
# print(article_source_url)
with urllib.request.urlopen(article_source_url) as url:
article_source_data = url.read()
article_source_response = json.loads(article_source_data)
article_source_results = None
if article_source_response['articles']:
article_source_list = article_source_response['articles']
article_source_results = process_articles_results(article_source_list)
return article_source_results
def process_articles_results(articles_list):
'''
    Function that processes the article JSON results from the API and transforms them into a list of Articles objects
'''
article_results = []
for article in articles_list:
author = article.get('author')
description = article.get('description')
url = article.get('url')
urlToImage = article.get('urlToImage')
publishedAt = article.get('publishedAt')
title = article.get ('title')
content= article.get('content')
if url:
article_objects = Articles(author,title,description,url,urlToImage,publishedAt,content)
article_results.append(article_objects)
return article_results
def get_category(category):
article_source_url = 'https://newsapi.org/v2/top-headlines?category={}&apiKey=<KEY>'.format(category)
# print(article_source_url)
with urllib.request.urlopen(article_source_url) as url:
article_source_data = url.read()
article_source_response = json.loads(article_source_data)
article_source_results = None
if article_source_response['articles']:
article_source_list = article_source_response['articles']
article_source_results = process_articles_results(article_source_list)
return article_source_results
def process_category_results(articles_list):
'''
function that processes the json files of articles from the api key
'''
article_results = []
for article in articles_list:
author = article.get('author')
description = article.get('description')
url = article.get('url')
urlToImage = article.get('urlToImage')
publishedAt = article.get('publishedAt')
title = article.get ('title')
content= article.get('content')
if url:
article_objects = Articles(author,title,description,url,urlToImage,publishedAt,content)
article_results.append(article_objects)
return article_results
``` |
{
"source": "Joseph-Waldron/StructureFold2",
"score": 3
} |
#### File: Joseph-Waldron/StructureFold2/react_heat_correct.py
```python
from sf2libs.structure_io import read_react, write_react
import argparse
import sys
#Functions
def sum_react(react_dict):
'''Sum of all the reactivities'''
return sum([sum(filter(lambda x: isinstance(x, float), v)) for v in react_dict.values()])
def apply_correction(react_dict,correction):
'''Applies a correction'''
atarashii = {}
for k, v in react_dict.items():
atarashii[k] = [x*correction if x!= 'NA' else 'NA' for x in v]
return atarashii
#Main Function
def main():
parser = argparse.ArgumentParser(description='Corrects two <.react>s for differential temperature')
parser.add_argument('lower',type=str,help='lower temp <.react> file')
parser.add_argument('higher',type=str,help='higher temp <.react> file')
parser.add_argument('-suffix',type=str,default='corrected',help='[default = corrected] Suffix for out files')
args = parser.parse_args()
#Sum all reactivities
cold_react,hot_react = map(read_react,[args.lower,args.higher])
cold_sum,hot_sum = map(sum_react,[cold_react,hot_react])
if not cold_react.keys() == hot_react.keys():
print 'Warning! Non-parallel transcript sets between reacts! Quitting...'
sys.exit()
#Calculate Corrections
heat_correction = (hot_sum+cold_sum)/(2*hot_sum)
cold_correction = (hot_sum+cold_sum)/(2*cold_sum)
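    # With these factors both corrected totals equal (hot_sum + cold_sum) / 2,
    # i.e. hot_sum * heat_correction == cold_sum * cold_correction.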
print 'Higher Temp values to be downscaled by factor: {}'.format(heat_correction)
print 'Lower Temp values to be upscaled by factor: {}'.format(cold_correction)
#Apply corrections
new_hot = apply_correction(hot_react,heat_correction)
new_cold = apply_correction(cold_react,cold_correction)
#Write Out
write_react(new_cold,args.lower.replace('.react','_'+args.suffix+'.react'))
write_react(new_hot,args.higher.replace('.react','_'+args.suffix+'.react'))
if __name__ == '__main__':
main()
``` |
{
"source": "JosephWardDotTech/helium",
"score": 3
} |
#### File: _impl/util/html.py
```python
from html.parser import HTMLParser
import re
def strip_tags(html):
s = TagStripper()
s.feed(html)
return s.get_data()
class TagStripper(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ''.join(self.fed)
def get_easily_readable_snippet(html):
html = normalize_whitespace(html)
try:
inner_start = html.index('>') + 1
inner_end = html.rindex('<', inner_start)
except ValueError:
return html
opening_tag = html[:inner_start]
closing_tag = html[inner_end:]
inner = html[inner_start:inner_end]
if '<' in inner or len(inner) > 60:
return '%s...%s' % (opening_tag, closing_tag)
else:
return html
def normalize_whitespace(html):
result = html.strip()
# Remove multiple spaces:
result = re.sub(r'\s+', ' ', result)
# Remove spaces after opening or before closing tags:
result = result.replace('> ', '>').replace(' <', '<')
return result
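# Rough behaviour of these helpers on illustrative inputs:
#   strip_tags('<p>Hello <b>world</b></p>')                    -> 'Hello world'
#   normalize_whitespace(' <p>  Hello   world </p> ')          -> '<p>Hello world</p>'
#   get_easily_readable_snippet('<div><p>long text</p></div>') -> '<div>...</div>'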
```
#### File: tests/api/test_click.py
```python
from helium import click, Config
from helium._impl.util.lang import TemporaryAttrValue
from tests.api import BrowserAT
class ClickTest(BrowserAT):
def get_page(self):
return 'test_click.html'
def test_click(self):
click("Click me!")
self.assertEqual('Success!', self.read_result_from_browser())
def test_click_non_existent_element(self):
with TemporaryAttrValue(Config, 'implicit_wait_secs', 1):
with self.assertRaises(LookupError):
click("Non-existent")
```
#### File: tests/api/test_kill_service_at_exit.py
```python
from psutil import NoSuchProcess
import psutil
class KillServiceAtExitAT:
def test_kill_service_at_exit(self):
self.start_browser_in_sub_process()
self.assertEqual([], self.get_new_running_services())
def start_browser_in_sub_process(self):
raise NotImplementedError()
def get_new_running_services(self):
return [s for s in self.get_running_services()
if s not in self.running_services_before]
def setUp(self):
self.running_services_before = self.get_running_services()
self.running_browsers_before = self.get_running_browsers()
def tearDown(self):
for service in self.get_new_running_services():
try:
service.terminate()
except NoSuchProcess:
# Process terminated already.
pass
for browser in self.get_new_running_browsers():
try:
browser.terminate()
except NoSuchProcess:
# Process terminated already.
pass
def get_new_running_browsers(self):
return [s for s in self.get_running_browsers()
if s not in self.running_browsers_before]
def get_running_services(self):
return self._get_running_processes(self.get_service_process_names())
def get_running_browsers(self):
return self._get_running_processes([self.get_browser_process_name()])
def _get_running_processes(self, image_names):
result = []
for p in psutil.process_iter():
if p.name in image_names:
result.append(p)
return result
def get_service_process_names(self):
raise NotImplementedError()
def get_browser_process_name(self):
raise NotImplementedError()
def start_browser(self):
raise NotImplementedError()
```
#### File: tests/api/test_no_driver.py
```python
from helium import *
from helium._impl import APIImpl
from unittest import TestCase
class NoDriverTest(TestCase):
def test_go_to_requires_driver(self):
self._check_requires_driver(lambda: go_to('google.com'))
def test_write_requires_driver(self):
self._check_requires_driver(lambda: write('foo'))
def test_press_requires_driver(self):
self._check_requires_driver(lambda: press(ENTER))
def test_click_requires_driver(self):
self._check_requires_driver(lambda: click("Sign in"))
def test_doubleclick_requires_driver(self):
self._check_requires_driver(lambda: doubleclick("Sign in"))
def test_drag_requires_driver(self):
self._check_requires_driver(lambda: drag("Drag me", to="Drop here"))
def test_find_all_requires_driver(self):
self._check_requires_driver(lambda: find_all(Button()))
def test_scroll_down_requires_driver(self):
self._check_requires_driver(lambda: scroll_down())
def test_scroll_up_requires_driver(self):
self._check_requires_driver(lambda: scroll_up())
def test_scroll_right_requires_driver(self):
self._check_requires_driver(lambda: scroll_right())
def test_scroll_left_requires_driver(self):
self._check_requires_driver(lambda: scroll_left())
def test_hover_requires_driver(self):
self._check_requires_driver(lambda: hover("Hi there!"))
def test_rightclick_requires_driver(self):
self._check_requires_driver(lambda: rightclick("Hi there!"))
def test_select_requires_driver(self):
self._check_requires_driver(lambda: select("Language", "English"))
def test_drag_file_requires_driver(self):
self._check_requires_driver(
lambda: drag_file(r'C:\test.txt', to="Here")
)
def test_attach_file_requires_driver(self):
self._check_requires_driver(lambda: attach_file(r'C:\test.txt'))
def test_refresh_requires_driver(self):
self._check_requires_driver(lambda: refresh())
def test_wait_until_requires_driver(self):
self._check_requires_driver(lambda: wait_until(lambda: True))
def test_switch_to_requires_driver(self):
self._check_requires_driver(lambda: switch_to('Popup'))
    def test_kill_browser_requires_driver(self):
        self._check_requires_driver(lambda: kill_browser())
    def test_highlight_requires_driver(self):
        self._check_requires_driver(lambda: highlight("Hi there!"))
def test_s_requires_driver(self):
self._check_requires_driver(lambda: S('#home'))
def test_text_requires_driver(self):
self._check_requires_driver(lambda: Text('Home'))
def test_link_requires_driver(self):
self._check_requires_driver(lambda: Link('Home'))
def test_list_item_requires_driver(self):
self._check_requires_driver(lambda: ListItem('Home'))
def test_button_requires_driver(self):
self._check_requires_driver(lambda: Button('Home'))
def test_image_requires_driver(self):
self._check_requires_driver(lambda: Image('Logo'))
def test_text_field_requires_driver(self):
self._check_requires_driver(lambda: TextField('File name'))
def test_combo_box_requires_driver(self):
self._check_requires_driver(lambda: ComboBox('Language'))
def test_check_box_requires_driver(self):
self._check_requires_driver(lambda: CheckBox('True?'))
def test_radio_button_requires_driver(self):
self._check_requires_driver(lambda: RadioButton('Option A'))
def test_window_requires_driver(self):
self._check_requires_driver(lambda: Window('Main'))
def test_alert_requires_driver(self):
self._check_requires_driver(lambda: Alert())
def _check_requires_driver(self, function):
with self.assertRaises(RuntimeError) as cm:
function()
self.assertEqual(APIImpl.DRIVER_REQUIRED_MESSAGE, cm.exception.args[0])
```
#### File: tests/api/test_text_impl.py
```python
from helium._impl import TextImpl
from helium._impl.selenium_wrappers import WebDriverWrapper
from tests.api import BrowserAT
class TextImplTest(BrowserAT):
def get_page(self):
return 'test_text_impl.html'
def test_empty_search_text_xpath(self):
xpath = TextImpl(WebDriverWrapper(self.driver))._get_search_text_xpath()
text_elements = self.driver.find_elements_by_xpath(xpath)
texts = [w.get_attribute('innerHTML') for w in text_elements]
self.assertEqual(
["A paragraph", "A paragraph inside a div",
"Another paragraph inside the div"],
sorted(texts)
)
```
#### File: tests/api/test_window.py
```python
from helium import Window, click, go_to, get_driver, wait_until
from tests.api.util import get_data_file_url
from tests.api import BrowserAT
class WindowTest(BrowserAT):
def get_page(self):
return 'test_window/test_window.html'
def test_window_exists(self):
self.assertTrue(Window('test_window').exists())
def test_window_not_exists(self):
self.assertFalse(Window('non-existent').exists())
def test_no_arg_window_exists(self):
self.assertTrue(Window().exists())
def test_handle(self):
self.assertTrue(Window('test_window').handle)
def test_title(self):
self.assertEqual('test_window', Window('test_window').title)
class MultipleWindowTest(WindowTest):
"""
The purpose of this Test is to run the same tests as WindowTest, but with an
additional pop up window open.
"""
@classmethod
def setUpClass(cls):
super().setUpClass()
go_to(get_data_file_url('test_window/test_window.html'))
click("Click here to open a popup.")
wait_until(Window('test_window - popup').exists)
def test_popup_window_exists(self):
self.assertTrue(Window('test_window - popup').exists())
def setUp(self):
# Don't let super go_to(...):
pass
@classmethod
def tearDownClass(cls):
popup_window_handle = Window("test_window - popup").handle
main_window_handle = Window("test_window").handle
get_driver().switch_to.window(popup_window_handle)
get_driver().close()
get_driver().switch_to.window(main_window_handle)
super().tearDownClass()
```
#### File: tests/api/test_write.py
```python
from helium import write, TextField
from tests.api import BrowserAT
class WriteTest(BrowserAT):
def get_page(self):
return 'test_write.html'
def test_write(self):
write("Hello World!")
self.assertEqual(
"Hello World!", TextField('Autofocus text field').value
)
def test_write_into(self):
write("Hi there!", into='Normal text field')
self.assertEqual("Hi there!", TextField('Normal text field').value)
def test_write_into_text_field_to_right_of(self):
write("Hi there!", into=(TextField(to_right_of='Normal text field')))
self.assertEqual("Hi there!", TextField('Normal text field').value)
``` |
{
"source": "JosephWardDotTech/SeleniumBase",
"score": 3
} |
#### File: SeleniumBase/examples/ip_cow_test.py
```python
import time
from seleniumbase import BaseCase
class MyTestClass(BaseCase):
def test_ip_cow(self):
self.open('https://www.ipcow.com/')
ip_data = self.get_text("div.box")
print("\n\n*** IP and Browser Data: ***")
print(ip_data)
print("\nThe browser will close automatically in 7 seconds...")
time.sleep(7)
```
#### File: SeleniumBase/examples/test_download_files.py
```python
import math
from seleniumbase import BaseCase
class DownloadTests(BaseCase):
def test_download_files(self):
self.open("https://pypi.org/project/seleniumbase/#files")
pkg_header = self.get_text("h1.package-header__name").strip()
pkg_name = pkg_header.replace(" ", "-")
whl_file = pkg_name + "-py2.py3-none-any.whl"
tar_gz_file = pkg_name + ".tar.gz"
# Click the links to download the files into: "./downloaded_files/"
# (If using Safari, IE, or Chromium Guest Mode: download directly.)
# (The default Downloads Folder can't be changed when using those.)
whl_selector = 'div#files a[href$="%s"]' % whl_file
tar_selector = 'div#files a[href$="%s"]' % tar_gz_file
if self.browser == "safari" or self.browser == "ie" or (
self.is_chromium() and self.guest_mode and not self.headless):
whl_href = self.get_attribute(whl_selector, "href")
tar_href = self.get_attribute(tar_selector, "href")
self.download_file(whl_href)
self.download_file(tar_href)
else:
self.click(whl_selector)
self.click(tar_selector)
# Verify that the downloaded files appear in the [Downloads Folder]
# (This only guarantees that the exact file name is in the folder.)
# (This does not guarantee that the downloaded files are complete.)
# (Later, we'll check that the files were downloaded successfully.)
self.assert_downloaded_file(whl_file)
self.assert_downloaded_file(tar_gz_file)
self.sleep(1) # Add more time to make sure downloads have completed
# Get the actual size of the downloaded files (in bytes)
whl_path = self.get_path_of_downloaded_file(whl_file)
with open(whl_path, 'rb') as f:
whl_file_bytes = len(f.read())
print("\n%s | Download = %s bytes." % (whl_file, whl_file_bytes))
tar_gz_path = self.get_path_of_downloaded_file(tar_gz_file)
with open(tar_gz_path, 'rb') as f:
tar_gz_file_bytes = len(f.read())
print("%s | Download = %s bytes." % (tar_gz_file, tar_gz_file_bytes))
# Check to make sure the downloaded files are not empty or too small
self.assert_true(whl_file_bytes > 5000)
self.assert_true(tar_gz_file_bytes > 5000)
# Get file sizes in kB to compare actual values with displayed values
whl_file_kb = whl_file_bytes / 1000.0
whl_line = self.get_text("tbody tr:nth-of-type(1) th")
whl_displayed_kb = float(whl_line.split("(")[1].split(" ")[0])
tar_gz_file_kb = tar_gz_file_bytes / 1000.0
tar_gz_line = self.get_text("tbody tr:nth-of-type(2) th")
tar_gz_displayed_kb = float(tar_gz_line.split("(")[1].split(" ")[0])
# Verify downloaded files are the correct size (account for rounding)
self.assert_true(abs(
math.floor(whl_file_kb) - math.floor(whl_displayed_kb)) < 2)
self.assert_true(abs(
math.floor(tar_gz_file_kb) - math.floor(tar_gz_displayed_kb)) < 2)
``` |
{
"source": "josephwccheng/basketball_game_video_analytics",
"score": 3
} |
#### File: josephwccheng/basketball_game_video_analytics/main.py
```python
from dotenv import load_dotenv
import os
import logging
import youtube_extractor
import json
def setup_tokens():
expected_env_vars = [
'YOUTUBE_DOWNLOAD',
'VIDEO_PATH'
]
# Refresh environment variables since load_dotenv doesn't override them if already set
for env_var in expected_env_vars:
if os.getenv(env_var) is not None:
del os.environ[env_var]
logging.info('Refreshed environment variable: {}'.format(env_var))
# Load environment variables saved in .env file
load_dotenv()
for env_var in expected_env_vars:
if os.getenv(env_var) is None:
raise ValueError(
'.env file is missing or {} has not been defined in the .env file'.format(
env_var)
)
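# A minimal .env sketch (values are illustrative, not taken from the repo):
#   YOUTUBE_DOWNLOAD=true
#   VIDEO_PATH=./.data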
# Step 1. Downloading Videos (Only Required once per video)
def download_videos():
with open('games.json') as json_file:
data = json.load(json_file)
ytExtractor = youtube_extractor.YoutubeExtractor()
ytExtractor.download(data[0]['url'])
def main():
setup_tokens()
# Step 1: Download videos only if the youtube download environment variable is true
if os.getenv('YOUTUBE_DOWNLOAD').lower() == "true":
download_videos()
else:
print("skipping video download")
# Step 2: Read the video from the .data folder
video_lists = os.listdir(os.getenv('VIDEO_PATH'))
    if len(video_lists) == 0:
print("no videos in .data folder")
else:
print(f"first video is: {video_lists[0]}")
main()
``` |
{
"source": "josephwhite13/netmiko",
"score": 3
} |
#### File: netmiko/ericsson/ericsson_ipos.py
```python
import re
from netmiko.base_connection import BaseConnection
class EricssonIposSSH(BaseConnection):
def check_enable_mode(self, check_string="#"):
"""
Check if in enable mode. Return boolean.
"""
return super().check_enable_mode(check_string=check_string)
def enable(self, cmd="enable 15", pattern="ssword", re_flags=re.IGNORECASE):
"""Enter enable mode."""
return super().enable(cmd=cmd, pattern=pattern, re_flags=re_flags)
def disable_paging(self, command="terminal length 0", delay_factor=1):
"""Disable paging default to a Cisco CLI method.
:param command: Device command to disable pagination of output
:type command: str
:param delay_factor: See __init__: global_delay_factor
:type delay_factor: int
"""
return super().disable_paging(command=command, delay_factor=delay_factor)
def set_terminal_width(self, command="terminal width 512", delay_factor=1):
"""CLI terminals try to automatically adjust the line based on the width of the terminal.
This causes the output to get distorted when accessed programmatically.
        Set terminal width to 512, which works on a broad set of devices.
:param command: Command string to send to the device
:type command: str
:param delay_factor: See __init__: global_delay_factor
:type delay_factor: int
"""
return super().set_terminal_width(command=command, delay_factor=delay_factor)
def send_config_set(self, config_commands=None, exit_config_mode=False, **kwargs):
"""Ericsson IPOS requires you not exit from configuration mode."""
return super().send_config_set(
config_commands=config_commands, exit_config_mode=exit_config_mode, **kwargs
)
def exit_enable_mode(self, exit_command="disable"):
"""
Exits enable (privileged exec) mode.
"""
return super().exit_enable_mode(exit_command=exit_command)
def check_config_mode(self, check_string=")#", pattern=""):
"""
Checks if the device is in configuration mode or not.
"""
return super().check_config_mode(check_string=check_string, pattern=pattern)
def config_mode(self, config_command="configure", pattern=""):
"""
Enter into configuration mode on remote device.
"""
if not pattern:
pattern = re.escape(self.base_prompt[:16])
return super().config_mode(config_command=config_command, pattern=pattern)
def exit_config_mode(self, exit_config="end", pattern="#"):
"""
Exit from configuration mode.
        Ericsson output:
end Commit configuration changes and return to exec mode
"""
return super().exit_config_mode(exit_config=exit_config, pattern=pattern)
def save_config(self, cmd="save config", confirm=True, confirm_response="yes"):
"""Saves configuration"""
if confirm:
output = self.send_command_timing(
command_string=cmd, strip_prompt=False, strip_command=False
)
if confirm_response:
output += self.send_command_timing(
confirm_response, strip_prompt=False, strip_command=False
)
else:
output += self.send_command_timing(
self.RETURN, strip_prompt=False, strip_command=False
)
else:
output = self.send_command(
command_string=cmd, strip_prompt=False, strip_command=False
)
return output
def commit(self, confirm=False, confirm_delay=None, comment="", delay_factor=1):
"""
Commit the candidate configuration.
Commit the entered configuration. Raise an error and return the failure
if the commit fails.
Automatically enters configuration mode
"""
delay_factor = self.select_delay_factor(delay_factor)
if confirm_delay and not confirm:
raise ValueError(
"Invalid arguments supplied to commit method both confirm and check"
)
command_string = "commit"
commit_marker = "Transaction committed"
if confirm:
if confirm_delay:
command_string = f"commit confirmed {confirm_delay}"
else:
command_string = "commit confirmed"
commit_marker = "Commit confirmed ,it will be rolled back within"
if comment:
if '"' in comment:
raise ValueError("Invalid comment contains double quote")
comment = f'"{comment}"'
command_string += f" comment {comment}"
output = self.config_mode()
output += self.send_command_expect(
command_string,
strip_prompt=False,
strip_command=False,
delay_factor=delay_factor,
)
if commit_marker not in output:
raise ValueError(f"Commit failed with the following errors:\n\n{output}")
self.exit_config_mode()
return output
```
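A minimal usage sketch for the Ericsson IPOS driver above, assuming the `ericsson_ipos` device type is registered with `ConnectHandler` as in recent netmiko releases; the host, credentials and configuration command are placeholders.
```python
from netmiko import ConnectHandler

device = {
    "device_type": "ericsson_ipos",
    "host": "192.0.2.10",   # placeholder management address
    "username": "admin",     # placeholder credentials
    "password": "password",
}

conn = ConnectHandler(**device)
conn.enable()  # sends "enable 15" per the driver default
# send_config_set() deliberately stays in configuration mode (see above),
# so the change set has to be committed explicitly afterwards.
conn.send_config_set(["hostname LAB-ROUTER"])
output = conn.commit()  # raises ValueError if the commit marker is not found
conn.disconnect()
```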
#### File: netmiko/extreme/extreme_slx_ssh.py
```python
import time
from netmiko.cisco_base_connection import CiscoSSHConnection
class ExtremeSlxSSH(CiscoSSHConnection):
"""Support for Extreme SLX."""
def enable(self, *args, **kwargs):
"""No enable mode on Extreme SLX."""
pass
def exit_enable_mode(self, *args, **kwargs):
"""No enable mode on Extreme Slx."""
pass
def special_login_handler(self, delay_factor=1):
"""Adding a delay after login."""
delay_factor = self.select_delay_factor(delay_factor)
self.write_channel(self.RETURN)
time.sleep(1 * delay_factor)
def save_config(
self,
cmd="copy running-config startup-config",
confirm=True,
confirm_response="y",
):
"""Save Config for Extreme SLX."""
return super().save_config(
cmd=cmd, confirm=confirm, confirm_response=confirm_response
)
```
#### File: netmiko/extreme/extreme_wing_ssh.py
```python
import time
from netmiko.cisco_base_connection import CiscoSSHConnection
class ExtremeWingSSH(CiscoSSHConnection):
"""Extreme WiNG support."""
def session_preparation(self):
"""Disable paging and set Max term width"""
self._test_channel_read(pattern=r">|#")
self.set_base_prompt()
self.set_terminal_width(command="terminal width 512", pattern="terminal")
self.disable_paging(command="no page")
time.sleep(0.3 * self.global_delay_factor)
self.clear_buffer()
```
#### File: netmiko/f5/f5_tmsh_ssh.py
```python
import time
from netmiko.base_connection import BaseConnection
class F5TmshSSH(BaseConnection):
def session_preparation(self):
"""Prepare the session after the connection has been established."""
self._test_channel_read()
self.set_base_prompt()
self.tmsh_mode()
self.set_base_prompt()
self._config_mode = False
cmd = 'run /util bash -c "stty cols 255"'
self.set_terminal_width(command=cmd, pattern="run")
self.disable_paging(
command="modify cli preference pager disabled display-threshold 0"
)
self.clear_buffer()
def tmsh_mode(self, delay_factor=1):
"""tmsh command is equivalent to config command on F5."""
delay_factor = self.select_delay_factor(delay_factor)
self.clear_buffer()
command = f"{self.RETURN}tmsh{self.RETURN}"
self.write_channel(command)
time.sleep(1 * delay_factor)
self.clear_buffer()
return None
def check_config_mode(self, check_string="", pattern=""):
"""Checks if the device is in configuration mode or not."""
return True
def config_mode(self, config_command=""):
"""No config mode for F5 devices."""
return ""
def exit_config_mode(self, exit_config=""):
"""No config mode for F5 devices."""
return ""
```
#### File: netmiko/netmiko/scp_handler.py
```python
import re
import os
import hashlib
import scp
import platform
class SCPConn(object):
"""
Establish a secure copy channel to the remote network device.
Must close the SCP connection to get the file to write to the remote filesystem
"""
def __init__(self, ssh_conn, socket_timeout=10.0, progress=None, progress4=None):
self.ssh_ctl_chan = ssh_conn
self.socket_timeout = socket_timeout
self.progress = progress
self.progress4 = progress4
self.establish_scp_conn()
def establish_scp_conn(self):
"""Establish the secure copy connection."""
ssh_connect_params = self.ssh_ctl_chan._connect_params_dict()
self.scp_conn = self.ssh_ctl_chan._build_ssh_client()
self.scp_conn.connect(**ssh_connect_params)
self.scp_client = scp.SCPClient(
self.scp_conn.get_transport(),
socket_timeout=self.socket_timeout,
progress=self.progress,
progress4=self.progress4,
)
def scp_transfer_file(self, source_file, dest_file):
"""Put file using SCP (for backwards compatibility)."""
self.scp_client.put(source_file, dest_file)
def scp_get_file(self, source_file, dest_file):
"""Get file using SCP."""
platform = self.ssh_ctl_chan.device_type
if "cisco_ios" in platform or "cisco_xe" in platform:
try:
self.scp_client.get(source_file, dest_file)
except EOFError:
pass
else:
self.scp_client.get(source_file, dest_file)
def scp_put_file(self, source_file, dest_file):
"""Put file using SCP."""
self.scp_client.put(source_file, dest_file)
def close(self):
"""Close the SCP connection."""
self.scp_conn.close()
class BaseFileTransfer(object):
"""Class to manage SCP file transfer and associated SSH control channel."""
def __init__(
self,
ssh_conn,
source_file,
dest_file,
file_system=None,
direction="put",
socket_timeout=10.0,
progress=None,
progress4=None,
hash_supported=True,
):
self.ssh_ctl_chan = ssh_conn
self.source_file = source_file
self.dest_file = dest_file
self.direction = direction
self.socket_timeout = socket_timeout
self.progress = progress
self.progress4 = progress4
auto_flag = (
"cisco_ios" in ssh_conn.device_type
or "cisco_xe" in ssh_conn.device_type
or "cisco_xr" in ssh_conn.device_type
)
if not file_system:
if auto_flag:
self.file_system = self.ssh_ctl_chan._autodetect_fs()
else:
raise ValueError("Destination file system not specified")
else:
self.file_system = file_system
if direction == "put":
self.source_md5 = self.file_md5(source_file) if hash_supported else None
self.file_size = os.stat(source_file).st_size
elif direction == "get":
self.source_md5 = (
self.remote_md5(remote_file=source_file) if hash_supported else None
)
self.file_size = self.remote_file_size(remote_file=source_file)
else:
raise ValueError("Invalid direction specified")
def __enter__(self):
"""Context manager setup"""
self.establish_scp_conn()
return self
def __exit__(self, exc_type, exc_value, traceback):
"""Context manager cleanup."""
self.close_scp_chan()
def establish_scp_conn(self):
"""Establish SCP connection."""
self.scp_conn = SCPConn(
self.ssh_ctl_chan,
socket_timeout=self.socket_timeout,
progress=self.progress,
progress4=self.progress4,
)
def close_scp_chan(self):
"""Close the SCP connection to the remote network device."""
self.scp_conn.close()
self.scp_conn = None
def remote_space_available(self, search_pattern=r"(\d+) \w+ free"):
"""Return space available on remote device."""
remote_cmd = f"dir {self.file_system}"
remote_output = self.ssh_ctl_chan.send_command_expect(remote_cmd)
match = re.search(search_pattern, remote_output)
if "kbytes" in match.group(0) or "Kbytes" in match.group(0):
return int(match.group(1)) * 1000
return int(match.group(1))
def _remote_space_available_unix(self, search_pattern=""):
"""Return space available on *nix system (BSD/Linux)."""
self.ssh_ctl_chan._enter_shell()
remote_cmd = f"/bin/df -k {self.file_system}"
remote_output = self.ssh_ctl_chan.send_command(
remote_cmd, expect_string=r"[\$#]"
)
# Try to ensure parsing is correct:
# Filesystem 1K-blocks Used Avail Capacity Mounted on
# /dev/bo0s3f 1264808 16376 1147248 1% /cf/var
remote_output = remote_output.strip()
output_lines = remote_output.splitlines()
# First line is the header; second is the actual file system info
header_line = output_lines[0]
filesystem_line = output_lines[1]
if "Filesystem" not in header_line or "Avail" not in header_line.split()[3]:
# Filesystem 1K-blocks Used Avail Capacity Mounted on
msg = "Parsing error, unexpected output from {}:\n{}".format(
remote_cmd, remote_output
)
raise ValueError(msg)
space_available = filesystem_line.split()[3]
if not re.search(r"^\d+$", space_available):
msg = "Parsing error, unexpected output from {}:\n{}".format(
remote_cmd, remote_output
)
raise ValueError(msg)
self.ssh_ctl_chan._return_cli()
return int(space_available) * 1024
def local_space_available(self):
"""Return space available on local filesystem."""
if platform.system() == "Windows":
import ctypes
free_bytes = ctypes.c_ulonglong(0)
ctypes.windll.kernel32.GetDiskFreeSpaceExW(
ctypes.c_wchar_p("."), None, None, ctypes.pointer(free_bytes)
)
return free_bytes.value
else:
destination_stats = os.statvfs(".")
return destination_stats.f_bsize * destination_stats.f_bavail
def verify_space_available(self, search_pattern=r"(\d+) \w+ free"):
"""Verify sufficient space is available on destination file system (return boolean)."""
if self.direction == "put":
space_avail = self.remote_space_available(search_pattern=search_pattern)
elif self.direction == "get":
space_avail = self.local_space_available()
if space_avail > self.file_size:
return True
return False
def check_file_exists(self, remote_cmd=""):
"""Check if the dest_file already exists on the file system (return boolean)."""
if self.direction == "put":
if not remote_cmd:
remote_cmd = f"dir {self.file_system}/{self.dest_file}"
remote_out = self.ssh_ctl_chan.send_command_expect(remote_cmd)
search_string = r"Directory of .*{0}".format(self.dest_file)
if (
"Error opening" in remote_out
or "No such file or directory" in remote_out
or "Path does not exist" in remote_out
):
return False
elif re.search(search_string, remote_out, flags=re.DOTALL):
return True
else:
raise ValueError("Unexpected output from check_file_exists")
elif self.direction == "get":
return os.path.exists(self.dest_file)
def _check_file_exists_unix(self, remote_cmd=""):
"""Check if the dest_file already exists on the file system (return boolean)."""
if self.direction == "put":
self.ssh_ctl_chan._enter_shell()
remote_cmd = f"ls {self.file_system}"
remote_out = self.ssh_ctl_chan.send_command(
remote_cmd, expect_string=r"[\$#]"
)
self.ssh_ctl_chan._return_cli()
return self.dest_file in remote_out
elif self.direction == "get":
return os.path.exists(self.dest_file)
def remote_file_size(self, remote_cmd="", remote_file=None):
"""Get the file size of the remote file."""
if remote_file is None:
if self.direction == "put":
remote_file = self.dest_file
elif self.direction == "get":
remote_file = self.source_file
if not remote_cmd:
remote_cmd = f"dir {self.file_system}/{remote_file}"
remote_out = self.ssh_ctl_chan.send_command(remote_cmd)
        # Strip out the "Directory of flash:/filename" line
remote_out = re.split(r"Directory of .*", remote_out)
remote_out = "".join(remote_out)
# Match line containing file name
escape_file_name = re.escape(remote_file)
pattern = r".*({}).*".format(escape_file_name)
match = re.search(pattern, remote_out)
if match:
line = match.group(0)
# Format will be 26 -rw- 6738 Jul 30 2016 19:49:50 -07:00 filename
file_size = line.split()[2]
else:
raise IOError("Unable to parse 'dir' output in remote_file_size method")
if "Error opening" in remote_out or "No such file or directory" in remote_out:
raise IOError("Unable to find file on remote system")
else:
return int(file_size)
def _remote_file_size_unix(self, remote_cmd="", remote_file=None):
"""Get the file size of the remote file."""
if remote_file is None:
if self.direction == "put":
remote_file = self.dest_file
elif self.direction == "get":
remote_file = self.source_file
remote_file = f"{self.file_system}/{remote_file}"
if not remote_cmd:
remote_cmd = f"ls -l {remote_file}"
self.ssh_ctl_chan._enter_shell()
remote_out = self.ssh_ctl_chan.send_command(remote_cmd, expect_string=r"[\$#]")
self.ssh_ctl_chan._return_cli()
if "No such file or directory" in remote_out:
raise IOError("Unable to find file on remote system")
escape_file_name = re.escape(remote_file)
pattern = r"^.* ({}).*$".format(escape_file_name)
match = re.search(pattern, remote_out, flags=re.M)
if match:
# Format: -rw-r--r-- 1 pyclass wheel 12 Nov 5 19:07 /var/tmp/test3.txt
line = match.group(0)
file_size = line.split()[4]
return int(file_size)
raise ValueError(
"Search pattern not found for remote file size during SCP transfer."
)
def file_md5(self, file_name, add_newline=False):
"""Compute MD5 hash of file.
add_newline is needed to support Cisco IOS MD5 calculation which expects the newline in
the string
Args:
file_name: name of file to get md5 digest of
add_newline: add newline to end of file contents or not
"""
file_hash = hashlib.md5()
with open(file_name, "rb") as f:
while True:
file_contents = f.read(512)
if not file_contents:
if add_newline:
                        file_hash.update(b"\n")  # include the trailing newline in the digest
break
file_hash.update(file_contents)
return file_hash.hexdigest()
@staticmethod
def process_md5(md5_output, pattern=r"=\s+(\S+)"):
"""
Process the string to retrieve the MD5 hash
Output from Cisco IOS (ASA is similar)
.MD5 of flash:file_name Done!
verify /md5 (flash:file_name) = 410db2a7015eaa42b1fe71f1bf3d59a2
"""
match = re.search(pattern, md5_output)
if match:
return match.group(1)
else:
raise ValueError(f"Invalid output from MD5 command: {md5_output}")
def compare_md5(self):
"""Compare md5 of file on network device to md5 of local file."""
if self.direction == "put":
remote_md5 = self.remote_md5()
return self.source_md5 == remote_md5
elif self.direction == "get":
local_md5 = self.file_md5(self.dest_file)
return self.source_md5 == local_md5
def remote_md5(self, base_cmd="verify /md5", remote_file=None):
"""Calculate remote MD5 and returns the hash.
This command can be CPU intensive on the remote device.
"""
if remote_file is None:
if self.direction == "put":
remote_file = self.dest_file
elif self.direction == "get":
remote_file = self.source_file
remote_md5_cmd = f"{base_cmd} {self.file_system}/{remote_file}"
dest_md5 = self.ssh_ctl_chan.send_command(remote_md5_cmd, max_loops=1500)
dest_md5 = self.process_md5(dest_md5)
return dest_md5
def transfer_file(self):
"""SCP transfer file."""
if self.direction == "put":
self.put_file()
elif self.direction == "get":
self.get_file()
def get_file(self):
"""SCP copy the file from the remote device to local system."""
source_file = f"{self.file_system}/{self.source_file}"
self.scp_conn.scp_get_file(source_file, self.dest_file)
self.scp_conn.close()
def put_file(self):
"""SCP copy the file from the local system to the remote device."""
destination = f"{self.file_system}/{self.dest_file}"
self.scp_conn.scp_transfer_file(self.source_file, destination)
# Must close the SCP connection to get the file written (flush)
self.scp_conn.close()
def verify_file(self):
"""Verify the file has been transferred correctly."""
return self.compare_md5()
def enable_scp(self, cmd=None):
"""
Enable SCP on remote device.
Defaults to Cisco IOS command
"""
if cmd is None:
cmd = ["ip scp server enable"]
elif not hasattr(cmd, "__iter__"):
cmd = [cmd]
self.ssh_ctl_chan.send_config_set(cmd)
def disable_scp(self, cmd=None):
"""
Disable SCP on remote device.
Defaults to Cisco IOS command
"""
if cmd is None:
cmd = ["no ip scp server enable"]
elif not hasattr(cmd, "__iter__"):
cmd = [cmd]
self.ssh_ctl_chan.send_config_set(cmd)
```
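A hedged sketch of driving `BaseFileTransfer` as a context manager, using only the methods defined above (netmiko normally exposes platform-specific wrappers such as `FileTransfer`); the host, credentials and file names are placeholders.
```python
from netmiko import ConnectHandler

ssh_conn = ConnectHandler(
    device_type="cisco_ios",
    host="192.0.2.20",   # placeholder
    username="admin",    # placeholder
    password="password",
)

with BaseFileTransfer(
    ssh_conn,
    source_file="config_backup.txt",   # local file to upload (placeholder)
    dest_file="config_backup.txt",
    file_system="flash:",
    direction="put",
) as transfer:
    if not transfer.check_file_exists() and transfer.verify_space_available():
        transfer.enable_scp()          # "ip scp server enable" on Cisco IOS
        transfer.transfer_file()
        assert transfer.verify_file()  # compares local and remote MD5 digests
```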
#### File: netmiko/tests/test_cisco_ios_serial.py
```python
from netmiko import ConnectHandler
import serial
def main():
"""
This will run an command via serial on an cisco ios switch and so
serial cable must be attached to the device
"""
serialhandle = {
"device_type": "cisco_ios_serial",
"port": "USB Serial", # can be COM<number> or any line you can get from
# serial.tools.list_ports.comports()
"username": "<username>",
"password": "<password>",
"secret": "<secret>",
"serial_settings": { # this are the default values
"baudrate": 9600,
"bytesize": serial.EIGHTBITS,
"parity": serial.PARITY_NONE,
"stopbits": serial.STOPBITS_ONE,
},
}
net_connect = ConnectHandler(**serialhandle)
net_connect.enable()
output = net_connect.send_command("show run")
net_connect.disconnect()
print(output)
if __name__ == "__main__":
main()
``` |
{
"source": "josephwhittington/make_json_from_images",
"score": 4
} |
#### File: josephwhittington/make_json_from_images/make_json.py
```python
import os
import json
from sys import argv
def main(argv):
parsed_args = parse_args(argv)
file_name_prop = parsed_args['first']
    extensions_to_check = ['.jpg', '.jpeg', '.png']
dir_path = os.path.dirname(os.path.realpath(__file__))
json_obj = []
for file_name in os.listdir(dir_path):
        if any(ext in file_name for ext in extensions_to_check):
working_obj = {file_name_prop: file_name}
if parsed_args['additional']:
for arg in parsed_args['additional']:
working_obj[arg] = ""
json_obj.append(working_obj)
make_json(json_obj, parsed_args['out'])
# saves the output to a json file
def make_json(obj, out):
f = open(out, 'w+')
f.write(json.dumps(obj))
f.close()
# parses the arguments into usable blocks
def parse_args(args):
flags = ['--first', '--additional', '--output']
configs = {
'first': '',
'out': '',
'additional': []
}
# loop through the arguments and look for flags
for arg in args:
if any(flag in arg for flag in flags):
if '--first' in arg:
configs['first'] = arg[arg.index('=')+1:]
elif '--output' in arg:
configs['out'] = arg[arg.index('=')+1:] + '.json'
elif '--additional' in arg:
additional_args = arg[arg.index('=')+1:].split(',')
configs['additional'] = additional_args
return configs
# Standard convention: run main() only when the file is executed as a program rather than imported
if __name__ == '__main__':
main(argv)
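# Example invocation (illustrative flags and file names):
#   python make_json.py --first=image --additional=title,caption --output=gallery
# scans the script's directory for .jpg/.jpeg/.png files and writes gallery.json,
# e.g. [{"image": "photo1.jpg", "title": "", "caption": ""}, ...]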
``` |
{
"source": "josephwillard/hy",
"score": 2
} |
#### File: hy/hy/importer.py
```python
from __future__ import absolute_import
import sys
import os
import ast
import inspect
import pkgutil
import re
import io
import types
import tempfile
import importlib
import __future__
from functools import partial
from hy.errors import HyTypeError
from hy.compiler import hy_compile
from hy.lex import tokenize, LexException
from hy.models import HyExpression, HySymbol
from hy._compat import string_types, PY3
hy_ast_compile_flags = (__future__.CO_FUTURE_DIVISION |
__future__.CO_FUTURE_PRINT_FUNCTION)
def ast_compile(ast, filename, mode):
"""Compile AST.
Parameters
----------
ast : instance of `ast.AST`
filename : str
Filename used for run-time error messages
mode: str
`compile` mode parameter
Returns
-------
out : instance of `types.CodeType`
"""
return compile(ast, filename, mode, hy_ast_compile_flags)
def hy_parse(source):
"""Parse a Hy source string.
Parameters
----------
source: string
Source code to parse.
Returns
-------
    out : instance of `HyExpression`
        The parsed forms wrapped in a single `do` expression.
"""
source = re.sub(r'\A#!.*', '', source)
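    # e.g. hy_parse('(+ 1 1)') returns a HyExpression equivalent to (do (+ 1 1))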
return HyExpression([HySymbol("do")] + tokenize(source + "\n"))
def hy_eval(hytree, namespace=None, module_name=None, ast_callback=None):
"""Evaluates a quoted expression and returns the value.
The optional second and third arguments specify the dictionary of globals
to use and the module name. The globals dictionary defaults to ``(local)``
and the module name defaults to the name of the current module.
Examples
--------
=> (eval '(print "Hello World"))
"Hello World"
If you want to evaluate a string, use ``read-str`` to convert it to a
form first:
=> (eval (read-str "(+ 1 1)"))
2
Parameters
----------
hytree: a Hy expression tree
Source code to parse.
namespace: dict, optional
Namespace in which to evaluate the Hy tree. Defaults to the calling
frame.
module_name: str, optional
Name of the module to which the Hy tree is assigned. Defaults to
the calling frame's module, if any, and '__eval__' otherwise.
ast_callback: callable, optional
A callback that is passed the Hy compiled tree and resulting
expression object, in that order, after compilation but before
evaluation.
Returns
-------
out : Result of evaluating the Hy compiled tree.
"""
if namespace is None:
frame = inspect.stack()[1][0]
namespace = inspect.getargvalues(frame).locals
if module_name is None:
m = inspect.getmodule(inspect.stack()[1][0])
module_name = '__eval__' if m is None else m.__name__
if not isinstance(module_name, string_types):
raise TypeError("Module name must be a string")
_ast, expr = hy_compile(hytree, module_name, get_expr=True)
# Spoof the positions in the generated ast...
for node in ast.walk(_ast):
node.lineno = 1
node.col_offset = 1
for node in ast.walk(expr):
node.lineno = 1
node.col_offset = 1
if ast_callback:
ast_callback(_ast, expr)
if not isinstance(namespace, dict):
raise TypeError("Globals must be a dictionary")
# Two-step eval: eval() the body of the exec call
eval(ast_compile(_ast, "<eval_body>", "exec"), namespace)
# Then eval the expression context and return that
return eval(ast_compile(expr, "<eval>", "eval"), namespace)
def cache_from_source(source_path):
"""Get the cached bytecode file name for a given source file name.
This function's name is set to mirror Python 3.x's
`importlib.util.cache_from_source`, which is also used when available.
Parameters
----------
source_path : str
Path of the source file
Returns
-------
out : str
Path of the corresponding bytecode file that may--or may
not--actually exist.
"""
if PY3:
return importlib.util.cache_from_source(source_path)
else:
# If source_path has a file extension, replace it with ".pyc".
# Otherwise, just append ".pyc".
d, f = os.path.split(source_path)
return os.path.join(d, re.sub(r"(?:\.[^.]+)?\Z", ".pyc", f))
def _hy_code_from_file(filename, loader_type=None):
"""Use PEP-302 loader to produce code for a given Hy source file."""
full_fname = os.path.abspath(filename)
fname_path, fname_file = os.path.split(full_fname)
modname = os.path.splitext(fname_file)[0]
sys.path.insert(0, fname_path)
try:
if loader_type is None:
loader = pkgutil.get_loader(modname)
else:
loader = loader_type(modname, full_fname)
code = loader.get_code(modname)
finally:
sys.path.pop(0)
return code
def _get_code_from_file(run_name, fname=None,
hy_src_check=lambda x: x.endswith('.hy')):
"""A patch of `runpy._get_code_from_file` that will also run and cache Hy
code.
"""
if fname is None and run_name is not None:
fname = run_name
# Check for bytecode first. (This is what the `runpy` version does!)
with open(fname, "rb") as f:
code = pkgutil.read_code(f)
if code is None:
if hy_src_check(fname):
code = _hy_code_from_file(fname, loader_type=HyLoader)
else:
# Try normal source
with open(fname, "rb") as f:
# This code differs from `runpy`'s only in that we
# force decoding into UTF-8.
source = f.read().decode('utf-8')
code = compile(source, fname, 'exec')
return (code, fname) if PY3 else code
if PY3:
importlib.machinery.SOURCE_SUFFIXES.insert(0, '.hy')
_py_source_to_code = importlib.machinery.SourceFileLoader.source_to_code
def _could_be_hy_src(filename):
return (os.path.isfile(filename) and
(filename.endswith('.hy') or
not any(filename.endswith(ext)
for ext in importlib.machinery.SOURCE_SUFFIXES[1:])))
def _hy_source_to_code(self, data, path, _optimize=-1):
if _could_be_hy_src(path):
source = data.decode("utf-8")
try:
hy_tree = hy_parse(source)
data = hy_compile(hy_tree, self.name)
except (HyTypeError, LexException) as e:
if e.source is None:
e.source = source
e.filename = path
raise
return _py_source_to_code(self, data, path, _optimize=_optimize)
importlib.machinery.SourceFileLoader.source_to_code = _hy_source_to_code
# This is actually needed; otherwise, pre-created finders assigned to the
# current dir (i.e. `''`) in `sys.path` will not catch absolute imports of
# directory-local modules!
sys.path_importer_cache.clear()
# Do this one just in case?
importlib.invalidate_caches()
# XXX: These and the 2.7 counterparts below aren't truly cross-compliant.
# They're useful for testing, though.
HyImporter = importlib.machinery.FileFinder
HyLoader = importlib.machinery.SourceFileLoader
else:
import imp
import py_compile
import marshal
import struct
import traceback
from pkgutil import ImpImporter, ImpLoader
def _could_be_hy_src(filename):
return (filename.endswith('.hy') or
(os.path.isfile(filename) and
not any(filename.endswith(s[0]) for s in imp.get_suffixes())))
class HyLoader(ImpLoader, object):
def __init__(self, fullname, filename, fileobj=None, etc=None):
"""This constructor is designed for some compatibility with
SourceFileLoader."""
if etc is None and filename is not None:
if _could_be_hy_src(filename):
etc = ('.hy', 'U', imp.PY_SOURCE)
if fileobj is None:
fileobj = io.open(filename, 'rU', encoding='utf-8')
super(HyLoader, self).__init__(fullname, fileobj, filename, etc)
def exec_module(self, module, fullname=None):
fullname = self._fix_name(fullname)
code = self.get_code(fullname)
eval(code, module.__dict__)
def load_module(self, fullname=None):
"""Same as `pkgutil.ImpLoader`, with an extra check for Hy
source"""
fullname = self._fix_name(fullname)
ext_type = self.etc[0]
mod_type = self.etc[2]
mod = None
pkg_path = os.path.join(self.filename, '__init__.hy')
if ext_type == '.hy' or (
mod_type == imp.PKG_DIRECTORY and
os.path.isfile(pkg_path)):
was_in_sys = fullname in sys.modules
if was_in_sys:
mod = sys.modules[fullname]
else:
mod = sys.modules.setdefault(
fullname, imp.new_module(fullname))
# TODO: Should we set these only when not in `sys.modules`?
if mod_type == imp.PKG_DIRECTORY:
mod.__file__ = pkg_path
mod.__path__ = [self.filename]
mod.__package__ = fullname
else:
# mod.__path__ = self.filename
mod.__file__ = self.get_filename(fullname)
mod.__package__ = '.'.join(fullname.split('.')[:-1])
mod.__name__ = fullname
try:
self.exec_module(mod, fullname=fullname)
except Exception:
# Follow Python 2.7 logic and only remove a new, bad
# module; otherwise, leave the old--and presumably
# good--module in there.
if not was_in_sys:
del sys.modules[fullname]
raise
if mod is None:
self._reopen()
try:
mod = imp.load_module(fullname, self.file, self.filename,
self.etc)
finally:
if self.file:
self.file.close()
mod.__loader__ = self
return mod
def _reopen(self):
"""Same as `pkgutil.ImpLoader`, with an extra check for Hy
source"""
if self.file and self.file.closed:
ext_type = self.etc[0]
if ext_type == '.hy':
self.file = io.open(self.filename, 'rU', encoding='utf-8')
else:
super(HyLoader, self)._reopen()
def byte_compile_hy(self, fullname=None):
fullname = self._fix_name(fullname)
if fullname is None:
fullname = self.fullname
try:
hy_source = self.get_source(fullname)
hy_tree = hy_parse(hy_source)
hy_ast = hy_compile(hy_tree, fullname)
code = compile(hy_ast, self.filename, 'exec',
hy_ast_compile_flags)
except (HyTypeError, LexException) as e:
if e.source is None:
e.source = hy_source
e.filename = self.filename
raise
if not sys.dont_write_bytecode:
try:
hyc_compile(code)
except IOError:
pass
return code
def get_code(self, fullname=None):
"""Same as `pkgutil.ImpLoader`, with an extra check for Hy
source"""
fullname = self._fix_name(fullname)
ext_type = self.etc[0]
if ext_type == '.hy':
# Looks like we have to manually check for--and update--
# the bytecode.
t_py = long(os.stat(self.filename).st_mtime)
pyc_file = cache_from_source(self.filename)
if os.path.isfile(pyc_file):
t_pyc = long(os.stat(pyc_file).st_mtime)
if t_pyc is not None and t_pyc >= t_py:
with open(pyc_file, 'rb') as f:
if f.read(4) == imp.get_magic():
t = struct.unpack('<I', f.read(4))[0]
if t == t_py:
self.code = marshal.load(f)
if self.code is None:
# There's no existing bytecode, or bytecode timestamp
# is older than the source file's.
self.code = self.byte_compile_hy(fullname)
if self.code is None:
super(HyLoader, self).get_code(fullname=fullname)
return self.code
def _get_delegate(self):
return HyImporter(self.filename).find_module('__init__')
class HyImporter(ImpImporter, object):
def __init__(self, path=None):
# We need to be strict about the types of files this importer will
# handle. To start, if the path is not the current directory in
# (represented by '' in `sys.path`), then it must be a supported
# file type or a directory. If it isn't, this importer is not
# suitable: throw an exception.
if path == '' or os.path.isdir(path) or (
os.path.isfile(path) and path.endswith('.hy')):
self.path = path
else:
raise ImportError('Invalid path: {}'.format(path))
def find_loader(self, fullname):
return self.find_module(fullname, path=None)
def find_module(self, fullname, path=None):
subname = fullname.split(".")[-1]
if subname != fullname and self.path is None:
return None
if self.path is None:
path = None
else:
path = [os.path.realpath(self.path)]
fileobj, file_path, etc = None, None, None
# The following are excerpts from the later pure Python
# implementations of the `imp` module (e.g. in Python 3.6).
if path is None:
path = sys.path
for entry in path:
if (os.path.isfile(entry) and subname == '__main__' and
entry.endswith('.hy')):
file_path = entry
fileobj = io.open(file_path, 'rU', encoding='utf-8')
etc = ('.hy', 'U', imp.PY_SOURCE)
break
else:
file_path = os.path.join(entry, subname)
path_init = os.path.join(file_path, '__init__.hy')
if os.path.isfile(path_init):
fileobj = None
etc = ('', '', imp.PKG_DIRECTORY)
break
file_path = file_path + '.hy'
if os.path.isfile(file_path):
fileobj = io.open(file_path, 'rU', encoding='utf-8')
etc = ('.hy', 'U', imp.PY_SOURCE)
break
else:
try:
fileobj, file_path, etc = imp.find_module(subname, path)
except (ImportError, IOError):
return None
return HyLoader(fullname, file_path, fileobj, etc)
sys.path_hooks.append(HyImporter)
sys.path_importer_cache.clear()
_py_compile_compile = py_compile.compile
def hyc_compile(file_or_code, cfile=None, dfile=None, doraise=False):
"""Write a Hy file, or code object, to pyc.
This is a patched version of Python 2.7's `py_compile.compile`.
Also, it tries its best to write the bytecode file atomically.
Parameters
----------
file_or_code : str or instance of `types.CodeType`
A filename for a Hy or Python source file or its corresponding code
object.
cfile : str, optional
The filename to use for the bytecode file. If `None`, use the
standard bytecode filename determined by `cache_from_source`.
dfile : str, optional
The filename to use for compile-time errors.
doraise : bool, default False
If `True` raise compilation exceptions; otherwise, ignore them.
Returns
-------
out : str
The resulting bytecode file name. Python 3.x returns this, but
Python 2.7 doesn't; this function does for convenience.
"""
if isinstance(file_or_code, types.CodeType):
codeobject = file_or_code
filename = codeobject.co_filename
else:
filename = file_or_code
with open(filename, 'rb') as f:
source_str = f.read().decode('utf-8')
try:
flags = None
if _could_be_hy_src(filename):
hy_tree = hy_parse(source_str)
source = hy_compile(hy_tree, '<hyc_compile>')
flags = hy_ast_compile_flags
codeobject = compile(source, dfile or filename, 'exec', flags)
except Exception as err:
if isinstance(err, (HyTypeError, LexException)) and err.source is None:
err.source = source_str
err.filename = filename
py_exc = py_compile.PyCompileError(err.__class__, err,
dfile or filename)
if doraise:
raise py_exc
else:
traceback.print_exc()
return
timestamp = long(os.stat(filename).st_mtime)
if cfile is None:
cfile = cache_from_source(filename)
f = tempfile.NamedTemporaryFile('wb', dir=os.path.split(cfile)[0],
delete=False)
try:
f.write('\0\0\0\0')
f.write(struct.pack('<I', timestamp))
f.write(marshal.dumps(codeobject))
f.flush()
f.seek(0, 0)
f.write(imp.get_magic())
# Make sure it's written to disk.
f.flush()
os.fsync(f.fileno())
f.close()
# Rename won't replace an existing dest on Windows.
if os.name == 'nt' and os.path.isfile(cfile):
os.unlink(cfile)
os.rename(f.name, cfile)
except OSError:
try:
os.unlink(f.name)
except OSError:
pass
return cfile
py_compile.compile = hyc_compile
# We create a separate version of runpy, "runhy", that prefers Hy source over
# Python.
runhy = importlib.import_module('runpy')
runhy._get_code_from_file = partial(_get_code_from_file,
hy_src_check=_could_be_hy_src)
del sys.modules['runpy']
runpy = importlib.import_module('runpy')
_runpy_get_code_from_file = runpy._get_code_from_file
runpy._get_code_from_file = _get_code_from_file
``` |
{
"source": "josephwillard/logpy",
"score": 3
} |
#### File: logpy/tests/test_cons.py
```python
from functools import reduce
from itertools import chain, cycle
from collections import Iterable, OrderedDict
from kanren.cons import cons, ConsPair, car, cdr, is_cons, is_null
def assert_all_equal(*tests):
def _equal(x, y):
        assert x == y
return y
reduce(_equal, tests)
def test_cons():
assert_all_equal(cons('a', None), cons('a', []), ['a'])
assert cons('a', ()) == ('a',)
assert cons('a', []) == ['a']
assert cons(None, 'a').car is None
assert cons(None, 'a').cdr == 'a'
assert cons((), 'a') == ConsPair((), 'a')
assert cons([], 'a') == ConsPair([], 'a')
assert cons('a', None) == ['a']
assert cons('a', ['b', 'c']) == ['a', 'b', 'c']
assert cons('a', ('b', 'c')) == ('a', 'b', 'c')
assert type(cons(('a', 1), {'b': 2})) == ConsPair
assert cons(('a', 1), OrderedDict({'b': 2})) == OrderedDict([('a', 1),
('b', 2)])
assert cons(['a', 'b'], 'c') == ConsPair(['a', 'b'], 'c')
assert cons(('a', 'b'), 'c') == ConsPair(('a', 'b'), 'c')
assert cons(['a', 'b'], ['c', 'd']) == [['a', 'b'], 'c', 'd']
assert cons(('a', 'b'), ['c', 'd']) == [('a', 'b'), 'c', 'd']
assert cons(['a', 'b'], ('c', 'd')) == (['a', 'b'], 'c', 'd')
assert type(cons(1, iter([3, 4]))) == chain
assert list(cons([1, 2], iter([3, 4]))) == [[1, 2], 3, 4]
assert list(cons(1, iter([2, 3]))) == [1, 2, 3]
assert cons('a', cons('b', 'c')) == cons(['a', 'b'], 'c')
assert cons(cons('a', 'b'), cons('c', 'd')) == cons([cons('a', 'b'), 'c'],
'd')
def test_car_cdr():
assert car(cons('a', 'b')) == 'a'
z = car(cons(iter([]), 1))
expected = iter([])
assert type(z) == type(expected)
assert list(z) == list(expected)
z = cdr(cons(1, iter([])))
expected = iter([])
assert isinstance(z, Iterable)
assert list(z) == list(expected)
assert car(iter([1])) == 1
assert list(cdr(iter([1]))) == []
assert list(cons(car(iter([1])), cdr(iter([1])))) == [1]
assert list(cdr(iter([1, 2, 3]))) == [2, 3]
assert car(cons(['a', 'b'], 'a')) == ['a', 'b']
assert car(cons(('a', 'b'), 'a')) == ('a', 'b')
assert cdr(cons('a', 'b')) == 'b'
assert cdr(cons('a', ())) == ()
assert cdr(cons('a', [])) == []
assert cdr(cons('a', ('b',))) == ('b',)
assert cdr(cons('a', ['b'])) == ['b']
assert car(OrderedDict([(1, 2), (3, 4)])) == (1, 2)
assert cdr(OrderedDict([(1, 2), (3, 4)])) == [(3, 4)]
assert cdr(OrderedDict({(1): 2})) == []
def test_is_cons():
assert is_cons(cons(1, 'hi'))
assert is_cons((1, 2))
assert is_cons([1, 2])
assert is_cons(OrderedDict({(1): 2}))
assert is_cons(iter([1]))
assert not is_cons({})
assert not is_cons(set())
assert not is_cons(set([1, 2]))
assert not is_cons('hi')
assert not is_cons('hi')
assert not is_cons(1)
assert not is_cons(iter([]))
assert not is_cons(OrderedDict({}))
assert not is_cons(())
assert not is_cons([])
def test_is_null():
assert is_null(None)
assert is_null([])
assert is_null(tuple())
assert is_null(OrderedDict())
assert is_null(iter([]))
assert not is_null(object)
assert not is_null([1])
assert not is_null((1,))
assert not is_null(OrderedDict([(1, 2)]))
assert not is_null(iter([1]))
assert not is_null(cycle([5]))
``` |
{
"source": "josephwillard/symbolic-pymc",
"score": 2
} |
#### File: relations/theano/distributions.py
```python
from unification import var
from etuples import etuple
from kanren import conde, eq
from kanren.facts import fact, Relation
from . import constant_neq
from .. import concat
from ...theano.meta import mt
derived_dist = Relation("derived_dist")
stable_dist = Relation("stable_dist")
generalized_gamma_dist = Relation("generalized_gamma_dist")
uniform_mt = mt.UniformRV(var(), var(), size=var(), rng=var(), name=var())
normal_mt = mt.NormalRV(var(), var(), size=var(), rng=var(), name=var())
cauchy_mt = mt.CauchyRV(var(), var(), size=var(), rng=var(), name=var())
halfcauchy_mt = mt.HalfCauchyRV(var(), var(), size=var(), rng=var(), name=var())
gamma_mt = mt.GammaRV(var(), var(), size=var(), rng=var(), name=var())
exponential_mt = mt.ExponentialRV(var(), size=var(), rng=var(), name=var())
# TODO: Add constraints for different variations of this. Also, consider a
# check for exact equality of the two dists, or simply normalize/canonicalize
# the graph first.
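# The fact below encodes the classical result that the ratio of two independent
# standard normal variates follows a standard Cauchy distribution.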
fact(
derived_dist,
mt.true_div(
mt.NormalRV(0.0, 1.0, size=var("_ratio_norm_size"), rng=var("_ratio_norm_rng"), name=var()),
mt.NormalRV(0.0, 1.0, size=var("_ratio_norm_size"), rng=var(), name=var()),
),
mt.CauchyRV(0.0, 1.0, size=var("_ratio_norm_size"), rng=var("_ratio_norm_rng")),
)
# TODO:
# fact(stable_dist,
# normal_mt, ('StableRV',
# 2., 0.,
# normal_mt.owner.inputs[1],
# normal_mt.owner.inputs[1]))
# fact(stable_dist,
# cauchy_mt, ('StableRV',
# 1., 0.,
# cauchy_mt.owner.inputs[1],
# cauchy_mt.owner.inputs[1]))
# TODO: Weibull, Gamma, Exponential, Half-normal
# fact(generalized_gamma_dist,
# None,
# None)
def scale_loc_transform(in_expr, out_expr):
"""Create relations for lifting and sinking scale and location parameters of distributions.
I.e. f(a + b*x) -> a + b * f(x)
For example, `in_expr`: f(a + b*x) == `out_expr`: a + b * f(x).
TODO: Match larger distribution families and perform transforms from there.
XXX: PyMC3 rescaling issue (?) doesn't allow us to take the more general
approach, which involves separate scale and location rewrites.
"""
# Scale and location transform expression "pattern" for a Normal term.
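    # (These patterns make the docstring's rule concrete: NormalRV(mu, sd, ...) is
    # related to mu + sd * NormalRV(0., 1., ...), and the same is done for CauchyRV below.)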
normal_mt = mt.NormalRV(var(), var(), size=var(), rng=var(), name=var())
n_name_lv = normal_mt.name
n_mean_lv, n_sd_lv, n_size_lv, n_rng_lv = normal_mt.owner.inputs
offset_name_mt = var()
rct_norm_offset_mt = etuple(
mt.add,
n_mean_lv,
etuple(
mt.mul,
n_sd_lv,
mt.NormalRV(0.0, 1.0, size=n_size_lv, rng=n_rng_lv, name=offset_name_mt),
),
)
# Scale and location transform expression "pattern" for a Cauchy term.
cauchy_mt = mt.CauchyRV(var(), var(), size=var(), rng=var(), name=var())
c_name_lv = cauchy_mt.name
c_mean_lv, c_beta_lv, c_size_lv, c_rng_lv = cauchy_mt.owner.inputs
rct_cauchy_offset_mt = etuple(
mt.add,
c_mean_lv,
etuple(
mt.mul,
c_beta_lv,
mt.CauchyRV(0.0, 1.0, size=c_size_lv, rng=c_rng_lv, name=offset_name_mt),
),
)
# TODO:
# uniform_mt = mt.UniformRV(var(), var(), size=var(), rng=var(), name=var())
# u_name_lv = uniform_mt.name
# u_a_lv, u_b_lv, u_size_lv, u_rng_lv = uniform_mt.owner.inputs
# rct_uniform_scale_mt = etuple(
# mt.mul,
# u_b_lv,
# mt.UniformRV(0.0, 1.0, size=u_size_lv, rng=u_rng_lv, name=offset_name_mt),
# )
# rct_uniform_loc_mt = etuple(mt.add, u_c_lv,
# mt.UniformRV(u_a_lv, u_b_lv,
# size=u_size_lv,
# rng=u_rng_lv,
# name=offset_name_mt))
rels = conde(
[
eq(in_expr, normal_mt),
constant_neq(n_sd_lv, 1),
constant_neq(n_mean_lv, 0),
eq(out_expr, rct_norm_offset_mt),
concat(n_name_lv, "_offset", offset_name_mt),
],
[
eq(in_expr, cauchy_mt),
constant_neq(c_beta_lv, 1),
# TODO: Add a positivity constraint for the scale.
constant_neq(c_mean_lv, 0),
eq(out_expr, rct_cauchy_offset_mt),
concat(c_name_lv, "_offset", offset_name_mt),
],
# TODO:
# [eq(in_expr, uniform_mt),
# lall(
# constant_eq(u_a_lv, 0),
# eq(out_expr, rct_uniform_scale_mt),
# concat(u_name_lv, "_scale", offset_name_mt),
# )],
)
return rels
```
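A quick note on how `scale_loc_transform` is typically driven: the relation is applied across a graph with a miniKanren walk. The sketch below is only illustrative and is not part of the module; it uses the generic `walko`/`reduceo` helpers from `kanren.graph` (a dedicated walker such as `non_obs_walko` may be preferable in practice), and the random-variable setup mirrors the project's own tests. The `derived_dist` fact registered above simply records the standard result that a ratio of two independent standard normals is Cauchy(0, 1).
```python
# Hedged sketch (not from this module): rewrite NormalRV(mu, sd) terms into
# mu + sd * NormalRV(0, 1) by walking a graph with the relation above.
from functools import partial

import numpy as np
import theano
import theano.tensor as tt

from unification import var
from kanren import run
from kanren.graph import reduceo, walko

from symbolic_pymc.theano.random_variables import NormalRV
from symbolic_pymc.relations.theano.distributions import scale_loc_transform

tt.config.compute_test_value = "ignore"
rng = theano.shared(np.random.RandomState(123))
y_rv = NormalRV(1.0, 2.0, name="y", rng=rng)

q_lv = var()
(expr_graph,) = run(1, q_lv, walko(partial(reduceo, scale_loc_transform), y_rv, q_lv))
y_rv_transformed = expr_graph.eval_obj.reify()  # roughly, 1.0 + 2.0 * NormalRV(0, 1)
```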
#### File: symbolic_pymc/tensorflow/meta.py
```python
import types
import inspect
import tensorflow as tf
import tensorflow_probability as tfp
from inspect import Parameter, Signature
from collections import OrderedDict
from collections.abc import Sequence
from functools import partial
from cachetools import cachedmethod, Cache
from unification import Var, var, isvar
from google.protobuf.message import Message
from tensorflow.python.framework import (
tensor_util,
op_def_registry,
op_def_library,
tensor_shape,
ops,
)
from tensorflow.core.framework.op_def_pb2 import OpDef
from tensorflow.core.framework.node_def_pb2 import NodeDef
from tensorflow_probability import distributions as tfd
from ..meta import (
MetaSymbol,
MetaSymbolType,
MetaOp,
MetaVariable,
MetaReificationError,
meta_reify_iter,
_metatize,
metatize,
)
from .. import meta
from ..utils import HashableNDArray
tf_metatize_cache = Cache(50)
class MetaOpDefLibrary(object):
"""A singleton-like object that holds correspondences between TF Python API functions and the `OpDef`s they construct.
It provides a map of `OpDef` names (lower-cased) to the Python API
functions in `tensorflow.raw_ops`, as well as `inspect.Signature` objects
for said functions so that default values and lists of arguments (keywords
included) can be more easily used.
"""
lower_op_name_to_raw = {
op_name.lower(): op_name
for op_name in dir(tf.raw_ops)
if callable(getattr(tf.raw_ops, op_name))
}
opdef_signatures = {}
def __init__(self):
#
# We need this in order to construct "Const" tensors directly, since
# the "value" attr in a meta `NodeDef` is just a NumPy array and not
# the `TensorProto` expected by `raw_ops.Const`.
#
def mt_const(value, dtype, name=None):
return tf.raw_ops.Const(
value=tensor_util.make_tensor_proto(value), dtype=dtype, name=name
)
opdef = op_def_registry.get("Const")
self.opdef_signatures[opdef.name] = self.make_opdef_sig(opdef, mt_const)
@classmethod
def get_op_info(cls, opdef):
"""Return the TF Python API function signature for a given `OpDef`.
Parameters
----------
opdef: str or `OpDef` object (meta or base)
"""
if isinstance(opdef, str):
opdef_name = opdef
opdef = op_def_registry.get(opdef_name)
else:
opdef_name = opdef.name
opdef_sig = cls.opdef_signatures.get(opdef_name, None)
if opdef_sig is None and opdef is not None:
opdef_func = getattr(tf.raw_ops, opdef.name, None)
opdef_sig = cls.make_opdef_sig(opdef, opdef_func)
cls.opdef_signatures[opdef.name] = opdef_sig
return opdef_sig
@classmethod
def make_opdef_sig(cls, opdef, opdef_py_func=None):
"""Create a `Signature` object for an `OpDef`.
Annotations are included so that one can partially verify arguments.
"""
if opdef_py_func:
#
# We assume we're dealing with a function from `tf.raw_ops`.
# Those functions have only the necessary `input_arg`s and `attr`
# inputs as arguments.
#
opdef_func_sig = Signature.from_callable(opdef_py_func)
params = opdef_func_sig.parameters
else:
#
# We're crafting an `Operation` at a low-level via `apply_op`
# (like the functions in `tf.raw_ops` do)
#
input_args = OrderedDict([(a.name, a.type or a.type_attr) for a in opdef.input_arg])
attrs = OrderedDict([(a.name, a) for a in opdef.attr])
params = OrderedDict()
opdef_py_func = partial(op_def_library.apply_op, opdef.name)
for i_name, i_type in input_args.items():
p = Parameter(i_name, Parameter.POSITIONAL_OR_KEYWORD, annotation=i_type)
params[i_name] = p
# These are the ambiguities we're attempting to overcome
# with the `tf.raw_ops` functions above.
for a_name, a_value in attrs.items():
# TODO: Check
if a_value.type == "type":
# This is a type value that will most likely be inferred
# from/by the inputs.
# TODO: We could check for an `allowed_values` attribute.
continue
default_value = Parameter.empty
# if a_value.HasField('default_value'):
# # TODO: Check `a_value.type` and extract Python value.
# default_value = a_value.default_value
p = Parameter(
a_name,
Parameter.POSITIONAL_OR_KEYWORD,
default=default_value,
annotation=a_value.type,
)
params[a_name] = p
# Always assume that a name can be specified.
if "name" not in params:
params["name"] = Parameter("name", Parameter.POSITIONAL_OR_KEYWORD, default=None)
opdef_sig = Signature(
params.values(), return_annotation=[(o.name, o.type_attr) for o in opdef.output_arg]
)
return opdef_sig, opdef_py_func
op_def_lib = MetaOpDefLibrary()
def _metatize_tf_object(obj):
try:
tf_obj = tf.convert_to_tensor(obj)
except (TypeError, ValueError):
raise ValueError(f"Error converting {obj} to a TensorFlow tensor.")
return _metatize(tf_obj)
def load_dispatcher():
"""Set/override dispatcher to default to TF objects."""
from tensorflow.python.ops.gen_linalg_ops import _SvdOutput
def _metatize_tf_svd(obj):
"""Turn a TensorFlow `Svd` object/tuple into a standard tuple."""
return meta._metatize(tuple(obj))
meta._metatize.add((_SvdOutput,), _metatize_tf_svd)
def _metatize_tf_eager(obj):
"""Catch eager tensor metatize issues early."""
raise AttributeError(
f"TensorFlow Operation not available; "
"try recreating the object with eager-mode disabled"
" (e.g. within `tensorflow.python.eager.context.graph_mode`)"
)
meta._metatize.add((ops.EagerTensor,), _metatize_tf_eager)
meta._metatize.add((object,), _metatize_tf_object)
meta._metatize.add((HashableNDArray,), _metatize_tf_object)
for new_cls in TFlowMetaSymbol.base_subclasses():
meta._metatize.add((new_cls.base,), new_cls._metatize)
meta._metatize.add((TFlowMetaOpDef.base,), TFlowMetaOpDef._metatize)
# Apply TF-specific `kanren` settings
from ..relations import tensorflow
return meta._metatize
class TFlowMetaSymbol(MetaSymbol):
__slots__ = ()
@classmethod
def _metatize(cls, obj):
res = super()._metatize(obj)
res.validate_objs()
return res
def validate_objs(self):
# If there is no base object associated with the inputs, then we can't
# trust a base object associated with this object (e.g. for the case in
# which metatize altered a property in an input).
try:
rands = self.rands
except NotImplementedError:
return
for prop in rands:
if isinstance(prop, MetaSymbol) and prop.obj is None:
self.reset()
break
class OpDefFactoryType(MetaSymbolType):
__opdefs__ = {}
def __call__(cls, obj=None):
if obj is not None:
obj_hash = obj.name # obj.SerializeToString()
opdef = cls.__opdefs__.get(obj_hash, None)
else:
obj_hash = None
opdef = None
if opdef is None:
opdef = super().__call__(obj=obj)
if obj is not None:
cls.__opdefs__[obj_hash] = opdef
return opdef
class TFlowMetaOpDef(TFlowMetaSymbol, metaclass=OpDefFactoryType):
"""A meta `OpDef`.
This is like an `Op` node in Theano.
Some useful info/links:
- https://stackoverflow.com/questions/41147734/looking-for-source-code-of-from-gen-nn-ops-in-tensorflow/41149557#41149557
- A better way to view an `OpDef`:
>>> from google.protobuf import json_format
>>> print(json_format.MessageToJson(opdef))
- If you want to use an `OpDef` to construct a node, see
`op_def_library.apply_op`.
"""
base = OpDef
__slots__ = ("_attr",)
def __init__(self, obj=None):
super().__init__(obj=obj)
self._attr = {o.name: o for o in obj.attr}
@property
def attr(self):
return self._attr
def __str__(self):
return f"{self.__class__.__name__}({self.obj.name})"
def _repr_pretty_(self, p, cycle):
if cycle:
p.text(f"{self.__class__.__name__}(...)")
else:
with p.group(2, f"{self.__class__.__name__}(", ")"):
p.breakable(sep="")
p.text(self.obj.name)
def __eq__(self, other):
if self is other:
return True
if not (type(self) == type(other)):
return False
assert self.base == other.base
return self.obj.name == other.obj.name
def __hash__(self):
return hash((self.base, self.obj.name))
def reify(self):
return self.obj
class TFlowMetaNodeDef(TFlowMetaSymbol):
"""A meta `NodeDef`.
NOTE: We're ignoring `node_def.input`; it's just an unnecessary hassle.
"""
base = NodeDef
__slots__ = ["op", "name", "attr", "_frozen_attr"]
@classmethod
def _metatize(cls, obj):
res = super()._metatize(obj)
if obj.op != "Const" and "node_attrs" in meta._lvar_defaults_enabled:
res.attr = var()
if "names" in meta._lvar_defaults_enabled:
res.name = var()
return res
@classmethod
def _protobuf_convert(cls, k, v):
"""Convert a small subset of protobuf objects.
FYI: This would cover a lot at once (but not any meta object
conversions):
from google.protobuf.json_format import MessageToDict
MessageToDict(obj, use_integers_for_enums=True)
"""
if k == "shape":
return metatize(tensor_shape.as_shape(v.shape))
elif k == "dtype":
return tf.as_dtype(v.type).name
elif k == "T":
return tf.as_dtype(v.type).name
elif k == "value":
return tensor_util.MakeNdarray(v.tensor).view(HashableNDArray)
else:
# Consider only the narrow case where a single object is converted
# (e.g. a Python builtin type under `v.b`, `v.f`, etc.)
v = tuple(v for k, v in v.ListFields())
if len(v) == 1:
return v[0]
else:
raise TypeError(f"Could not convert {k}")
def __init__(self, op, name, attr, obj=None):
super().__init__(obj=obj)
self.op = metatize(op)
assert name is not None
self.name = name if isvar(name) else str(name)
if not isvar(attr):
opdef_sig, _ = op_def_lib.get_op_info(self.op)
_attr = dict()
for k, v in attr.items():
if isinstance(v, Message):
try:
v = self._protobuf_convert(k, v)
except TypeError:
v = var()
_attr[k] = v
self.attr = _attr
else:
self.attr = attr
@property
def frozen_attr(self):
if getattr(self, "_frozen_attr", None) is not None:
return self._frozen_attr
if isvar(self.attr):
self._frozen_attr = self.attr
else:
self._frozen_attr = frozenset(self.attr.items())
return self._frozen_attr
def __eq__(self, other):
if self is other:
return True
if not (type(self) == type(other)):
return False
if (
self.op == other.op
and self.name == other.name
and self.frozen_attr == other.frozen_attr
):
return True
return False
def __hash__(self):
return hash((hash(self.op), hash(self.name), hash(self.frozen_attr)))
class TFlowMetaOp(TFlowMetaSymbol):
"""A meta `Operation`.
This is like an `Apply` node in Theano.
TODO: This whole thing should probably be a "NodeDef" class?
"""
base = tf.Operation
__slots__ = ["op_def", "node_def", "inputs", "_name", "_type", "_outputs", "_default_output"]
@classmethod
def _metatize(cls, obj):
"""Reformat inputs to match the OpDef."""
new_input = ops._reconstruct_sequence_inputs(obj.op_def, obj.inputs, obj.node_def.attr)
new_args = [
getattr(obj, s) if s != "inputs" else new_input for s in getattr(cls, "__props__", [])
]
res = cls(*new_args, obj=obj)
res.validate_objs()
return res
def __init__(self, op_def, node_def, inputs, outputs=None, obj=None):
"""Create a TensorFlow meta `Operation`.
The real signature of `tf.Operation.__init__` includes the graph
object, so we can't really use the signature directly. This is part of the
reason why we have `TFlowMetaOpFactory.__call__` and
`TFlowMetaTensor.operator` + `TFlowMetaTensor.inputs` that do not
directly use `__all_props__`/`TFlowMetaTensor.rands` and construct the
objects directly.
"""
super().__init__(obj=obj)
if isinstance(op_def, str):
op_def = op_def_registry.get(op_def)
self.op_def = metatize(op_def)
self.node_def = metatize(node_def)
if isvar(inputs):
self.inputs = inputs
else:
# Inputs are supposed to be immutable, so we're able to convert
# lists to tuples.
def _convert_inputs(arg, nested):
if nested and isinstance(arg, list):
arg = tuple(metatize(i) for i in arg)
else:
arg = metatize(arg)
return arg
if not isvar(self.op_def):
self.inputs = tuple(
_convert_inputs(i, hasattr(info, "number_attr"))
for i, info in zip(inputs, self.op_def.obj.input_arg)
)
else:
self.inputs = tuple(_convert_inputs(i, False) for i in inputs)
if outputs is not None:
if isvar(outputs):
self._outputs = outputs
else:
self._outputs = tuple(metatize(o) for o in outputs)
@property
def type(self):
if getattr(self, "_type", None) is not None:
return self._type
if isvar(self.op_def):
self._type = var()
else:
self._type = self.op_def.obj.name
return self._type
@property
def name(self):
if getattr(self, "_name", None) is not None:
return self._name
if isvar(self.node_def):
self._name = var()
else:
self._name = self.node_def.name
return self._name
@property
def outputs(self):
"""Compute outputs for this meta `Operation`."""
if getattr(self, "_outputs", None) is not None:
return self._outputs
if isvar(self.op_def):
self._outputs = var()
else:
if isvar(self.node_def) or not isinstance(getattr(self.node_def, "attr", None), dict):
node_attr = {}
else:
node_attr = self.node_def.attr
operator = TFlowMetaOperator(self.op_def, self.node_def)
if isvar(self.inputs):
inputs = (None,) * len(operator._apply_func_sig.parameters)
apply_defaults = False
else:
inputs = self.inputs
apply_defaults = True
apply_arguments = operator.input_args(
*inputs, apply_defaults=apply_defaults, **node_attr
)
# TODO: The above could probably be simplified into a
# NodeDef-from-input-args function.
out_types_mt = operator.output_meta_types(inputs=apply_arguments)
mt_outs = tuple(
o_type(self, i, o_dtype) for i, (o_type, o_dtype) in enumerate(out_types_mt)
)
self._outputs = mt_outs
return self._outputs
@property
def default_output(self):
"""Return the default output for this `Operation`.
TODO: It might be worth considering a direct approach, and not one that
requires the generation of all meta outputs.
"""
if getattr(self, "_default_output", None):
return self._default_output
mt_outs = self.outputs
if isinstance(mt_outs, Sequence) and len(mt_outs) == 1:
out_var = mt_outs[0]
else:
out_var = mt_outs
self._default_output = out_var
return self._default_output
def reify(self):
if self.obj and not isinstance(self.obj, Var):
return self.obj
if isvar(self.inputs):
return self
op_inputs, op_inputs_unreified = meta_reify_iter(self.inputs)
node_attr = getattr(self.node_def, "attr", None)
if node_attr is None or isvar(node_attr):
return self
operator = TFlowMetaOperator(self.op_def, self.node_def)
op_attrs, op_attrs_unreified = meta_reify_iter(
# Only use NodeDef attrs that appear in the OpDef's call signature.
# Other NodeDef attrs, like dtype and shape, can be computed.
{k: v for k, v in node_attr.items() if k in operator._apply_func_sig.parameters}
)
if not (op_inputs_unreified or op_attrs_unreified or isvar(self.name)):
#
# An operation with this name might already exist in the graph
#
try:
existing_op = ops.get_default_graph().get_operation_by_name(self.name)
except KeyError:
#
# There is no such `Operation`, so we attempt to create it
#
apply_arguments = operator.input_args(*op_inputs, name=self.name, **op_attrs)
tf_out = operator._apply_func(**apply_arguments)
op_tf = tf_out.op
else:
#
# An `Operation` with this name exists, let's make sure it's
# equivalent to this meta `Operation`
#
if self != mt(existing_op):
raise MetaReificationError(
f"An Operation with the name {self.name}"
" already exists in the graph and is not"
" equal to this meta object."
)
op_tf = existing_op
assert op_tf is not None
self._obj = op_tf
return self.obj
return self
class TFlowMetaTensor(TFlowMetaSymbol, MetaVariable):
base = tf.Tensor
__slots__ = ("op", "value_index", "dtype", "_shape", "_name", "_operator")
@classmethod
@cachedmethod(lambda cls: tf_metatize_cache)
def _metatize(cls, obj):
"""Cache Tensors specifically."""
return super()._metatize(obj)
def __init__(self, op, value_index, dtype, obj=None):
self.op = metatize(op)
# TODO: Sync this value with `op.node_def.attr['dtype']` and/or
# `op.node_def.attr['T']`?
self.dtype = dtype
self.value_index = value_index
super().__init__(obj=obj)
@property
def shape(self):
if getattr(self, "_shape", None):
return self._shape
if self.obj is not None and not isinstance(self.obj, Var):
self._shape = metatize(self.obj.shape)
else:
self._shape = TFlowMetaTensorShape(var())
return self._shape
@property
def name(self):
if getattr(self, "_name", None):
return self._name
if isinstance(getattr(self.op, "name", None), str) and not isvar(self.value_index):
name = f"{self.op.name}:{self.value_index}"
else:
name = var()
if self.obj is not None and not isinstance(self.obj, Var):
assert name == self.obj.name
self._name = name
return self._name
@property
def base_operator(self):
if getattr(self, "_operator", None):
return self._operator
if isvar(self.op) or (not isvar(self.op.inputs) and len(self.op.inputs) == 0):
raise NotImplementedError(f"{self} does not have a base_operator.")
self._operator = TFlowMetaOperator(self.op.op_def, self.op.node_def)
return self._operator
@property
def base_arguments(self):
# TODO: In keeping with our desire to return logic variables in cases
# where params aren't given/inferred, we could return something like
# `cons(var(), var())` here (although that wouldn't be necessarily
# imply that the result is a proper list/tuple).
if isvar(self.op) or (not isvar(self.op.inputs) and len(self.op.inputs) == 0):
raise NotImplementedError(f"{self} does not have base arguments.")
return self.op.inputs
def reify(self):
if self.obj is not None and not isinstance(self.obj, Var):
return self.obj
if (not self.op) or isvar(self.op):
op_res = super().reify()
return op_res
tf_op = self.op.reify()
if not MetaSymbol.is_meta(tf_op):
if not MetaSymbol.is_meta(self.value_index):
tf_res = tf_op.outputs[self.value_index]
elif len(tf_op.outputs) == 1:
tf_res = tf_op.outputs[0]
else:
# TODO: Anything else we should/can do here?
return self
self._obj = tf_res
return tf_res
return self
def __truediv__(self, y):
# TODO: TF performs some dtype logic (using `dtype.base_dtype`) and casting here.
return mt.realdiv(self, y, name="truediv")
def __rtruediv__(self, x):
# TODO: TF performs some dtype logic (using `dtype.base_dtype`) and casting here.
return mt.realdiv(x, self, name="truediv")
def __add__(self, y):
# TODO: If `self.dtype == tf.dtypes.string`, use `mt.add`
return mt.addv2(self, y, name="add")
def __radd__(self, x):
# TODO: If `x.dtype == tf.dtypes.string`, use `mt.add`
return mt.addv2(x, self, name="add")
def __sub__(self, y):
return mt.sub(self, y, name="sub")
def __rsub__(self, x):
return mt.sub(x, self, name="sub")
def __mul__(self, y):
return mt.mul(self, y, name="mul")
def __rmul__(self, x):
return mt.mul(x, self, name="mul")
def __abs__(self):
return mt.abs(self, name="Abs")
def __pow__(self, y):
return mt.pow(self, y, name="pow")
def __neg__(self):
return mt.neg(self, name="Neg")
class TFlowMetaTensorShape(TFlowMetaSymbol):
base = tf.TensorShape
__slots__ = ("dims", "_rank")
def __init__(self, dims, obj=None):
super().__init__(obj=obj)
self.dims = dims
if self.dims is not None and not isvar(self.dims):
# TODO: Just like the comment in `TFlowMetaTensor.inputs`,
# `self.dims` should be something like `cons(var(), ...)` and not a
# straight logic variable.
self.dims = tuple(tensor_shape.as_dimension(d).value for d in self.dims)
@property
def rank(self):
if getattr(self, "_rank", None):
return self._rank
if self.dims is not None and not isvar(self.dims):
rank = len(self.dims)
else:
# TODO: How do we represent/tie in len(var())?
rank = var()
self._rank = rank
return self._rank
@property
def ndims(self):
return self.rank
def as_list(self):
if self.dims is not None and not isvar(self.dims):
return list(self.dims)
else:
return self.dims
def __hash__(self):
return hash((self.base, self.dims))
class TFlowMetaOperator(TFlowMetaSymbol, MetaOp):
"""A class that implements the notion of an operator on top of TensorFlow's OpDef and NodeDef objects.
With this abstraction, we can better model operators by distinguishing
parameterized operators and their respective parameter values from the
operator's inputs, which may have similar properties across the entire
family of operators (i.e. across all parameter values).
For example, addition is commutative in its arguments; by modeling addition
as an operator parameterized on dtypes and/or names, we can preserve the
distinction between the operator's inputs and its parameter values, so that
commutativity is implemented exclusively on the non-dtype/name inputs.
"""
base = None
__slots__ = ("op_def", "node_def", "_apply_func_sig", "_apply_func")
@classmethod
def get_metaopdef(cls, name):
"""Obtain a MetaOpDef for a given string name.
This is more flexible because it ignores things like string case
(when the non-`raw_ops` name differs from the TF user-level API).
"""
raw_op_name = op_def_lib.lower_op_name_to_raw.get(name.lower(), name)
op_def = op_def_registry.get(raw_op_name)
if op_def is not None:
return TFlowMetaOpDef(obj=op_def)
def __init__(self, op_def, node_def=None, obj=None):
assert obj is None
super().__init__(None)
self.op_def = op_def
if isinstance(self.op_def, str):
self.op_def = self.get_metaopdef(self.op_def)
if self.op_def is None:
raise ValueError(f"Could not find an OpDef for {op_def}")
if isvar(self.op_def):
self._apply_func_sig, self._apply_func = None, None
else:
self._apply_func_sig, self._apply_func = op_def_lib.get_op_info(self.op_def.obj)
self.node_def = node_def
def reify(self):
return self
def output_meta_types(self, inputs=None):
"""Return a list of tuples containing object types and corresponding dtypes for the outputs of this OpDef.
This work is done in
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/framework/op_def_library.py#L337.
Would be very nice if the dtype inference and/or `NodeDef` generation
was abstracted-out from that function.
"""
if isvar(self.op_def):
return None
type_map = {k: None for k, v in self.op_def.attr.items() if v.type == "type"}
input_type_map = {i.name: i.type_attr for i in self.op_def.obj.input_arg}
if inputs:
# Infer/verify types from inputs
for i_name, i_value in inputs.items():
type_name = input_type_map.get(i_name, None)
if type_name is None:
continue
i_dtype = getattr(i_value, "dtype", None)
dtype = type_map[type_name]
if i_dtype is not None and not isvar(i_dtype):
if dtype is None or isvar(dtype):
# The input dtype is more informative, so use it.
type_map[type_name] = i_dtype
else:
# They're both informative and should be the same
assert dtype == i_dtype
def infer_output_types(o):
"""Get dtypes for specific outputs using known dtype info and API inputs."""
# Get the explicit type from a `NodeDef` attribute.
type_name = o.type_attr
# Existing dtype value
dtype = type_map[type_name]
# New value
_dtype = None
# The information could be in the `NodeDef`
if isinstance(getattr(self.node_def, "attr", None), dict):
_dtype = self.node_def.attr.get(type_name)
# It could also be in the inputs (i.e. when called via the API
# route)
# TODO: If it's in the inputs, we should just create the
# corresponding `NodeDef`.
if inputs and type_name in inputs:
_dtype = inputs.get(type_name)
if _dtype is None:
_dtype = var()
if not isvar(_dtype):
try:
_dtype = tf.dtypes.as_dtype(_dtype).base_dtype
except TypeError:
_dtype = var()
# Make sure dtypes derived from `NodeDef` info and API inputs are
# consistent.
if dtype is None or isvar(dtype):
# Newly inferred dtype is at least as informative as the
# current one
dtype = _dtype
type_map[type_name] = dtype
elif _dtype is None or isvar(_dtype):
# Newly inferred dtype is less informative
pass
else:
assert dtype == _dtype
return (TFlowMetaTensor, dtype)
# TODO: We could update missing dtype information in input meta
# types and `NodeDef`s.
# TODO: We also have permissible dtype information from objects in the
# array `self.obj.attr` under the field `allowed_values`.
return tuple(infer_output_types(o) for o in self.op_def.obj.output_arg)
def input_args(self, *args, apply_defaults=True, **kwargs):
"""Make args and kwargs conform to an OpDef's "apply function" arguments.
In order to do this, we effectively need to map `OpDef` and `NodeDef`
values to `tf.raw_ops.*` function arguments (i.e. the reverse of what
`op_def_library._apply_op_helper` does).
Returns an `OrderedDict`.
"""
kwargs = OrderedDict(
(k, v)
for k, v in kwargs.items()
# Filter out the optional keyword arguments so that we only pass
# expected arguments to the `OpDef`'s apply function.
if k in self._apply_func_sig.parameters
)
op_args = self._apply_func_sig.bind_partial(*args, **kwargs)
if apply_defaults:
op_args.apply_defaults()
return op_args.arguments
def __api_call__(self, *args, **kwargs):
"""Create the meta object(s) using the TF Python API's operator functions.
Each meta `OpDef` is associated with a TF Python function
(`self._apply_func`) that is used to construct its `Operation`s.
See `TFlowMetaTensor.operator` and `TFlowMetaTensor.inputs`.
"""
apply_arguments = self.input_args(*args, **kwargs)
if not meta._auto_reification_disabled:
op_args, op_args_unreified = meta_reify_iter(apply_arguments)
else:
op_args, op_args_unreified = apply_arguments, True
if not op_args_unreified:
res_var = None
# name = op_args.get("name", None)
#
# if name is not None:
# #
# # An operation with this name might already exist in the graph
# #
#
# from tensorflow.python.framework import ops
#
# try:
# this_op = ops.get_default_graph().get_operation_by_name(name)
# except KeyError:
# pass
# else:
# # TODO: Make sure the existing `Operation` matches our arguments
# assert this_op.type == self.op_def.obj.name
#
# this_op = mt(this_op)
# op_inputs, op_node_def = self.op_args_to_operation_inputs(op_args)
# assert op_inputs == this_op.inputs
# assert op_node_def == this_op.node_def
# res_var = this_op.default_output
if res_var is None:
#
# We create the `Operation` in the graph
#
tf_out = self._apply_func(**op_args)
# Ensure that the original meta objects will be available
# for use in the `metatize` that follows
tf_metatize_cache.update(
{
k: v
for k, v in zip(op_args.values(), apply_arguments.values())
if isinstance(k, tf.Tensor)
}
)
res_var = metatize(tf_out)
if "names" in meta._lvar_defaults_enabled:
# This should also reset the NodeDef's `obj`
res_var.op.node_def.name = var()
res_var.op.reset()
res_var.reset()
if "node_attrs" in meta._lvar_defaults_enabled:
# This should also reset the NodeDef's `obj`
res_var.op.node_def.attr = var()
res_var.op.reset()
res_var.reset()
else:
#
# If we're here, we have to create the meta objects manually.
#
op_input_args, node_def = self.op_args_to_operation_inputs(apply_arguments)
op_mt = TFlowMetaOp(self.op_def, node_def, op_input_args)
res_var = op_mt.default_output
return res_var
def op_args_to_operation_inputs(self, apply_arguments):
"""Map an `OpDef`'s "apply function" arguments to `Operation` inputs and a meta `NodeDef`."""
if isvar(self.op_def):
return None
op_def_tf = self.op_def.obj
op_inputs = tuple(
apply_arguments.get(i.name) for i in op_def_tf.input_arg if i.name in apply_arguments
)
# TODO: Include inferred attr values (e.g. dtypes).
if "node_attrs" not in meta._lvar_defaults_enabled:
node_attr = {a.name: apply_arguments.get(a.name, a) for a in op_def_tf.attr}
else:
node_attr = var()
if "names" not in meta._lvar_defaults_enabled:
op_name = apply_arguments.get("name", op_def_tf.name) or op_def_tf.name
else:
op_name = var()
node_def = TFlowMetaNodeDef(op_def_tf.name, op_name, node_attr)
return op_inputs, node_def
def __call__(self, *inputs, **kwargs):
if self.node_def is not None:
op = TFlowMetaOp(self.op_def, self.node_def, inputs)
res = op.default_output
if isvar(res):
return op.outputs
else:
return res
else:
return self.__api_call__(*inputs, **kwargs)
class TFlowMetaAccessor(object):
"""An accessor object that simplifies the use of meta objects.
Instances of this class can be used to implicitly convert TensorFlow
functions and objects into meta objects.
"""
namespaces = [tf, tf.raw_ops, tfp, tfd]
def __init__(self, namespace=None):
if namespace is None:
from symbolic_pymc.tensorflow import meta # pylint: disable=import-self
self.namespaces += [meta]
else:
self.namespaces = [namespace]
def __call__(self, x):
return metatize(x)
def __getattr__(self, obj):
ns_obj = next((getattr(ns, obj) for ns in self.namespaces if hasattr(ns, obj)), None)
if ns_obj is None:
# Try caller's namespace
frame = inspect.currentframe()
f_back = frame.f_back
if f_back:
ns_obj = f_back.f_locals.get(obj, None)
if ns_obj is None:
ns_obj = f_back.f_globals.get(obj)
if isinstance(ns_obj, types.ModuleType):
# It's a sub-module, so let's create another
# `TFlowMetaAccessor` and check within there.
meta_obj = TFlowMetaAccessor(namespace=ns_obj)
else:
# Check for an OpDef first
meta_obj = TFlowMetaOperator.get_metaopdef(obj)
if meta_obj is not None:
# We assume that the user requested an `Operation`
# constructor/helper. Return the meta `OpDef`, because
# it implements a constructor/helper-like `__call__`.
if meta_obj is not None:
meta_obj = TFlowMetaOperator(meta_obj, None)
# elif isinstance(ns_obj, (types.FunctionType, partial)):
# # It's a function, so let's provide a wrapper that converts
# # to-and-from theano and meta objects.
# @wraps(ns_obj)
# def meta_obj(*args, **kwargs):
# args = [o.reify() if hasattr(o, "reify") else o for o in args]
# res = ns_obj(*args, **kwargs)
# return metatize(res)
else:
# Hopefully, it's convertible to a meta object...
meta_obj = metatize(ns_obj)
# Finally, we store the result as a meta namespace attribute, or raise
# an exception.
if isinstance(
meta_obj, (MetaSymbol, MetaSymbolType, TFlowMetaOperator, types.FunctionType)
):
setattr(self, obj, meta_obj)
return getattr(self, obj)
elif isinstance(meta_obj, TFlowMetaAccessor):
setattr(self, obj, meta_obj)
return meta_obj
else:
raise AttributeError(f"Meta object for {obj} not found.")
mt = TFlowMetaAccessor()
load_dispatcher()
```
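As a rough illustration of how the `mt` accessor defined above can be used (this sketch is not from the module itself): meta objects can be created either by metatizing existing graph-mode TensorFlow tensors or by calling `OpDef`-backed constructors looked up through `mt`, and `reify` converts a meta object back into a base TF object when enough information is available.
```python
# Hedged usage sketch; assumes TF graph mode (eager tensors are explicitly
# rejected by the dispatcher above). Exact reification behavior may vary.
import tensorflow as tf
from tensorflow.python.eager.context import graph_mode

from symbolic_pymc.tensorflow.meta import mt

with graph_mode():
    x_tf = tf.compat.v1.placeholder("float", name="x")
    x_mt = mt(x_tf)            # metatize an existing tensor
    y_mt = mt.add(x_mt, x_mt)  # look up the "Add" OpDef and apply it
    y_tf = y_mt.reify()        # back to a concrete tf.Tensor (when possible)
```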
#### File: symbolic_pymc/theano/dispatch.py
```python
import theano.tensor as tt
from collections.abc import Mapping
from kanren.term import term, operator, arguments
from unification.core import _reify, _unify, reify
from cons.core import _car, _cdr
from etuples import etuplize
from etuples.core import ExpressionTuple
from .meta import TheanoMetaSymbol
from ..meta import metatize
from ..dispatch import unify_MetaSymbol
tt_class_abstractions = tuple(c.base for c in TheanoMetaSymbol.base_subclasses())
_unify.add(
(TheanoMetaSymbol, tt_class_abstractions, Mapping),
lambda u, v, s: unify_MetaSymbol(u, metatize(v), s),
)
_unify.add(
(tt_class_abstractions, TheanoMetaSymbol, Mapping),
lambda u, v, s: unify_MetaSymbol(metatize(u), v, s),
)
_unify.add(
(tt_class_abstractions, tt_class_abstractions, Mapping),
lambda u, v, s: unify_MetaSymbol(metatize(u), metatize(v), s),
)
def _reify_TheanoClasses(o, s):
meta_obj = metatize(o)
return reify(meta_obj, s)
_reify.add((tt_class_abstractions, Mapping), _reify_TheanoClasses)
operator.add((tt.Variable,), lambda x: operator(metatize(x)))
_car.add((tt.Variable,), lambda x: operator(metatize(x)))
arguments.add((tt.Variable,), lambda x: arguments(metatize(x)))
_cdr.add((tt.Variable,), lambda x: arguments(metatize(x)))
term.add((tt.Op, ExpressionTuple), lambda op, args: term(metatize(op), args))
etuplize.add(tt_class_abstractions, lambda x, shallow=False: etuplize(metatize(x), shallow))
__all__ = []
```
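These dispatch registrations are what allow `cons`, `kanren`, and `etuples` to treat raw Theano variables as miniKanren terms. The following is a small, hedged illustration; none of the names below come from this module.
```python
# Hedged sketch of what the registrations above enable.
import theano.tensor as tt
from cons import car, cdr
from etuples import etuplize

x_tt = tt.vector("x")
y_tt = tt.exp(x_tt)

y_et = etuplize(y_tt)  # an ExpressionTuple over meta objects
y_op = car(y_tt)       # roughly, the meta operator for `Elemwise{exp}`
y_args = cdr(y_tt)     # roughly, the meta arguments, i.e. (mt(x_tt),)

# Evaluating and reifying the etuple reproduces a Theano graph:
y_tt_again = y_et.eval_obj.reify()
```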
#### File: symbolic_pymc/theano/opt.py
```python
import types
import theano
import theano.tensor as tt
from functools import wraps
from theano.gof.opt import LocalOptimizer
from unification import var, variables
from kanren import run
from etuples.core import ExpressionTuple
from .meta import MetaSymbol
def eval_and_reify_meta(x):
"""Get Theano objects from combinations of `etuple`s and meta objects."""
res = x
# Create base objects from the resulting meta object
if isinstance(res, ExpressionTuple):
res = res.eval_obj
if isinstance(res, MetaSymbol):
res = res.reify()
if MetaSymbol.is_meta(res):
raise ValueError("Kanren results not fully reifiable: {}".format(res))
return res
class FunctionGraph(theano.gof.fg.FunctionGraph):
"""A version of `FunctionGraph` that knows not to merge non-deterministic `Op`s.
TODO: Add a check to `MergeFeature.process_node` and submit
a PR to Theano.
"""
def __init__(
self,
inputs,
outputs,
features=None,
clone=True,
memo=None,
update_mapping=None,
copy_inputs=True,
copy_orphans=None,
):
if clone:
if copy_orphans is None:
copy_orphans = copy_inputs
self.memo = theano.gof.graph.clone_get_equiv(
inputs, outputs, copy_inputs, copy_orphans, memo
)
inputs = [self.memo[i] for i in inputs]
outputs = [self.memo[o] for o in outputs]
super().__init__(inputs, outputs, features=features, clone=False, update_mapping=None)
def attach_feature(self, feature):
if isinstance(feature, theano.gof.opt.MergeFeature):
_process_node = feature.process_node
@wraps(feature.process_node)
def _f(self, fgraph, node):
if getattr(node.op, "nondeterministic", False):
return
return _process_node(fgraph, node)
feature.process_node = types.MethodType(_f, feature)
return super().attach_feature(feature)
def replace(self, r, new_r, reason=None, verbose=None, remove_dup_inputs=True):
"""See `theano.gof.fg.FunctionGraph.replace`.
The original `FunctionGraph.replace` will not replace the actual
input list. This one will.
"""
super().replace(r, new_r, reason=reason, verbose=verbose)
if r in self.inputs:
# TODO: Is there a reason to do this in-place instead?
# Is anyone supposed to hold a reference to the original inputs
# list?
# Remove duplicate inputs, if any.
if remove_dup_inputs and new_r in self.inputs:
self.inputs.remove(new_r)
assert r not in self.variables
new_inputs = [new_r if i == r else i for i in self.inputs]
self.inputs = new_inputs
# TODO: Inputs-changed callback?
assert r not in self.inputs
def clone_get_equiv(self, *args, **kwargs):
fg, var_map = super().clone_get_equiv(*args, **kwargs)
fg.__class__ = self.__class__
return fg, var_map
class KanrenRelationSub(LocalOptimizer):
"""A local optimizer that uses miniKanren goals to match and replace terms in a Theano `FunctionGraph`.
TODO: Only uses *one* miniKanren `run` result (chosen by a configurable
filter function). We might want an option to produce multiple graphs, but
I imagine that would involve an entirely different optimizer type.
"""
reentrant = True
def __init__(
self,
kanren_relation,
relation_lvars=None,
results_filter=lambda x: next(x, None),
node_filter=lambda x: False,
):
"""Create a `KanrenRelationSub`.
Parameters
----------
kanren_relation: kanren.Relation or goal
The miniKanren relation store or goal to use. Custom goals should
take an input and output argument, respectively.
relation_lvars: Iterable
A collection of terms to be considered logic variables by miniKanren
(i.e. Theano terms used as "unknowns" in `kanren_relation`).
results_filter: function
A function that returns a single result from a stream of
miniKanren results. The default function returns the first result.
node_filter: function
A function taking a single node as an argument that returns `True`
when the node should be skipped.
"""
self.kanren_relation = kanren_relation
self.relation_lvars = relation_lvars or []
self.results_filter = results_filter
self.node_filter = node_filter
super().__init__()
def adjust_outputs(self, node, new_node, old_node=None):
"""Make adjustments for multiple outputs.
This handles (some) nodes with multiple outputs by returning a list
with the appropriate length and containing the new node (at the correct
index if `default_output` is available and correct, or 0--and it
happens to be the correct one).
TODO: We should be able to get the correct index from the something
like `node.outputs.index(old_node)`, but we don't exactly have
`old_node` unless the miniKanren results give it to us.
"""
res = list(node.outputs)
try:
new_node_idx = res.index(old_node)
except ValueError:
# Guesstimate it
new_node_idx = getattr(node.op, "default_output", 0) or 0
res[new_node_idx] = new_node
return res
def transform(self, node):
if not isinstance(node, tt.Apply):
return False
if self.node_filter(node):
return False
try:
input_expr = node.default_output()
except AttributeError:
input_expr = node.outputs
with variables(*self.relation_lvars):
q = var()
kanren_results = run(None, q, self.kanren_relation(input_expr, q))
chosen_res = self.results_filter(kanren_results)
if chosen_res:
if isinstance(chosen_res, ExpressionTuple):
chosen_res = eval_and_reify_meta(chosen_res)
if isinstance(chosen_res, dict):
chosen_res = list(chosen_res.items())
if isinstance(chosen_res, list):
# We got a dictionary of replacements
new_node = {eval_and_reify_meta(k): eval_and_reify_meta(v) for k, v in chosen_res}
assert all(k in node.fgraph.variables for k in new_node.keys())
elif isinstance(chosen_res, tt.Variable):
# Attempt to automatically format the output for multi-output
# `Apply` nodes.
new_node = self.adjust_outputs(node, eval_and_reify_meta(chosen_res))
else:
raise ValueError(
"Unsupported FunctionGraph replacement variable type: {chosen_res}"
)
return new_node
else:
return False
```
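To make the optimizer above concrete, here is a hedged sketch of plugging a simple relation into `KanrenRelationSub`; the `exp(log(x)) -> x` rewrite and the helper names below are illustrative assumptions rather than part of this module.
```python
# Hedged sketch: a local optimizer that rewrites exp(log(x)) into x.
from unification import var
from kanren import eq
from kanren.core import lall

from symbolic_pymc.theano.meta import mt
from symbolic_pymc.theano.opt import KanrenRelationSub

def exp_log_relation(in_expr, out_expr):
    """Relate `exp(log(x))` terms with `x` (left-to-right)."""
    x_lv = var()
    return lall(eq(in_expr, mt.exp(mt.log(x_lv))), eq(out_expr, x_lv))

exp_log_opt = KanrenRelationSub(exp_log_relation)
# `exp_log_opt` can then be used like any other Theano `LocalOptimizer`
# (e.g. wrapped in an `EquilibriumOptimizer` and applied to a `FunctionGraph`).
```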
#### File: tests/tensorflow/conftest.py
```python
import pytest
@pytest.fixture(autouse=True)
def setup_module():
import symbolic_pymc.meta
from symbolic_pymc.meta import base_metatize
import symbolic_pymc.tensorflow.meta as tm
_metatize = tm.load_dispatcher()
symbolic_pymc.meta._metatize = _metatize
# Let's make sure we have a clean graph slate
from tensorflow.compat.v1 import reset_default_graph
reset_default_graph()
yield
symbolic_pymc.meta._metatize = base_metatize
```
#### File: tests/tensorflow/__init__.py
```python
from functools import wraps
from tensorflow.python.eager.context import graph_mode
def run_in_graph_mode(f):
@wraps(f)
def _f(*args, **kwargs):
with graph_mode():
return f(*args, **kwargs)
return _f
```
#### File: tests/tensorflow/test_graph.py
```python
import numpy as np
import tensorflow as tf
from symbolic_pymc.tensorflow.graph import normalize_tf_graph
from tests.tensorflow import run_in_graph_mode
@run_in_graph_mode
def test_normalize():
tf.config.optimizer.set_experimental_options(
{
"shape_optimizations": True,
"arithmetic_optimzation": True,
"function_optimization": True,
"min_graph_nodes": 0,
}
)
with tf.Graph().as_default() as norm_graph:
a_tf = tf.compat.v1.placeholder("float")
const_log_tf = 0.5 * np.log(2.0 * np.pi) + tf.math.log(a_tf)
normal_const_log_tf = normalize_tf_graph(const_log_tf)
# Grappler appears to put log ops before const
assert normal_const_log_tf.op.inputs[0].op.type == "Log"
assert normal_const_log_tf.op.inputs[1].op.type == "Const"
```
#### File: symbolic-pymc/tests/test_utils.py
```python
import numpy as np
from unification import var
from symbolic_pymc.meta import MetaSymbol, MetaOp
from symbolic_pymc.utils import meta_diff, eq_lvar, HashableNDArray
class SomeOp(object):
def __repr__(self):
return "<SomeOp>"
class SomeType(object):
def __init__(self, field1, field2):
self.field1 = field1
self.field2 = field2
def __repr__(self):
return f"SomeType({self.field1}, {self.field2})"
def __str__(self):
return f"SomeType<{self.field1}, {self.field2}>"
class SomeMetaSymbol(MetaSymbol):
__slots__ = ("field1", "field2", "_blah")
base = SomeType
def __init__(self, obj=None):
super().__init__(obj)
self.field1 = 1
self.field2 = 2
self._blah = "a"
class SomeMetaOp(MetaOp):
__slots__ = ()
base = SomeOp
def output_meta_types(self):
return [SomeMetaSymbol]
def __call__(self, *args, **kwargs):
return SomeMetaSymbol(*args, **kwargs)
class SomeOtherMetaSymbol(MetaSymbol):
__slots__ = ("field1", "field2")
base = SomeType
def __init__(self, field1, field2, obj=None):
super().__init__(obj)
self.field1 = field1
self.field2 = field2
class SomeOtherOp(object):
def __repr__(self):
return "<SomeOp>"
class SomeOtherMetaOp(SomeMetaOp):
base = SomeOtherOp
def test_parts_unequal():
s0 = SomeMetaSymbol()
s1 = SomeOtherMetaSymbol(1, 2)
res = meta_diff(s0, s1)
assert res.reason == "types"
assert res.path(s0) is s0
assert res.objects == (s0, s1)
res = meta_diff(s0, s1, cmp_types=False)
assert res is None
s2 = SomeOtherMetaSymbol(1, 3)
res = meta_diff(s0, s2, cmp_types=False)
assert res.path(s2) == 3
assert res.path(s1) == 2
assert res.reason == "ne_fn"
assert res.objects == (2, 3)
res = meta_diff(SomeMetaOp(), SomeMetaOp())
assert res is None
op1 = SomeMetaOp()
op2 = SomeOtherMetaOp()
res = meta_diff(op1, op2, cmp_types=False)
assert res.path(op1) is op1
assert res.reason == "bases"
assert res.objects == (op1.base, op2.base)
a = SomeOtherMetaSymbol(1, [2, SomeOtherMetaSymbol(3, 4)])
b = SomeOtherMetaSymbol(1, [2, SomeOtherMetaSymbol(3, 5)])
res = meta_diff(a, b)
assert res.path(a) == 4
assert res.path(b) == 5
assert res.reason == "ne_fn"
assert res.objects == (4, 5)
a = SomeOtherMetaSymbol(1, [2, SomeOtherMetaSymbol(3, 4)])
b = SomeOtherMetaSymbol(1, [2, SomeOtherMetaSymbol(3, 4)])
res = meta_diff(a, b)
assert res is None
a = SomeOtherMetaSymbol(1, [2, SomeOtherMetaSymbol(3, 4), 5])
b = SomeOtherMetaSymbol(1, [2, SomeOtherMetaSymbol(3, 4)])
res = meta_diff(a, b)
assert res is not None
assert res.reason == "seq len"
a = SomeOtherMetaSymbol(1, ["a", "b"])
b = SomeOtherMetaSymbol(1, 2)
res = meta_diff(a, b, cmp_types=False)
assert res is not None
assert res.reason == "ne_fn"
a = SomeOtherMetaSymbol(1, ["a", "b"])
b = SomeOtherMetaSymbol(1, "ab")
res = meta_diff(a, b, cmp_types=False)
assert res is not None
a = SomeOtherMetaSymbol(1, {"a": 1, "b": 2})
b = SomeOtherMetaSymbol(1, {"b": 2, "a": 1})
res = meta_diff(a, b)
assert res is None
a = SomeOtherMetaSymbol(1, {"a": 1, "b": 2})
b = SomeOtherMetaSymbol(1, {"b": 3, "a": 1})
res = meta_diff(a, b)
assert res.reason == "ne_fn"
assert res.objects == (2, 3)
assert res.path(a) == 2
assert res.path(b) == 3
a = SomeOtherMetaSymbol(1, {"a": 1, "b": 2})
b = SomeOtherMetaSymbol(1, {"a": 1, "c": 2})
res = meta_diff(a, b)
assert res.reason == "map keys"
assert res.path(a) == {"a": 1, "b": 2}
assert res.objects == ([("a", 1), ("b", 2)], [("a", 1), ("c", 2)])
def test_eq_lvar():
a = SomeOtherMetaSymbol(1, [2, SomeOtherMetaSymbol(3, 4)])
b = SomeOtherMetaSymbol(1, [2, SomeOtherMetaSymbol(3, 4)])
assert eq_lvar(a, b) is True
a = SomeOtherMetaSymbol(1, [2, SomeOtherMetaSymbol(3, 4)])
b = SomeOtherMetaSymbol(1, [2, var()])
assert eq_lvar(a, b) is False
a = SomeOtherMetaSymbol(1, [2, var()])
b = SomeOtherMetaSymbol(1, [2, var()])
assert eq_lvar(a, b) is True
a = SomeOtherMetaSymbol(1, [2, {"a": var()}])
b = SomeOtherMetaSymbol(1, [2, {"a": var()}])
assert eq_lvar(a, b) is True
a = SomeOtherMetaSymbol(1, [3, var()])
b = SomeOtherMetaSymbol(1, [2, var()])
assert eq_lvar(a, b) is False
def test_HashableNDArray():
a = np.r_[[1, 2], 3]
a_h = a.view(HashableNDArray)
b = np.r_[[1, 2], 3]
b_h = b.view(HashableNDArray)
assert hash(a_h) == hash(b_h)
assert a_h == b_h
assert not a_h != b_h
c = np.r_[[1, 2], 4]
c_h = c.view(HashableNDArray)
assert hash(a_h) != hash(c_h)
assert a_h != c_h
```
#### File: tests/theano/conftest.py
```python
import pytest
@pytest.fixture(autouse=True)
def setup_module():
import symbolic_pymc.meta
from symbolic_pymc.meta import base_metatize
import symbolic_pymc.theano.meta as tm
_metatize = tm.load_dispatcher()
symbolic_pymc.meta._metatize = _metatize
yield
symbolic_pymc.meta._metatize = base_metatize
```
#### File: tests/theano/test_relations.py
```python
import pytest
import numpy as np
import theano
import theano.tensor as tt
from functools import partial
from unification import var
from etuples import etuple, etuplize
from kanren import run, eq
from kanren.core import lall
from kanren.graph import reduceo, walko, applyo
from symbolic_pymc.theano.meta import mt
from symbolic_pymc.theano.opt import eval_and_reify_meta
from symbolic_pymc.theano.random_variables import observed, NormalRV, HalfCauchyRV, MvNormalRV
from symbolic_pymc.relations.theano import non_obs_walko
from symbolic_pymc.relations.theano.conjugates import conjugate
from symbolic_pymc.relations.theano.distributions import scale_loc_transform, constant_neq
from symbolic_pymc.relations.theano.linalg import normal_normal_regression, normal_qr_transform
def test_constant_neq():
q_lv = var()
res = run(0, q_lv, eq(q_lv, mt(1)), constant_neq(q_lv, np.array(1.0)))
assert not res
# TODO: If `constant_neq` was a true constraint, this would work.
# res = run(0, q_lv, constant_neq(q_lv, np.array(1.0)), eq(q_lv, mt(1)))
# assert not res
# TODO: If `constant_neq` was a true constraint, this would work.
# res = run(0, q_lv, constant_neq(q_lv, np.array(1.0)), eq(q_lv, mt(2)))
# assert res == (mt(2),)
res = run(0, q_lv, eq(q_lv, mt(2)), constant_neq(q_lv, np.array(1.0)))
assert res == (mt(2),)
def test_scale_loc_transform():
tt.config.compute_test_value = "ignore"
rand_state = theano.shared(np.random.RandomState())
mu_a = NormalRV(0.0, 100 ** 2, name="mu_a", rng=rand_state)
sigma_a = HalfCauchyRV(5, name="sigma_a", rng=rand_state)
mu_b = NormalRV(0.0, 100 ** 2, name="mu_b", rng=rand_state)
sigma_b = HalfCauchyRV(5, name="sigma_b", rng=rand_state)
county_idx = np.r_[1, 1, 2, 3]
# We want the following for a, b:
# N(m, S) -> m + N(0, 1) * S
a = NormalRV(mu_a, sigma_a, size=(len(county_idx),), name="a", rng=rand_state)
b = NormalRV(mu_b, sigma_b, size=(len(county_idx),), name="b", rng=rand_state)
radon_est = a[county_idx] + b[county_idx] * 7
eps = HalfCauchyRV(5, name="eps", rng=rand_state)
radon_like = NormalRV(radon_est, eps, name="radon_like", rng=rand_state)
radon_like_rv = observed(tt.as_tensor_variable(np.r_[1.0, 2.0, 3.0, 4.0]), radon_like)
q_lv = var()
(expr_graph,) = run(
1, q_lv, non_obs_walko(partial(reduceo, scale_loc_transform), radon_like_rv, q_lv)
)
radon_like_rv_opt = expr_graph.reify()
assert radon_like_rv_opt.owner.op == observed
radon_like_opt = radon_like_rv_opt.owner.inputs[1]
radon_est_opt = radon_like_opt.owner.inputs[0]
# These should now be `tt.add(mu_*, ...)` outputs.
a_opt = radon_est_opt.owner.inputs[0].owner.inputs[0]
b_opt = radon_est_opt.owner.inputs[1].owner.inputs[0].owner.inputs[0]
# Make sure NormalRV gets replaced with an addition
assert a_opt.owner.op == tt.add
assert b_opt.owner.op == tt.add
# Make sure the first term in the addition is the old NormalRV mean
mu_a_opt = a_opt.owner.inputs[0].owner.inputs[0]
assert "mu_a" == mu_a_opt.name == mu_a.name
mu_b_opt = b_opt.owner.inputs[0].owner.inputs[0]
assert "mu_b" == mu_b_opt.name == mu_b.name
# Make sure the second term in the addition is the standard NormalRV times
# the old std. dev.
assert a_opt.owner.inputs[1].owner.op == tt.mul
assert b_opt.owner.inputs[1].owner.op == tt.mul
sigma_a_opt = a_opt.owner.inputs[1].owner.inputs[0].owner.inputs[0]
assert sigma_a_opt.owner.op == sigma_a.owner.op
sigma_b_opt = b_opt.owner.inputs[1].owner.inputs[0].owner.inputs[0]
assert sigma_b_opt.owner.op == sigma_b.owner.op
a_std_norm_opt = a_opt.owner.inputs[1].owner.inputs[1]
assert a_std_norm_opt.owner.op == NormalRV
assert a_std_norm_opt.owner.inputs[0].data == 0.0
assert a_std_norm_opt.owner.inputs[1].data == 1.0
b_std_norm_opt = b_opt.owner.inputs[1].owner.inputs[1]
assert b_std_norm_opt.owner.op == NormalRV
assert b_std_norm_opt.owner.inputs[0].data == 0.0
assert b_std_norm_opt.owner.inputs[1].data == 1.0
def test_mvnormal_conjugate():
"""Test that we can produce the closed-form distribution for the conjugate
multivariate normal-regression with normal-prior model.
"""
# import symbolic_pymc.theano.meta as tm
#
# tm.load_dispatcher()
tt.config.cxx = ""
tt.config.compute_test_value = "ignore"
a_tt = tt.vector("a")
R_tt = tt.matrix("R")
F_t_tt = tt.matrix("F")
V_tt = tt.matrix("V")
a_tt.tag.test_value = np.r_[1.0, 0.0]
R_tt.tag.test_value = np.diag([10.0, 10.0])
F_t_tt.tag.test_value = np.c_[-2.0, 1.0]
V_tt.tag.test_value = np.diag([0.5])
beta_rv = MvNormalRV(a_tt, R_tt, name="\\beta")
E_y_rv = F_t_tt.dot(beta_rv)
Y_rv = MvNormalRV(E_y_rv, V_tt, name="Y")
y_tt = tt.as_tensor_variable(np.r_[-3.0])
y_tt.name = "y"
Y_obs = observed(y_tt, Y_rv)
q_lv = var()
(expr_graph,) = run(1, q_lv, walko(conjugate, Y_obs, q_lv))
fgraph_opt = expr_graph.eval_obj
fgraph_opt_tt = fgraph_opt.reify()
# Check that the SSE has decreased from prior to posterior.
# TODO: Use a better test.
beta_prior_mean_val = a_tt.tag.test_value
F_val = F_t_tt.tag.test_value
beta_post_mean_val = fgraph_opt_tt.owner.inputs[0].tag.test_value
priorp_err = np.square(y_tt.data - F_val.dot(beta_prior_mean_val)).sum()
postp_err = np.square(y_tt.data - F_val.dot(beta_post_mean_val)).sum()
# First, make sure the prior and posterior means are simply not equal.
with pytest.raises(AssertionError):
np.testing.assert_array_equal(priorp_err, postp_err)
# Now, make sure there's a decrease (relative to the observed point).
np.testing.assert_array_less(postp_err, priorp_err)
@pytest.mark.xfail(strict=True)
def test_normal_normal_regression():
tt.config.compute_test_value = "ignore"
theano.config.cxx = ""
np.random.seed(9283)
N = 10
M = 3
a_tt = tt.vector("a")
R_tt = tt.vector("R")
X_tt = tt.matrix("X")
V_tt = tt.vector("V")
a_tt.tag.test_value = np.random.normal(size=M)
R_tt.tag.test_value = np.abs(np.random.normal(size=M))
X = np.random.normal(10, 1, size=N)
X = np.c_[np.ones(10), X, X * X]
X_tt.tag.test_value = X
V_tt.tag.test_value = np.ones(N)
beta_rv = NormalRV(a_tt, R_tt, name="\\beta")
E_y_rv = X_tt.dot(beta_rv)
E_y_rv.name = "E_y"
Y_rv = NormalRV(E_y_rv, V_tt, name="Y")
y_tt = tt.as_tensor_variable(Y_rv.tag.test_value)
y_tt.name = "y"
y_obs_rv = observed(y_tt, Y_rv)
y_obs_rv.name = "y_obs"
#
# Use the relation with identify/match `Y`, `X` and `beta`.
#
y_args_tail_lv, b_args_tail_lv = var(), var()
beta_lv = var()
y_args_lv, y_lv, Y_lv, X_lv = var(), var(), var(), var()
(res,) = run(
1,
(beta_lv, y_args_tail_lv, b_args_tail_lv),
applyo(mt.observed, y_args_lv, y_obs_rv),
eq(y_args_lv, (y_lv, Y_lv)),
normal_normal_regression(Y_lv, X_lv, beta_lv, y_args_tail_lv, b_args_tail_lv),
)
# TODO FIXME: This would work if non-op parameters (e.g. names) were covered by
# `operator`/`car`. See `TheanoMetaOperator`.
assert res[0].eval_obj.obj == beta_rv
assert res[0] == etuplize(beta_rv)
assert res[1] == etuplize(Y_rv)[2:]
assert res[2] == etuplize(beta_rv)[1:]
#
# Use the relation with to produce `Y` from given `X` and `beta`.
#
X_new_mt = mt(tt.eye(N, M))
beta_new_mt = mt(NormalRV(0, 1, size=M))
Y_args_cdr_mt = etuplize(Y_rv)[2:]
Y_lv = var()
(res,) = run(1, Y_lv, normal_normal_regression(Y_lv, X_new_mt, beta_new_mt, Y_args_cdr_mt))
Y_out_mt = res.eval_obj
Y_new_mt = etuple(mt.NormalRV, mt.dot(X_new_mt, beta_new_mt)) + Y_args_cdr_mt
Y_new_mt = Y_new_mt.eval_obj
assert Y_out_mt == Y_new_mt
@pytest.mark.xfail(strict=True)
def test_normal_qr_transform():
np.random.seed(9283)
N = 10
M = 3
X_tt = tt.matrix("X")
X = np.random.normal(10, 1, size=N)
X = np.c_[np.ones(10), X, X * X]
X_tt.tag.test_value = X
V_tt = tt.vector("V")
V_tt.tag.test_value = np.ones(N)
a_tt = tt.vector("a")
R_tt = tt.vector("R")
a_tt.tag.test_value = np.random.normal(size=M)
R_tt.tag.test_value = np.abs(np.random.normal(size=M))
beta_rv = NormalRV(a_tt, R_tt, name="\\beta")
E_y_rv = X_tt.dot(beta_rv)
E_y_rv.name = "E_y"
Y_rv = NormalRV(E_y_rv, V_tt, name="Y")
y_tt = tt.as_tensor_variable(Y_rv.tag.test_value)
y_tt.name = "y"
y_obs_rv = observed(y_tt, Y_rv)
y_obs_rv.name = "y_obs"
(res,) = run(1, var("q"), normal_qr_transform(y_obs_rv, var("q")))
new_node = {eval_and_reify_meta(k): eval_and_reify_meta(v) for k, v in res}
# Make sure the old-to-new `beta` conversion is correct.
t_Q, t_R = np.linalg.qr(X)
Coef_new_value = np.linalg.inv(t_R)
np.testing.assert_array_almost_equal(
Coef_new_value, new_node[beta_rv].owner.inputs[0].tag.test_value
)
# Make sure the new `beta_tilde` has the right standard normal distribution
# parameters.
beta_tilde_node = new_node[beta_rv].owner.inputs[1]
np.testing.assert_array_almost_equal(
np.r_[0.0, 0.0, 0.0], beta_tilde_node.owner.inputs[0].tag.test_value
)
np.testing.assert_array_almost_equal(
np.r_[1.0, 1.0, 1.0], beta_tilde_node.owner.inputs[1].tag.test_value
)
Y_new = new_node[y_obs_rv].owner.inputs[1]
assert Y_new.owner.inputs[0].owner.inputs[1] == beta_tilde_node
np.testing.assert_array_almost_equal(t_Q, Y_new.owner.inputs[0].owner.inputs[0].tag.test_value)
def test_basic_scan_transform():
def f_pow2(x_tm1):
return 2 * x_tm1
state = theano.tensor.scalar("state")
n_steps = theano.tensor.iscalar("nsteps")
output, updates = theano.scan(
f_pow2, [], state, [], n_steps=n_steps, truncate_gradient=-1, go_backwards=False
)
assert np.array_equal(output.eval({state: 1.0, n_steps: 4}), np.r_[2.0, 4.0, 8.0, 16.0])
def mul_trans(in_expr, out_expr):
"""Equate `2 * x` with `5 * x` in a Theano `scan`.
I.e. from left-to-right, replace `2 * x[t-1]` with `5 * x[t-1]`.
"""
arg_lv = var()
inputs_lv, info_lv = var(), var()
in_scan_lv = mt.Scan(inputs_lv, [mt.mul(2, arg_lv)], info_lv)
out_scan_lv = mt.Scan(inputs_lv, [mt.mul(5, arg_lv)], info_lv)
return lall(eq(in_expr, in_scan_lv), eq(out_expr, out_scan_lv))
q_lv = var()
(output_mt,) = run(1, q_lv, walko(partial(reduceo, mul_trans), output, q_lv))
output_new = output_mt.eval_obj.reify()
assert output_new != output
assert np.array_equal(output_new.eval({state: 1.0, n_steps: 4}), np.r_[5.0, 25.0, 125.0, 625.0])
```
#### File: tests/theano/test_rv.py
```python
import numpy as np
import theano.tensor as tt
from pytest import importorskip
from symbolic_pymc.theano.random_variables import NormalRV, MvNormalRV, PolyaGammaRV
def rv_numpy_tester(rv, *params, size=None):
"""Test for correspondence between `RandomVariable` and NumPy shape and
broadcast dimensions.
"""
tt.config.compute_test_value = "ignore"
test_rv = rv(*params, size=size)
param_vals = [tt.gof.op.get_test_value(p) for p in params]
size_val = None if size is None else tt.gof.op.get_test_value(size)
test_val = getattr(np.random, rv.name)(*param_vals, size=size_val)
test_shp = np.shape(test_val)
# This might be a little too harsh, since purely symbolic `tensor.vector`
# inputs have no broadcastable information, yet, they can take
# broadcastable values.
# E.g.
# x_tt = tt.vector('x')
# # non-symbolic value is broadcastable!
# x_tt.tag.test_value = np.array([5])
# # non-symbolic value is not broadcastable.
# x_tt.tag.test_value = np.array([5, 4])
#
# In the end, there's really no clear way to determine this without full
# evaluation of a symbolic node, and that mostly defeats the purpose.
# Unfortunately, this is what PyMC3 resorts to when constructing its
# `TensorType`s (and shapes).
test_bcast = [s == 1 for s in test_shp]
np.testing.assert_array_equal(test_rv.type.broadcastable, test_bcast)
eval_args = {
p: v
for p, v in zip(params, param_vals)
if isinstance(p, tt.Variable) and not isinstance(p, tt.Constant)
}
np.testing.assert_array_equal(test_rv.shape.eval(eval_args), test_shp)
np.testing.assert_array_equal(np.shape(test_rv.eval(eval_args)), test_shp)
def test_normalrv():
rv_numpy_tester(NormalRV, 0.0, 1.0)
rv_numpy_tester(NormalRV, 0.0, 1.0, size=[3])
# Broadcast sd over independent means...
rv_numpy_tester(NormalRV, [0.0, 1.0, 2.0], 1.0)
rv_numpy_tester(NormalRV, [0.0, 1.0, 2.0], 1.0, size=[3, 3])
rv_numpy_tester(NormalRV, [0], [1], size=[1])
rv_numpy_tester(NormalRV, tt.as_tensor_variable([0]), [1], size=[1])
rv_numpy_tester(NormalRV, tt.as_tensor_variable([0]), [1], size=tt.as_tensor_variable([1]))
def test_mvnormalrv():
rv_numpy_tester(MvNormalRV, [0], np.diag([1]))
rv_numpy_tester(MvNormalRV, [0], np.diag([1]), size=[1])
rv_numpy_tester(MvNormalRV, [0], np.diag([1]), size=[4])
rv_numpy_tester(MvNormalRV, [0], np.diag([1]), size=[4, 1])
rv_numpy_tester(MvNormalRV, [0], np.diag([1]), size=[4, 1, 1])
rv_numpy_tester(MvNormalRV, [0], np.diag([1]), size=[1, 5, 8])
rv_numpy_tester(MvNormalRV, [0, 1, 2], np.diag([1, 1, 1]))
# Broadcast cov matrix across independent means?
# Looks like NumPy doesn't support that (and it's probably better off for
# it).
# rv_numpy_tester(MvNormalRV, [[0, 1, 2], [4, 5, 6]], np.diag([1, 1, 1]))
def test_polyagammarv():
_ = importorskip("pypolyagamma")
# Sampled values should be scalars
pg_rv = PolyaGammaRV(1.1, -10.5)
assert pg_rv.eval().shape == ()
pg_rv = PolyaGammaRV(1.1, -10.5, size=[1])
assert pg_rv.eval().shape == (1,)
pg_rv = PolyaGammaRV(1.1, -10.5, size=[2, 3])
bcast_smpl = pg_rv.eval()
assert bcast_smpl.shape == (2, 3)
# Make sure they're not all equal
assert np.all(np.abs(np.diff(bcast_smpl.flat)) > 0.0)
pg_rv = PolyaGammaRV(np.r_[1.1, 3], -10.5)
bcast_smpl = pg_rv.eval()
assert bcast_smpl.shape == (2,)
assert np.all(np.abs(np.diff(bcast_smpl.flat)) > 0.0)
pg_rv = PolyaGammaRV(np.r_[1.1, 3], -10.5, size=(2, 3))
bcast_smpl = pg_rv.eval()
assert bcast_smpl.shape == (2, 2, 3)
assert np.all(np.abs(np.diff(bcast_smpl.flat)) > 0.0)
``` |
{
"source": "josephwinston/AutobahnCpp",
"score": 2
} |
#### File: AutobahnCpp/test/serialize.py
```python
import sys
import binascii
import msgpack
import struct
from autobahn.wamp.serializer import MsgPackSerializer
from autobahn.wamp import message
from autobahn.wamp import role
serializer = MsgPackSerializer()
serializer._serializer.ENABLE_V5 = False
def send(msg):
bytes, _ = serializer.serialize(msg)
l = struct.pack("!I", len(bytes))
sys.stdout.write(l)
sys.stdout.write(bytes)
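# Each serialized message is framed on stdout as a 4-byte big-endian unsigned
# length prefix (struct format "!I") followed by the msgpack payload, so the
# receiving end can split the stream back into individual WAMP messages.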
msgs = []
## HELLO
##
roles = [
role.RolePublisherFeatures(),
role.RoleSubscriberFeatures(),
role.RoleCallerFeatures(),
role.RoleCalleeFeatures()
]
msgs.append(message.Hello("foobar", roles))
## CHALLENGE
##
msgs.append(message.Challenge("cookie"))
## HEARTBEAT
##
msgs.append(message.Heartbeat(3, 7, "throw me away"))
for msg in msgs:
send(msg)
``` |
{
"source": "josephwinston/cudarray",
"score": 2
} |
#### File: cudarray/examples/benchmark_conv.py
```python
import time
import numpy as np
import theano
import theano.tensor as T
from theano.sandbox.cuda.basic_ops import gpu_from_host, host_from_gpu
from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs
from pylearn2.sandbox.cuda_convnet.weight_acts import WeightActs
from pylearn2.sandbox.cuda_convnet.img_acts import ImageActs
import cudarray as ca
def avg_running_time(fun):
n_iter = 20
start_time = time.time()
for _ in range(n_iter):
fun()
duration = time.time() - start_time
return duration / float(n_iter)
def allclose(a, b):
atol = 1e-3
rtol = 1e-3
return np.allclose(a, b, atol=atol, rtol=rtol)
def benchmark(n_imgs, n_channels, img_shape, n_filters, filter_shape, pad):
print('\nn_imgs: %i, n_channels: %i, img_shape: (%i, %i), '
% ((n_imgs, n_channels) + img_shape)
+ 'n_filters: %i, filter_shape: (%i, %i), pad: %i'
% ((n_filters,) + filter_shape + (pad,)))
# Setup arrays
padding = (pad, pad)
strides = (1, 1)
img_h, img_w = img_shape
filter_h, filter_w = filter_shape
convout_h = img_h + 2*pad - filter_h + 1
convout_w = img_w + 2*pad - filter_w + 1
imgs_bc01_shape = (n_imgs, n_channels, img_h, img_w)
filters_bc01_shape = (n_filters, n_channels, filter_h, filter_w)
imgs_bc01 = np.random.randn(n_imgs, n_channels, img_h, img_w)
imgs_c01b = np.transpose(imgs_bc01, (1, 2, 3, 0))
filters_fc01 = np.random.randn(n_filters, n_channels, filter_h, filter_w)
filters_c01f = np.transpose(filters_fc01, (1, 2, 3, 0))
convout_bc01 = np.random.randn(n_imgs, n_filters, convout_h, convout_w)
convout_c01b = np.transpose(convout_bc01, (1, 2, 3, 0))
imgs_bc01_t = theano.shared(imgs_bc01.astype(theano.config.floatX))
imgs_c01b_t = theano.shared(imgs_c01b.astype(theano.config.floatX))
filters_fc01_t = theano.shared(filters_fc01.astype(theano.config.floatX))
filters_c01f_t = theano.shared(filters_c01f.astype(theano.config.floatX))
convout_bc01_t = theano.shared(convout_bc01.astype(theano.config.floatX))
convout_c01b_t = theano.shared(convout_c01b.astype(theano.config.floatX))
imgs_bc01_ca = ca.array(imgs_bc01)
filters_fc01_ca = ca.array(filters_fc01)
convout_bc01_ca = ca.array(convout_bc01)
# Forward propagation
print('fprop')
convout_cc_op = FilterActs(stride=1, partial_sum=4, pad=pad)
convout_cc_expr = convout_cc_op(imgs_c01b_t, filters_c01f_t)
convout_cc_fun = theano.function([], convout_cc_expr)
convout_cc = convout_cc_fun()
convout_cc = np.transpose(convout_cc, (3, 0, 1, 2))
def convout_ca_fun():
convout = ca.nnet.conv_bc01(imgs_bc01_ca, filters_fc01_ca, padding,
strides)
return convout
convout_ca = np.array(convout_ca_fun())
print(' correct: ' + str(allclose(convout_ca, convout_cc)))
duration_cc = avg_running_time(convout_cc_fun)
duration_ca = avg_running_time(convout_ca_fun)
print(' avg. duration: cuda_convnet: %.4f ca: %.4f'
% (duration_cc, duration_ca))
print(' speedup: %.2f' % (duration_cc/duration_ca))
del convout_cc_op
del convout_cc_expr
del convout_cc_fun
# Back propagation, imgs
print('bprop_imgs')
dimgs_cc_op = ImageActs(stride=1, partial_sum=1, pad=pad)
dimgs_cc_expr = dimgs_cc_op(convout_c01b_t, filters_c01f_t)
dimgs_cc_fun = theano.function([], dimgs_cc_expr)
dimgs_cc = dimgs_cc_fun()
dimgs_cc = np.transpose(dimgs_cc, (3, 0, 1, 2))
def dimgs_ca_fun():
return ca.nnet.conv_bc01_bprop_imgs(filters_fc01_ca, convout_bc01_ca,
img_shape, padding, strides)
dimgs_ca = np.array(dimgs_ca_fun())
print(' correct: ' + str(allclose(dimgs_ca, dimgs_cc)))
duration_cc = avg_running_time(dimgs_cc_fun)
duration_ca = avg_running_time(dimgs_ca_fun)
print(' avg. duration: cuda_convnet: %.4f ca: %.4f'
% (duration_cc, duration_ca))
print(' speedup: %.2f' % (duration_cc/duration_ca))
del dimgs_cc_op
del dimgs_cc_expr
del dimgs_cc_fun
# Back propagation, filters
dfilters_cc_op = WeightActs(stride=1, partial_sum=1, pad=pad)
dfilters_cc_expr = dfilters_cc_op(imgs_c01b_t, convout_c01b_t,
T.as_tensor_variable(filter_shape))
dfilters_cc_fun = theano.function([], dfilters_cc_expr)
dfilters_cc = dfilters_cc_fun()[0]
dfilters_cc = np.transpose(dfilters_cc, (3, 0, 1, 2))
def dfilters_ca_fun():
return ca.nnet.conv_bc01_bprop_filters(imgs_bc01_ca, convout_bc01_ca,
filter_shape, padding, strides)
dfilters_ca = np.array(dfilters_ca_fun())
print('bprop_filters')
print(' correct: ' + str(allclose(dfilters_ca, dfilters_cc)))
duration_cc = avg_running_time(dfilters_cc_fun)
duration_ca = avg_running_time(dfilters_ca_fun)
print(' avg. duration: cuda_convnet: %.4f ca: %.4f'
% (duration_cc, duration_ca))
print(' speedup: %.2f' % (duration_cc/duration_ca))
def run():
np.random.seed(1)
# Configurations are given in the form
# (n_imgs, n_channels, img_shape, n_filters, filter_shape, padding)
configurations = [
# From the original paper
# http://arxiv.org/abs/1312.5851
(128, 3, (32, 32), 96, (11, 11), 0),
(128, 96, (32, 32), 256, (7, 7), 0),
(128, 256, (16, 16), 384, (5, 5), 0),
(128, 384, (16, 16), 384, (5, 5), 0),
(128, 384, (16, 16), 384, (3, 3), 0),
# From <NAME>
# http://benanne.github.io/2014/05/12/fft-convolutions-in-theano.html
(64, 3, (96, 96), 128, (16, 16), 0),
(64, 128, (32, 32), 64, (8, 8), 0),
(128, 32, (54, 54), 64, (6, 6), 0),
(128, 128, (16, 16), 128, (8, 8), 0),
(128, 1024, (32, 32), 128, (4, 4), 0),
# Exotic shapes and padding
(5, 3, (5, 5), 16, (3, 3), 1),
(64, 32, (32, 32), 32, (5, 5), 2),
(64, 1, (17, 19), 32, (7, 7), 4),
(64, 3, (9, 16), 32, (7, 7), 4),
# Typical CNN layers for CIFAR-10
(128, 3, (32, 32), 64, (5, 5), 2),
(128, 64, (16, 16), 64, (5, 5), 2),
(128, 64, (8, 8), 64, (5, 5), 2),
]
for conf in configurations:
benchmark(*conf)
if __name__ == '__main__':
run()
``` |
{
"source": "josephwinston/gitfs",
"score": 3
} |
#### File: utils/decorators/while_not.py
```python
import time
import threading
from functools import wraps
class while_not(object):
def __init__(self, event, wait=0.2):
self.event = event
self.wait = wait
def __call__(self, f):
@wraps(f)
def decorated(obj, *args, **kwargs):
if not self.event:
raise ValueError("Except that %s to not be None %s" %
obj.__class__.__name__)
if not isinstance(self.event, threading._Event):
raise TypeError("%s should be of type threading.Event" %
self.event)
while self.event.is_set():
time.sleep(self.wait)
return f(obj, *args, **kwargs)
return decorated
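# Illustrative usage sketch (hypothetical names, not part of the original
# module): calls to the decorated method block while the event is set.
#
#   sync_is_running = threading.Event()
#
#   class Worker(object):
#       @while_not(sync_is_running)
#       def push(self):
#           ...  # only runs once sync_is_running has been cleared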
```
#### File: gitfs/views/commit.py
```python
import os
from errno import ENOENT
from pygit2 import GIT_FILEMODE_TREE, GIT_FILEMODE_BLOB,\
GIT_FILEMODE_BLOB_EXECUTABLE, GIT_FILEMODE_LINK
from fuse import FuseOSError
from gitfs.utils import split_path_into_components
from gitfs.cache import lru_cache
from .read_only import ReadOnlyView
VALID_FILE_MODES = [GIT_FILEMODE_BLOB, GIT_FILEMODE_BLOB_EXECUTABLE,
GIT_FILEMODE_LINK, GIT_FILEMODE_TREE]
class CommitView(ReadOnlyView):
def __init__(self, *args, **kwargs):
super(CommitView, self).__init__(*args, **kwargs)
try:
self.commit = self.repo.revparse_single(self.commit_sha1)
except KeyError:
raise FuseOSError(ENOENT)
def _validate_commit_path(self, tree, path_components):
"""
Checks if a particular path is valid in the context of the commit
which is being browsed.
:param tree: a commit tree or a pygit2 tree
:param path_components: the components of the path to be checked
as a list (e.g.: ['totally', 'random', 'path'])
:type path_components: list
:returns: True if the path is valid, False otherwise
"""
is_valid = False
for entry in tree:
valid_mode = (entry.name == path_components[0] and
entry.filemode in VALID_FILE_MODES)
if valid_mode and len(path_components) == 1:
return True
elif valid_mode and len(path_components) > 1:
is_valid = self._validate_commit_path(self.repo[entry.id],
path_components[1:])
if is_valid:
return is_valid
return is_valid
def read(self, path, size, offset, fh):
data = self.repo.get_blob_data(self.commit.tree, path)
return data[offset:offset + size]
def readlink(self, path):
obj_name = os.path.split(path)[1]
return self.repo.get_blob_data(self.commit.tree, obj_name)
def getattr(self, path, fh=None):
'''
Returns a dictionary with keys identical to the stat C structure of
stat(2).
st_atime, st_mtime and st_ctime should be floats.
        NOTE: There is an incompatibility between Linux and Mac OS X
concerning st_nlink of directories. Mac OS X counts all files inside
the directory, while Linux counts only the subdirectories.
'''
if not path:
return
attrs = super(CommitView, self).getattr(path, fh)
attrs.update({
'st_ctime': self.commit.commit_time,
'st_mtime': self.commit.commit_time,
})
stats = self.repo.get_git_object_default_stats(self.commit.tree, path)
if stats is None:
raise FuseOSError(ENOENT)
attrs.update(stats)
return attrs
def access(self, path, mode):
if hasattr(self, "relative_path") and self.relative_path != '/':
path_elems = split_path_into_components(self.relative_path)
is_valid_path = self._validate_commit_path(self.commit.tree,
path_elems)
if not is_valid_path:
raise FuseOSError(ENOENT)
return 0
def readdir(self, path, fh):
dir_tree = self.commit.tree
# If the relative_path is not empty, fetch the git tree corresponding
# to the directory that we are in.
tree_name = os.path.split(path)[1]
if tree_name:
dir_tree = self.repo.get_git_object(self.commit.tree, path)
dir_entries = ['.', '..'] + [entry.name for entry in dir_tree]
for entry in dir_entries:
yield entry
```
#### File: tests/integrations/base.py
```python
from datetime import datetime
import os
import subprocess
class Sh:
def __init__(self, cwd=None):
self.command = ""
self.cwd = cwd
def __getattr__(self, item):
self.command += item + " "
return self
def __call__(self, *args, **kwargs):
command = self.command + " ".join(args)
self.command = ""
return subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,
cwd=self.cwd).stdout.read()
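# Illustrative usage sketch (hypothetical path): attribute access accumulates
# words into a shell command, which runs when the object is called, e.g.
#   sh = Sh(cwd="/tmp/repo")
#   sh.git.status("--short")     # runs `git status --short` in /tmp/repo
#   sh.git.log("--pretty=%H")    # runs `git log --pretty=%H`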
class pull:
def __init__(self, sh):
self.sh = sh
def __enter__(self):
self.sh.git.pull("origin", "master")
def __exit__(self, exc_type, exc_val, exc_tb):
pass
class BaseTest(object):
def setup(self):
self.mount_path = "%s" % os.environ["MOUNT_PATH"]
self.repo_name = os.environ["REPO_NAME"]
self.repo_path = os.environ["REPO_PATH"]
self.current_path = "%s/current" % self.mount_path
self.sh = Sh(os.environ["REMOTE"])
self.last_commit_hash = self.commit_hash()
@property
def today(self):
now = datetime.now()
return now.strftime("%Y-%m-%d")
def commit_hash(self, index=0):
return self.sh.git.log("--pretty=%H").splitlines()[index]
def commit_message(self, index=0):
return self.sh.git.log("--pretty=%B").splitlines()[index]
def get_commits_by_date(self, date=None):
if date is None:
date = self.today
lines = self.sh.git.log("--before", '"%s 23:59:59"' % date,
"--after", '"%s 00:00:00"' % date,
'--pretty="%ai %H"').splitlines()
lines = map(lambda line: line.split(), lines)
return map(lambda tokens: "%s-%s" % (tokens[1].replace(":", "-"),
tokens[3][:10]), lines)
def get_commit_dates(self):
return list(set(self.sh.git.log("--pretty=%ad", "--date=short").
splitlines()))
def assert_commit_message(self, message):
assert message == self.commit_message()
def assert_new_commit(self, steps=1):
current_index = 0
while self.commit_hash(current_index) != self.last_commit_hash:
current_index += 1
self.last_commit_hash = self.commit_hash(0)
assert current_index == steps
def assert_file_content(self, file_path, content):
with open(self.repo_path + "/" + file_path) as f:
assert f.read() == content
```
#### File: tests/workers/test_sync.py
```python
from Queue import Empty
import pygit2
import pytest
from mock import MagicMock, patch
from gitfs.worker.sync import SyncWorker
class TestSyncWorker(object):
def test_work(self):
mocked_queue = MagicMock()
mocked_idle = MagicMock(side_effect=ValueError)
mocked_queue.get.side_effect = Empty()
worker = SyncWorker("name", "email", "name", "email",
strategy="strategy", commit_queue=mocked_queue)
worker.on_idle = mocked_idle
worker.timeout = 1
worker.min_idle_times = 1
with pytest.raises(ValueError):
worker.work()
mocked_queue.get.assert_called_once_with(timeout=1, block=True)
assert mocked_idle.call_count == 1
def test_on_idle_with_commits_and_merges(self):
mocked_sync = MagicMock()
mocked_syncing = MagicMock()
mocked_commit = MagicMock()
mocked_syncing.is_set.return_value = False
with patch.multiple("gitfs.worker.sync", syncing=mocked_syncing,
writers=MagicMock(value=0)):
worker = SyncWorker("name", "email", "name", "email",
strategy="strategy")
worker.commits = "commits"
worker.commit = mocked_commit
worker.sync = mocked_sync
commits = worker.on_idle()
mocked_commit.assert_called_once_with("commits")
assert mocked_syncing.set.call_count == 1
assert mocked_sync.call_count == 1
assert commits is None
def test_merge(self):
mocked_strategy = MagicMock()
mocked_repo = MagicMock()
upstream = "origin"
branch = "master"
worker = SyncWorker("name", "email", "name", "email",
strategy=mocked_strategy,
repository=mocked_repo,
upstream=upstream, branch=branch)
worker.merge()
mocked_strategy.assert_called_once_with(branch, branch, upstream)
assert mocked_repo.commits.update.call_count == 1
def test_sync(self):
upstream = "origin"
branch = "master"
mocked_repo = MagicMock()
mocked_merge = MagicMock()
mocked_sync_done = MagicMock()
mocked_syncing = MagicMock()
mocked_push_successful = MagicMock()
mocked_fetch = MagicMock()
mocked_strategy = MagicMock()
mocked_repo.behind = True
mocked_push_successful.set.side_effect = ValueError
with patch.multiple('gitfs.worker.sync', sync_done=mocked_sync_done,
syncing=mocked_syncing,
push_successful=mocked_push_successful,
fetch=mocked_fetch):
worker = SyncWorker("name", "email", "name", "email",
repository=mocked_repo,
strategy=mocked_strategy,
upstream=upstream, branch=branch)
worker.merge = mocked_merge
worker.sync()
assert mocked_syncing.clear.call_count == 1
assert mocked_push_successful.clear.call_count == 1
assert mocked_sync_done.clear.call_count == 1
assert mocked_sync_done.set.call_count == 1
assert mocked_fetch.set.call_count == 1
assert mocked_push_successful.set.call_count == 1
assert mocked_repo.behind is False
mocked_repo.push.assert_called_once_with(upstream, branch)
def test_commit_with_just_one_job(self):
mocked_repo = MagicMock()
message = 'just a simple message'
jobs = [{'params': {'message': message}}]
author = ("name", "email")
worker = SyncWorker(author[0], author[1], author[0], author[1],
strategy="strategy",
repository=mocked_repo)
worker.commit(jobs)
mocked_repo.commit.assert_called_once_with(message, author, author)
assert mocked_repo.commits.update.call_count == 1
strategy = pygit2.GIT_CHECKOUT_FORCE
mocked_repo.checkout_head.assert_called_once_with(strategy=strategy)
def test_commit_with_more_than_one_job(self):
mocked_repo = MagicMock()
message = 'just a simple message'
jobs = [{'params': {'message': message, 'add': ['path1', 'path2'],
'remove': []}},
{'params': {'message': message, 'remove': ['path2'],
'add': []}}]
author = ("name", "email")
worker = SyncWorker(author[0], author[1], author[0], author[1],
strategy="strategy",
repository=mocked_repo)
worker.commit(jobs)
asserted_message = "Update 2 items"
mocked_repo.commit.assert_called_once_with(asserted_message, author,
author)
assert mocked_repo.commits.update.call_count == 1
strategy = pygit2.GIT_CHECKOUT_FORCE
mocked_repo.checkout_head.assert_called_once_with(strategy=strategy)
def test_switch_to_idle_mode(self):
mocked_queue = MagicMock()
mocked_idle = MagicMock(side_effect=ValueError)
mocked_idle_event = MagicMock()
mocked_queue.get.side_effect = Empty()
with patch.multiple('gitfs.worker.sync', idle=mocked_idle_event):
worker = SyncWorker("name", "email", "name", "email",
strategy="strategy", commit_queue=mocked_queue)
worker.on_idle = mocked_idle
worker.timeout = 1
worker.min_idle_times = -1
with pytest.raises(ValueError):
worker.work()
mocked_queue.get.assert_called_once_with(timeout=1, block=True)
assert mocked_idle_event.set.call_count == 1
assert mocked_idle.call_count == 1
``` |
{
"source": "josephwkim/schedulize",
"score": 4
} |
#### File: josephwkim/schedulize/audit_parser.py
```python
import numpy as np
import re
import pandas as pd
def audit_info(fpath, fullPath=False,string=False):
courses_dict = {'Course Number': [], 'Grade': []}
if fullPath == False and string == False:
text_lines = open("data\\audits\\"+fpath, "r").read().splitlines()
elif string == True:
text_lines = fpath.splitlines()
else:
text_lines = open(fpath, "r").read().splitlines()
past_courses = np.zeros((64, 2), dtype='int')
grades_dict = {"A": 4, "B": 3, "C": 2, "D": 1, "R": 0}
i = 0
for line in text_lines:
# Parse for all the course numbers xx-xxx
course_numbers = re.findall("\d\d\-\d\d\d", line)
if course_numbers != []:
# Course Number
past_courses[i, 0] = int(course_numbers[0][0:2]) * 1000 + int(
course_numbers[0][3:6])
# Course Grade (A=4, B=3, C=2, D=1, R=0, else=-1)
grade = line[52:53]
try:
grade_num = grades_dict[grade]
            except KeyError:
grade_num = -1
past_courses[i, 1] = grade_num
# Print all past course numbers and their respective grades
#print("Course:", past_courses[i, 0], "Grade:", past_courses[i, 1])
courses_dict['Course Number'].append(past_courses[i, 0])
courses_dict['Grade'].append(past_courses[i, 1])
i = i + 1
audit = pd.DataFrame(courses_dict)
return audit
def getGPA(audit):
grades = audit["Grade"]
total = 0
num = 0
for grade in grades:
if grade > -1:
total += grade
num += 1
if num == 0:
gpa = 3
else:
gpa = total / num
return gpa
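# Illustrative usage sketch (hypothetical file name): parse an audit file into
# a DataFrame of course numbers and grades, then compute the GPA over the
# courses that have a valid grade.
#   audit = audit_info("example_audit.txt", fullPath=True)
#   print(audit.head())
#   print("GPA:", getGPA(audit))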
```
#### File: josephwkim/schedulize/collaborative_filtering.py
```python
import numpy as np
import pandas as pd
import os
from audit_parser import audit_info
train_data = dict()
train_data["Audit"] = []
train_data["Schedule"] = []
X_audits = dict()
X_schedules = dict()
X_audits_grades = dict()
auditPath = "data/audits/"
schedulePath = "data/schedules/"
def loadAudits(path):
print("Loading Audits...")
def pullAudit(path):
if (os.path.isdir(path) == False):
if path[-3:] == "txt":
print(path)
audit = audit_info(path, fullPath=True)
train_data["Audit"].append(audit)
else:
for fileName in os.listdir(path):
pullAudit(path + "/" + fileName)
pullAudit(path)
print("Done!")
print()
def inputData(auditList, scheList):
individual = 0
def inputAudits(auditList):
nonlocal individual
for i in range(len(auditList)):
df = auditList[i]
courseNumbers = df["Course Number"]
courseGrades = df["Grade"]
X_audits[individual] = []
X_audits_grades[individual] = []
for j in range(len(courseNumbers)):
if courseGrades[j] >= 0:
str_course_num = str(courseNumbers[j])
if len(str_course_num) == 4:
str_course_num = "0" + str_course_num
X_audits[individual].append(str_course_num)
X_audits_grades[individual].append((str_course_num,
str(courseGrades[j])))
individual += 1
inputAudits(auditList)
def buildRecommender():
print("Building Recommender System...")
print()
loadAudits(auditPath)
inputData(train_data["Audit"], train_data["Schedule"])
X_data = []
X_courses = []
X_grades = []
for key, values in X_audits_grades.items():
X_data.append(values)
for course_grade in X_data:
course, grade = zip(*course_grade)
X_courses.append(course)
X_grades.append(grade)
X_courses = np.array(X_courses)
X_grades = np.array(X_grades)
allClasses = []
for student in X_courses:
for courses in student:
allClasses.append(courses)
allClasses = list(set(allClasses))
n, m = X_courses.shape[0], len(allClasses) # Num Users, Num Classes
dfR = pd.DataFrame(0, index=np.arange(len(X_courses)), columns=allClasses)
for row in range(n):
takenClasses = X_courses[row]
for col in dfR:
courseNum = str(col)
if courseNum in takenClasses:
dfR.loc[dfR.index[row], col] = 1
dfY = pd.DataFrame(0, index=np.arange(len(X_courses)), columns=allClasses)
for row in range(n):
takenClasses, earnedGrades = X_courses[row], X_grades[row]
for col in dfY:
courseNum = str(col)
if courseNum in takenClasses:
index = list(takenClasses).index(courseNum)
dfY.loc[dfY.index[row], col] = int(earnedGrades[index])
features = 20
Y = np.array(dfY).T # Matrix with Grades
X = np.random.rand(m, features)
Theta = np.random.rand(n, features)
R = np.array(dfR).T # Binary Matrix denoting Classes Taken
"""
print("n Students:", n)
print("m Classes:", m)
print("Y:", Y.shape)
print("R:", R.shape)
print("X:", X.shape)
print("Theta:", Theta.shape)
print()
"""
"""
    # scikit-learn's non-negative matrix factorization doesn't seem to work very well on this
from sklearn.decomposition import NMF
Y[Y < 0] = 0 # take care of lower bound
model = NMF(n_components=features, init='random', random_state=0)
X = model.fit_transform(Y)
Theta = model.components_.T
"""
# SKIPPED REGULARIZATION - ADD LATER
def costFunction(X, Y, Theta, R):
M = np.power((np.dot(X, Theta.T)) - Y, 2)
J = (1/2) * np.sum(np.multiply(R, M))
return J
def gradientFunction(X, Y, Theta, R):
grad_all = ((np.dot(X, Theta.T)) - Y)
grad_R = np.multiply(R, grad_all)
X_grad = np.zeros(X.shape)
Theta_grad = np.zeros(Theta.shape)
for k in range(X.shape[1]):
X_grad[:, k] = np.dot(grad_R, Theta[:, k])
for l in range(Theta.shape[1]):
Theta_grad[:, l] = np.dot(grad_R.T, X[:, l])
return X_grad, Theta_grad
print("Optimizing via Gradient Descent...")
iterations = 250
learning_rate = 0.01
for i in range(iterations):
cost = costFunction(X, Y, Theta, R)
X_grad, Theta_grad = gradientFunction(X, Y, Theta, R)
X -= learning_rate * X_grad
Theta -= learning_rate * Theta_grad
if (i + 1) == iterations:
print("Iteration", i + 1)
print("Cost:", cost)
print("Done!")
print()
return X, Theta, allClasses
def makePrediction(model, user):
X, Theta = model
p = np.dot(X, Theta.T)
predictions = p[:, user]
return sorted(predictions, reverse=True)
# List Predictions & Compile Departmental Scores
def compileDepartScores(courses, pList):
departDict = dict()
departCounter = dict()
for h in range(len(courses)):
course = courses[h]
depart = str(course)[0:2]
if depart not in departDict:
departDict[depart] = []
departCounter[depart] = []
for i in range(len(pList)):
course, prediction = courses[i], pList[i]
# print("Predicted Rating for", course, "is", prediction)
depart = str(course)[0:2]
departDict[depart].append(prediction)
departCounter[depart].append(1)
for key, values in departDict.items():
departDict[key] = np.sum(values) / len(values)
# return [(k, departDict[k]) for k in sorted(departDict, key=departDict.get, reverse=True)]
return departDict
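# Illustrative usage sketch (assumes audit files are available under
# data/audits/): factor the grade matrix, then score departments for one
# student. Rows of np.dot(X, Theta.T) stay aligned with allClasses, whereas
# makePrediction() returns the ratings sorted in descending order.
#   X, Theta, all_classes = buildRecommender()
#   student_scores = list(np.dot(X, Theta.T)[:, 0])
#   dept_scores = compileDepartScores(all_classes, student_scores)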
``` |
{
"source": "josephw-ml/model-analysis",
"score": 2
} |
#### File: fairness/post_export_metrics/fairness_indicators.py
```python
from typing import Any, Dict, List, Optional, Tuple
import tensorflow as tf
from tensorflow_model_analysis import types
from tensorflow_model_analysis.post_export_metrics import metric_keys
from tensorflow_model_analysis.post_export_metrics import post_export_metrics
from tensorflow_model_analysis.proto import metrics_for_slice_pb2 as metrics_pb2
from tensorflow_model_analysis.slicer import slicer_lib as slicer
# pylint: disable=protected-access
@post_export_metrics._export('fairness_indicators')
class _FairnessIndicators(post_export_metrics._ConfusionMatrixBasedMetric):
"""Metrics that can be used to evaluate the following fairness metrics.
* Demographic Parity or Equality of Outcomes.
For each slice measure the Positive* Rate, or the percentage of all
examples receiving positive scores.
* Equality of Opportunity
Equality of Opportunity attempts to match the True Positive* rate
(aka recall) of different data slices.
* Equality of Odds
In addition to looking at Equality of Opportunity, looks at equalizing the
False Positive* rates of slices as well.
The choice to focus on these metrics as a starting point is based primarily on
the paper Equality of Opportunity in Supervised Learning and the excellent
visualization created as a companion to the paper.
https://arxiv.org/abs/1610.02413
http://research.google.com/bigpicture/attacking-discrimination-in-ml/
* Note that these fairness formulations assume that a positive prediction is
associated with a positive outcome for the user--in certain contexts such as
abuse, positive predictions translate to non-opportunity. You may want to use
the provided negative rates for comparison instead.
"""
_thresholds = ... # type: List[float]
_example_weight_key = ... # type: str
_labels_key = ... # type: str
_metric_tag = None # type: str
# We could use the same keys as the ConfusionMatrix metrics, but with the way
# that post_export_metrics are currently implemented, if both
# post_export_metrics were specified we would pop the matrices/thresholds in
# the first call, and have issues with the second.
thresholds_key = metric_keys.FAIRNESS_CONFUSION_MATRIX_THESHOLDS
matrices_key = metric_keys.FAIRNESS_CONFUSION_MATRIX_MATRICES
def __init__(self,
thresholds: Optional[List[float]] = None,
example_weight_key: Optional[str] = None,
target_prediction_keys: Optional[List[str]] = None,
labels_key: Optional[str] = None,
metric_tag: Optional[str] = None,
tensor_index: Optional[int] = None) -> None:
if not thresholds:
thresholds = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
# Determine the number of threshold digits to display as part of the metric
# key. We want lower numbers for readability, but allow differentiation
# between close thresholds.
self._key_digits = 2
for t in thresholds:
if len(str(t)) - 2 > self._key_digits:
self._key_digits = len(str(t)) - 2
super().__init__(
thresholds,
example_weight_key,
target_prediction_keys,
labels_key,
metric_tag,
tensor_index=tensor_index)
def get_metric_ops(
self, features_dict: types.TensorTypeMaybeDict,
predictions_dict: types.TensorTypeMaybeDict,
labels_dict: types.TensorTypeMaybeDict
) -> Dict[str, Tuple[types.TensorType, types.TensorType]]:
values, update_ops = self.confusion_matrix_metric_ops(
features_dict, predictions_dict, labels_dict)
# True positive rate is computed by confusion_matrix_metric_ops as 'recall'.
# pytype: disable=unsupported-operands
values['tnr'] = tf.math.divide_no_nan(values['tn'],
values['tn'] + values['fp'])
values['fpr'] = tf.math.divide_no_nan(values['fp'],
values['fp'] + values['tn'])
values['positive_rate'] = tf.math.divide_no_nan(
values['tp'] + values['fp'],
values['tp'] + values['fp'] + values['tn'] + values['fn'])
values['fnr'] = tf.math.divide_no_nan(values['fn'],
values['fn'] + values['tp'])
values['negative_rate'] = tf.math.divide_no_nan(
values['tn'] + values['fn'],
values['tp'] + values['fp'] + values['tn'] + values['fn'])
values['false_discovery_rate'] = tf.math.divide_no_nan(
values['fp'], values['fp'] + values['tp'])
values['false_omission_rate'] = tf.math.divide_no_nan(
values['fn'], values['fn'] + values['tn'])
# pytype: enable=unsupported-operands
update_op = tf.group(update_ops['fn'], update_ops['tn'], update_ops['fp'],
update_ops['tp'])
value_op = tf.transpose(
a=tf.stack([
values['fn'], values['tn'], values['fp'], values['tp'],
values['precision'], values['recall']
]))
output_dict = {
self._metric_key(self.matrices_key): (value_op, update_op),
self._metric_key(self.thresholds_key): (tf.identity(self._thresholds),
tf.no_op()),
}
for i, threshold in enumerate(self._thresholds):
output_dict[self._metric_key(
metric_keys.base_key(
'positive_rate@%.*f' %
(self._key_digits, threshold)))] = (values['positive_rate'][i],
update_op)
output_dict[self._metric_key(
metric_keys.base_key(
'true_positive_rate@%.*f' %
(self._key_digits, threshold)))] = (values['recall'][i],
update_op)
output_dict[self._metric_key(
metric_keys.base_key(
'false_positive_rate@%.*f' %
(self._key_digits, threshold)))] = (values['fpr'][i], update_op)
output_dict[self._metric_key(
metric_keys.base_key(
'negative_rate@%.*f' %
(self._key_digits, threshold)))] = (values['negative_rate'][i],
update_op)
output_dict[self._metric_key(
metric_keys.base_key(
'true_negative_rate@%.*f' %
(self._key_digits, threshold)))] = (values['tnr'][i], update_op)
output_dict[self._metric_key(
metric_keys.base_key(
'false_negative_rate@%.*f' %
(self._key_digits, threshold)))] = (values['fnr'][i], update_op)
output_dict[self._metric_key(
metric_keys.base_key('false_discovery_rate@%.*f' %
(self._key_digits, threshold)))] = (
values['false_discovery_rate'][i], update_op)
output_dict[self._metric_key(
metric_keys.base_key('false_omission_rate@%.*f' %
(self._key_digits, threshold)))] = (
values['false_omission_rate'][i], update_op)
return output_dict # pytype: disable=bad-return-type
def populate_stats_and_pop(
self, unused_slice_key: slicer.SliceKeyType, combine_metrics: Dict[str,
Any],
output_metrics: Dict[str, metrics_pb2.MetricValue]) -> None:
matrices = combine_metrics.pop(self._metric_key(self.matrices_key))
thresholds = combine_metrics.pop(self._metric_key(self.thresholds_key))
# We assume that thresholds are already sorted.
if len(matrices) != len(thresholds):
raise ValueError(
'matrices should have the same length as thresholds, but lengths '
'were: matrices: %d, thresholds: %d' %
(len(matrices), len(thresholds)))
for threshold, raw_matrix in zip(thresholds, matrices):
# Adds confusion matrix table as well as ratios used for fairness metrics.
if isinstance(threshold, types.ValueWithTDistribution):
threshold = threshold.unsampled_value
output_matrix = post_export_metrics._create_confusion_matrix_proto(
raw_matrix, threshold)
(output_metrics[self._metric_key(metric_keys.FAIRNESS_CONFUSION_MATRIX)]
.confusion_matrix_at_thresholds.matrices.add().CopyFrom(output_matrix))
# If the fairness_indicator is enabled, the slicing inside the tfx evaluator
# config will also be added into these metrics as a subgroup key.
# However, handling the subgroup metrics with slices is still TBD.
@post_export_metrics._export('fairness_auc')
class _FairnessAuc(post_export_metrics._PostExportMetric):
"""Metric that computes bounded AUC for predictions in [0, 1].
  This metric calculates the subgroup AUC, the background positive subgroup
  negative AUC, and the background negative subgroup positive AUC. For more
  explanation of these AUC metrics, please refer to the paper
  [Measuring and Mitigating Unintended Bias in Text
  Classification](https://ai.google/research/pubs/pub46743).
"""
_target_prediction_keys = ... # type: List[str]
_labels_key = ... # type: str
_metric_tag = None # type: str
_tensor_index = ... # type: int
def __init__(self,
subgroup_key: str,
example_weight_key: Optional[str] = None,
num_buckets: int = post_export_metrics._DEFAULT_NUM_BUCKETS,
target_prediction_keys: Optional[List[str]] = None,
labels_key: Optional[str] = None,
metric_tag: Optional[str] = None,
tensor_index: Optional[int] = None) -> None:
"""Create a metric that computes fairness auc.
Predictions should be one of:
(a) a single float in [0, 1]
(b) a dict containing the LOGISTIC key
(c) a dict containing the PREDICTIONS key, where the prediction is
in [0, 1]
Label should be a single float that is either exactly 0 or exactly 1
(soft labels, i.e. labels between 0 and 1 are *not* supported).
Args:
      subgroup_key: The key inside the features dict indicating whether this
        example belongs to the subgroup or not. The tensor mapped to this key
        should contain an integer/float value that's either 1 or 0.
example_weight_key: The key of the example weight column in the features
dict. If None, all predictions are given a weight of 1.0.
num_buckets: The number of buckets used for the curve. (num_buckets + 1)
is used as the num_thresholds in tf.metrics.auc().
target_prediction_keys: If provided, the prediction keys to look for in
order.
labels_key: If provided, a custom label key.
metric_tag: If provided, a custom metric tag. Only necessary to
disambiguate instances of the same metric on different predictions.
tensor_index: Optional index to specify class predictions to calculate
metrics on in the case of multi-class models.
"""
self._subgroup_key = subgroup_key
self._example_weight_key = example_weight_key
self._curve = 'ROC'
self._num_buckets = num_buckets
self._metric_name = metric_keys.FAIRNESS_AUC
self._subgroup_auc_metric = self._metric_key(self._metric_name +
'/subgroup_auc/' +
self._subgroup_key)
self._bpsn_auc_metric = self._metric_key(
f'{self._metric_name}/bpsn_auc/{self._subgroup_key}')
self._bnsp_auc_metric = self._metric_key(self._metric_name + '/bnsp_auc/' +
self._subgroup_key)
super().__init__(
target_prediction_keys=target_prediction_keys,
labels_key=labels_key,
metric_tag=metric_tag,
tensor_index=tensor_index)
def check_compatibility(self, features_dict: types.TensorTypeMaybeDict,
predictions_dict: types.TensorTypeMaybeDict,
labels_dict: types.TensorTypeMaybeDict) -> None:
post_export_metrics._check_feature_present(features_dict,
self._example_weight_key)
post_export_metrics._check_feature_present(features_dict,
self._subgroup_key)
self._get_labels_and_predictions(predictions_dict, labels_dict)
def get_metric_ops(
self, features_dict: types.TensorTypeMaybeDict,
predictions_dict: types.TensorTypeMaybeDict,
labels_dict: types.TensorTypeMaybeDict
) -> Dict[str, Tuple[types.TensorType, types.TensorType]]:
# Note that we have to squeeze predictions, labels, weights so they are all
# N element vectors (otherwise some of them might be N x 1 tensors, and
# multiplying a N element vector with a N x 1 tensor uses matrix
# multiplication rather than element-wise multiplication).
predictions, labels = self._get_labels_and_predictions(
predictions_dict, labels_dict)
predictions = post_export_metrics._flatten_to_one_dim(
tf.cast(predictions, tf.float64))
labels = post_export_metrics._flatten_to_one_dim(
tf.cast(labels, tf.float64))
weights = tf.ones_like(predictions)
subgroup = post_export_metrics._flatten_to_one_dim(
tf.cast(features_dict[self._subgroup_key], tf.bool))
if self._example_weight_key:
weights = post_export_metrics._flatten_to_one_dim(
tf.cast(features_dict[self._example_weight_key], tf.float64))
predictions, labels, weights = (
post_export_metrics
._create_predictions_labels_weights_for_fractional_labels(
predictions, labels, weights))
    # Duplicate the subgroup tensor so that its size matches the predictions,
    # labels and weights computed above.
subgroup = tf.concat([subgroup, subgroup], axis=0)
labels_bool = tf.cast(labels, tf.bool)
pos_subgroup = tf.math.logical_and(labels_bool, subgroup)
neg_subgroup = tf.math.logical_and(
tf.math.logical_not(labels_bool), subgroup)
pos_background = tf.math.logical_and(labels_bool,
tf.math.logical_not(subgroup))
neg_background = tf.math.logical_and(
tf.math.logical_not(labels_bool), tf.math.logical_not(subgroup))
bnsp = tf.math.logical_or(pos_subgroup, neg_background)
bpsn = tf.math.logical_or(neg_subgroup, pos_background)
ops_dict = {}
# Add subgroup auc.
ops_dict.update(
post_export_metrics._build_auc_metrics_ops(
self._subgroup_auc_metric, labels, predictions,
tf.multiply(weights, tf.cast(subgroup, tf.float64)),
self._num_buckets + 1, self._curve))
    # Add background positive subgroup negative auc.
ops_dict.update(
post_export_metrics._build_auc_metrics_ops(
self._bpsn_auc_metric, labels, predictions,
tf.multiply(weights, tf.cast(bpsn, tf.float64)),
self._num_buckets + 1, self._curve))
    # Add background negative subgroup positive auc.
ops_dict.update(
post_export_metrics._build_auc_metrics_ops(
self._bnsp_auc_metric, labels, predictions,
tf.multiply(weights, tf.cast(bnsp, tf.float64)),
self._num_buckets + 1, self._curve))
return ops_dict
def populate_stats_and_pop(
self, slice_key: slicer.SliceKeyType, combine_metrics: Dict[str, Any],
output_metrics: Dict[str, metrics_pb2.MetricValue]) -> None:
for metrics_key in (self._subgroup_auc_metric, self._bpsn_auc_metric,
self._bnsp_auc_metric):
if slice_key:
combine_metrics.pop(metric_keys.lower_bound_key(metrics_key))
combine_metrics.pop(metric_keys.upper_bound_key(metrics_key))
combine_metrics.pop(metrics_key)
else:
post_export_metrics._populate_to_auc_bounded_value_and_pop(
combine_metrics, output_metrics, metrics_key)
# pylint: enable=protected-access
```
#### File: tensorflow_model_analysis/eval_saved_model/encoding.py
```python
import tensorflow as tf
from tensorflow_model_analysis import types
from google.protobuf import any_pb2
from tensorflow.core.protobuf import meta_graph_pb2
# Names for the various collections
TFMA_VERSION_COLLECTION = 'evaluation_only/metadata/tfma_version'
METRICS_COLLECTION = 'evaluation_only/metrics'
PREDICTIONS_COLLECTION = 'evaluation_only/predictions'
INPUT_EXAMPLE_COLLECTION = 'evaluation_only/label_graph/input_example'
LABELS_COLLECTION = 'evaluation_only/label_graph/labels'
FEATURES_COLLECTION = 'evaluation_only/label_graph/features'
EXAMPLE_REF_COLLECTION = 'evaluation_only/label_graph/example_ref'
# Suffixes for the collection names
KEY_SUFFIX = 'key'
NODE_SUFFIX = 'node'
VALUE_OP_SUFFIX = 'value_op'
UPDATE_OP_SUFFIX = 'update_op'
# Encoding prefixes for keys
_TUPLE_KEY_PREFIX = b'$Tuple$'
_BYTES_KEY_PREFIX = b'$Bytes$'
def with_suffix(name: str, suffix: str) -> str:
return '%s/%s' % (name, suffix) # pytype: disable=bad-return-type
def encode_key(key: types.FPLKeyType) -> bytes:
"""Encode a dictionary key as a string.
For encoding dictionary keys in the prediction, label and feature
dictionaries. We assume that they are either Tuples of bytes, or bytes.
Implementation details:
Strings are encoded as $Bytes$<String>
Tuples of strings are encoded as:
$Tuple$<len(tuple)>$len(tuple[0])$...$len(tuple[n])$tuple[0]$...$tuple[n]
e.g. ('apple', 'banana', 'cherry') gets encoded as
$Tuple$3$5$6$6$apple$banana$cherry
Args:
key: Dictionary key to encode.
Returns:
Encoded dictionary key.
Raises:
    TypeError: Dictionary key is neither a Tuple of bytes/unicode nor
      bytes/unicode.
"""
if isinstance(key, tuple):
if not all(isinstance(elem, (bytes, str)) for elem in key):
raise TypeError('if key is tuple, all elements should be strings. '
'key was: %s' % key)
utf8_keys = [tf.compat.as_bytes(elem) for elem in key]
length_strs = [tf.compat.as_bytes('%d' % len(key)) for key in utf8_keys]
return (_TUPLE_KEY_PREFIX + tf.compat.as_bytes('%d' % len(length_strs)) +
b'$' + b'$'.join(length_strs) + b'$' + b'$'.join(utf8_keys))
elif isinstance(key, (bytes, str)):
return b'$Bytes$' + tf.compat.as_bytes(key)
else:
raise TypeError('key has unrecognised type: type: %s, value %s' %
(type(key), key))
def decode_key(encoded_key: bytes) -> types.FPLKeyType:
"""Decode an encoded dictionary key encoded with encode_key.
Args:
encoded_key: Dictionary key, encoded with encode_key.
Returns:
Decoded dictionary key.
Raises:
ValueError: We couldn't decode the encoded key.
"""
if encoded_key.startswith(_TUPLE_KEY_PREFIX):
parts = encoded_key[len(_TUPLE_KEY_PREFIX):].split(b'$', 1)
if len(parts) != 2:
raise ValueError('invalid encoding: %s' % encoded_key)
elem_count = int(parts[0])
parts = parts[1].split(b'$', elem_count)
if len(parts) != elem_count + 1:
raise ValueError('invalid encoding: %s' % encoded_key)
lengths = map(int, parts[:elem_count])
parts = parts[elem_count]
elems = []
for length in lengths:
elems.append(parts[:length].decode('utf8'))
parts = parts[length + 1:] # Add one for the $ delimiter
return tuple(elems)
elif encoded_key.startswith(_BYTES_KEY_PREFIX):
return encoded_key[len(_BYTES_KEY_PREFIX):].decode('utf8')
else:
raise ValueError('invalid encoding: %s' % encoded_key)
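# Illustrative round trip (values follow the encoding described above):
#   encode_key(('age', 'bucket')) == b'$Tuple$2$3$6$age$bucket'
#   decode_key(b'$Tuple$2$3$6$age$bucket') == ('age', 'bucket')
#   encode_key('age') == b'$Bytes$age'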
def encode_tensor_node(node: types.TensorType) -> any_pb2.Any:
"""Encode a "reference" to a Tensor/SparseTensor as a TensorInfo in an Any.
We put the Tensor / SparseTensor in a TensorInfo, which we then wrap in an
Any so that it can be added to the CollectionDef.
Args:
node: Tensor node.
Returns:
Any proto wrapping a TensorInfo.
"""
any_buf = any_pb2.Any()
tensor_info = tf.compat.v1.saved_model.utils.build_tensor_info(node)
any_buf.Pack(tensor_info)
return any_buf
def decode_tensor_node(graph: tf.Graph,
encoded_tensor_node: any_pb2.Any) -> types.TensorType:
"""Decode an encoded Tensor node encoded with encode_tensor_node.
Decodes the encoded Tensor "reference", and returns the node in the given
graph corresponding to that Tensor.
Args:
    graph: Graph in which to look up the decoded Tensor node.
encoded_tensor_node: Encoded Tensor.
Returns:
Decoded Tensor.
"""
tensor_info = meta_graph_pb2.TensorInfo()
encoded_tensor_node.Unpack(tensor_info)
return tf.compat.v1.saved_model.utils.get_tensor_from_tensor_info(
tensor_info, graph)
```
#### File: eval_saved_model/example_trainers/fake_sequence_to_prediction.py
```python
import tensorflow as tf
from tensorflow_model_analysis.eval_saved_model import export
from tensorflow_model_analysis.eval_saved_model import util
def simple_fake_sequence_to_prediction(export_path, eval_export_path):
"""Trains and exports a fake_sequence_to_prediction model."""
input_feature_spec = {
'values_t1': tf.io.VarLenFeature(dtype=tf.float32),
'values_t2': tf.io.VarLenFeature(dtype=tf.float32),
'values_t3': tf.io.VarLenFeature(dtype=tf.float32)
}
label_feature_spec = dict(input_feature_spec)
label_feature_spec['label'] = tf.io.FixedLenFeature([1], dtype=tf.float32)
def _make_embedding_and_sparse_values(features):
"""Make "embedding" and "sparse_values" features."""
embedding_dim = 3
sparse_dims = 3
sparse_timesteps = 3
# Create a three-dimensional "embedding" based on the value of the feature
# The embedding is simply [1, 1, 1] * feature_value
# (or [0, 0, 0] if the feature is missing).
batch_size = tf.cast(
tf.shape(input=features['values_t1'])[0], dtype=tf.int64)
ones = tf.ones(shape=[embedding_dim])
dense_t1 = tf.sparse.to_dense(features['values_t1'])
dense_t2 = tf.sparse.to_dense(features['values_t2'])
dense_t3 = tf.sparse.to_dense(features['values_t3'])
embedding_t1 = ones * dense_t1
embedding_t2 = ones * dense_t2
embedding_t3 = ones * dense_t3
embeddings = tf.stack([embedding_t1, embedding_t2, embedding_t3], axis=1)
features['embedding'] = embeddings
del features['values_t1']
del features['values_t2']
del features['values_t3']
# Make the "sparse_values" feature.
sparse_values = tf.squeeze(
tf.concat([
dense_t1, dense_t1**2, dense_t1**3, dense_t2, dense_t2**2, dense_t2
**3, dense_t3, dense_t3**2, dense_t3**3
],
axis=0))
sparse_total_elems = batch_size * sparse_dims * sparse_timesteps
seq = tf.range(0, sparse_total_elems, dtype=tf.int64)
batch_num = seq % batch_size
timestep = tf.compat.v1.div(seq, batch_size * sparse_dims)
offset = tf.compat.v1.div(seq, batch_size) % sparse_dims
sparse_indices = tf.stack([batch_num, timestep, offset], axis=1)
features['sparse_values'] = tf.SparseTensor(
indices=sparse_indices,
values=sparse_values,
dense_shape=[batch_size, sparse_timesteps, sparse_dims])
def model_fn(features, labels, mode, config):
"""Model function for custom estimator."""
del config
dense_values = tf.sparse.to_dense(
features['sparse_values'], validate_indices=False)
a = tf.Variable(1.0, dtype=tf.float32, name='a')
b = tf.Variable(2.0, dtype=tf.float32, name='b')
c = tf.Variable(3.0, dtype=tf.float32, name='c')
d = tf.Variable(4.0, dtype=tf.float32, name='d')
e = tf.Variable(5.0, dtype=tf.float32, name='e')
f = tf.Variable(6.0, dtype=tf.float32, name='f')
predictions = (
a * tf.reduce_sum(input_tensor=features['embedding'][:, 0, :], axis=1) +
b * tf.reduce_sum(input_tensor=features['embedding'][:, 1, :], axis=1) +
c * tf.reduce_sum(input_tensor=features['embedding'][:, 2, :], axis=1) +
d * tf.reduce_sum(input_tensor=dense_values[:, 0, :], axis=1) +
e * tf.reduce_sum(input_tensor=dense_values[:, 1, :], axis=1) +
f * tf.reduce_sum(input_tensor=dense_values[:, 2, :], axis=1))
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions={'score': predictions},
export_outputs={
'score': tf.estimator.export.RegressionOutput(predictions)
})
loss = tf.compat.v1.losses.mean_squared_error(
labels, tf.expand_dims(predictions, axis=-1))
optimizer = tf.compat.v1.train.GradientDescentOptimizer(
learning_rate=0.0001)
train_op = optimizer.minimize(
loss=loss, global_step=tf.compat.v1.train.get_global_step())
return tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
eval_metric_ops={
'mean_squared_error':
tf.compat.v1.metrics.mean_squared_error(
labels, tf.expand_dims(predictions, axis=-1)),
'mean_prediction':
tf.compat.v1.metrics.mean(predictions),
},
predictions=predictions)
def train_input_fn():
"""Train input function."""
def make_example_with_label(values_t1=None, values_t2=None, values_t3=None):
"""Make example with label."""
effective_t1 = 0.0
effective_t2 = 0.0
effective_t3 = 0.0
args = {}
if values_t1 is not None:
args['values_t1'] = float(values_t1)
effective_t1 = values_t1
if values_t2 is not None:
args['values_t2'] = float(values_t2)
effective_t2 = values_t2
if values_t3 is not None:
args['values_t3'] = float(values_t3)
effective_t3 = values_t3
label = (3 * effective_t1 + 6 * effective_t2 + 9 * effective_t3 + 4 *
(effective_t1 + effective_t1**2 + effective_t1**3) + 5 *
(effective_t2 + effective_t2**2 + effective_t2**3) + 6 *
(effective_t3 + effective_t3**2 + effective_t3**3))
args['label'] = float(label)
return util.make_example(**args)
examples = [
make_example_with_label(values_t1=1.0),
make_example_with_label(values_t2=1.0),
make_example_with_label(values_t3=1.0),
make_example_with_label(values_t1=2.0, values_t2=3.0),
make_example_with_label(values_t1=5.0, values_t3=7.0),
make_example_with_label(values_t2=11.0, values_t3=13.0),
make_example_with_label(values_t1=2.0, values_t2=3.0, values_t3=5.0),
]
serialized_examples = [x.SerializeToString() for x in examples]
features = tf.io.parse_example(
serialized=serialized_examples, features=label_feature_spec)
_make_embedding_and_sparse_values(features)
label = features.pop('label')
return features, label
def serving_input_receiver_fn():
"""Serving input receiver function."""
serialized_tf_example = tf.compat.v1.placeholder(
dtype=tf.string, shape=[None], name='input_example_tensor')
receiver_tensors = {'examples': serialized_tf_example}
features = tf.io.parse_example(
serialized=serialized_tf_example, features=input_feature_spec)
_make_embedding_and_sparse_values(features)
return tf.estimator.export.ServingInputReceiver(features, receiver_tensors)
def eval_input_receiver_fn():
"""Eval input receiver function."""
serialized_tf_example = tf.compat.v1.placeholder(
dtype=tf.string, shape=[None], name='input_example_tensor')
receiver_tensors = {'examples': serialized_tf_example}
features = tf.io.parse_example(
serialized=serialized_tf_example, features=label_feature_spec)
_make_embedding_and_sparse_values(features)
return export.EvalInputReceiver(
features=features,
receiver_tensors=receiver_tensors,
labels=features['label'])
estimator = tf.estimator.Estimator(model_fn=model_fn)
estimator.train(input_fn=train_input_fn, steps=10)
export_dir = None
eval_export_dir = None
if export_path:
export_dir = estimator.export_saved_model(
export_dir_base=export_path,
serving_input_receiver_fn=serving_input_receiver_fn)
if eval_export_path:
eval_export_dir = export.export_eval_savedmodel(
estimator=estimator,
export_dir_base=eval_export_path,
eval_input_receiver_fn=eval_input_receiver_fn)
return export_dir, eval_export_dir
```
#### File: eval_saved_model/example_trainers/fixed_prediction_classifier_identity_label.py
```python
import tensorflow as tf
from tensorflow_model_analysis.eval_saved_model import export
from tensorflow_model_analysis.eval_saved_model.example_trainers import fixed_prediction_classifier
from tensorflow_model_analysis.eval_saved_model.example_trainers import util
def simple_fixed_prediction_classifier_identity_label(export_path,
eval_export_path):
"""Exports a simple fixed prediction classifier."""
estimator = tf.estimator.Estimator(
model_fn=fixed_prediction_classifier.model_fn)
estimator.train(input_fn=fixed_prediction_classifier.train_input_fn, steps=1)
serving_input_receiver_fn = (
tf.estimator.export.build_parsing_serving_input_receiver_fn(
feature_spec={
'classes': tf.io.VarLenFeature(dtype=tf.string),
'scores': tf.io.VarLenFeature(dtype=tf.float32)
}))
eval_input_receiver_fn = export.build_parsing_eval_input_receiver_fn(
feature_spec={
'classes': tf.io.VarLenFeature(dtype=tf.string),
'scores': tf.io.VarLenFeature(dtype=tf.float32),
'label': tf.io.FixedLenFeature([1], dtype=tf.int64),
'language': tf.io.FixedLenFeature([1], dtype=tf.string),
'age': tf.io.FixedLenFeature([1], dtype=tf.float32),
},
label_key='label')
return util.export_model_and_eval_model(
estimator=estimator,
serving_input_receiver_fn=serving_input_receiver_fn,
eval_input_receiver_fn=eval_input_receiver_fn,
export_path=export_path,
eval_export_path=eval_export_path)
```
#### File: eval_saved_model/example_trainers/fixed_prediction_estimator_extra_fields.py
```python
import tensorflow as tf
from tensorflow_model_analysis.eval_saved_model import export
from tensorflow_model_analysis.eval_saved_model.example_trainers import util
from tensorflow.python.estimator.canned import metric_keys
from tensorflow.python.estimator.canned import prediction_keys
def simple_fixed_prediction_estimator_extra_fields(export_path,
eval_export_path,
include_metrics=True):
"""Exports a simple fixed prediction estimator that parses extra fields."""
def model_fn(features, labels, mode, config):
"""Model function for custom estimator."""
del config
predictions = features['prediction']
predictions_dict = {
prediction_keys.PredictionKeys.PREDICTIONS: predictions,
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions_dict,
export_outputs={
tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
tf.estimator.export.RegressionOutput(predictions)
})
loss = tf.compat.v1.losses.mean_squared_error(predictions, labels)
train_op = tf.compat.v1.assign_add(tf.compat.v1.train.get_global_step(), 1)
eval_metric_ops = {}
if include_metrics:
eval_metric_ops[
metric_keys.MetricKeys.LOSS_MEAN] = tf.compat.v1.metrics.mean(loss)
return tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
predictions=predictions_dict,
eval_metric_ops=eval_metric_ops)
def train_input_fn():
"""Train input function."""
return {
'prediction': tf.constant([[1.0], [2.0], [3.0], [4.0]]),
}, tf.constant([[1.0], [2.0], [3.0], [4.0]]),
feature_spec = {'prediction': tf.io.FixedLenFeature([1], dtype=tf.float32)}
eval_feature_spec = {
'prediction':
tf.io.FixedLenFeature([1], dtype=tf.float32),
'label':
tf.io.FixedLenFeature([1], dtype=tf.float32),
'fixed_float':
tf.io.FixedLenFeature([1], dtype=tf.float32, default_value=0.0),
'fixed_string':
tf.io.FixedLenFeature([1], dtype=tf.string, default_value=''),
'fixed_int':
tf.io.FixedLenFeature([1], dtype=tf.int64, default_value=0),
'var_float':
tf.io.VarLenFeature(dtype=tf.float32),
'var_string':
tf.io.VarLenFeature(dtype=tf.string),
'var_int':
tf.io.VarLenFeature(dtype=tf.int64),
}
estimator = tf.estimator.Estimator(model_fn=model_fn)
estimator.train(input_fn=train_input_fn, steps=1)
return util.export_model_and_eval_model(
estimator=estimator,
serving_input_receiver_fn=(
tf.estimator.export.build_parsing_serving_input_receiver_fn(
feature_spec)),
eval_input_receiver_fn=export.build_parsing_eval_input_receiver_fn(
eval_feature_spec, label_key='label'),
export_path=export_path,
eval_export_path=eval_export_path)
```
#### File: tensorflow_model_analysis/evaluators/legacy_poisson_bootstrap.py
```python
from typing import Any, Dict, Iterable, Iterator, List, Optional, Tuple, Type, Union
import apache_beam as beam
import numpy as np
from tensorflow_model_analysis import types
from tensorflow_model_analysis.slicer import slicer_lib as slicer
from google.protobuf import message
DEFAULT_NUM_BOOTSTRAP_SAMPLES = 20
# TFMA v1 uses Text for its keys while TFMA v2 uses MetricKey
_MetricsDict = Dict[Any, Any]
@beam.ptransform_fn
@beam.typehints.with_input_types(Tuple[slicer.SliceKeyType, types.Extracts])
@beam.typehints.with_output_types(Tuple[slicer.SliceKeyType, _MetricsDict])
def ComputeWithConfidenceIntervals( # pylint: disable=invalid-name
sliced_extracts: beam.pvalue.PCollection,
compute_per_slice_metrics_cls: Type[beam.PTransform],
num_bootstrap_samples: Optional[int] = DEFAULT_NUM_BOOTSTRAP_SAMPLES,
random_seed_for_testing: Optional[int] = None,
**kwargs) -> beam.pvalue.PCollection:
"""PTransform for computing metrics using T-Distribution values.
Args:
sliced_extracts: Incoming PCollection consisting of slice key and extracts.
compute_per_slice_metrics_cls: PTransform class that takes a PCollection of
(slice key, extracts) as input and returns (slice key, dict of metrics) as
output. The class will be instantiated multiple times to compute metrics
both with and without sampling. The class will be initialized using kwargs
'compute_with_sampling' and 'random_seed_for_testing' along with any
kwargs passed in **kwargs.
    num_bootstrap_samples: Number of replicas to use in calculating uncertainty
      using bootstrapping. If 1 is provided, aggregate metrics will be
      calculated with no uncertainty. If num_bootstrap_samples is > 1, multiple
      samples of each slice will be calculated using the Poisson bootstrap
      method. To calculate standard errors, num_bootstrap_samples should be 20
      or more in order to provide useful data. More is better, but you pay a
      performance cost.
random_seed_for_testing: Seed to use for unit testing, because
nondeterministic tests stink. Each partition will use this value + i.
**kwargs: Additional args to pass to compute_per_slice_metrics_cls init.
Returns:
PCollection of (slice key, dict of metrics)
"""
if not num_bootstrap_samples:
num_bootstrap_samples = 1
# TODO(ckuhn): Cap the number of bootstrap samples at 20.
if num_bootstrap_samples < 1:
raise ValueError('num_bootstrap_samples should be > 0, got %d' %
num_bootstrap_samples)
output_results = (
sliced_extracts
| 'ComputeUnsampledMetrics' >> compute_per_slice_metrics_cls(
compute_with_sampling=False, random_seed_for_testing=None, **kwargs))
if num_bootstrap_samples > 1:
multicombine = []
for i in range(num_bootstrap_samples):
seed = (None if random_seed_for_testing is None else
random_seed_for_testing + i)
multicombine.append(
sliced_extracts
| 'ComputeSampledMetrics%d' % i >> compute_per_slice_metrics_cls(
compute_with_sampling=True,
random_seed_for_testing=seed,
**kwargs))
output_results = (
multicombine
| 'FlattenBootstrapPartitions' >> beam.Flatten()
| 'GroupBySlice' >> beam.GroupByKey()
| 'MergeBootstrap' >> beam.ParDo(_MergeBootstrap(),
beam.pvalue.AsDict(output_results)))
return output_results
class _MergeBootstrap(beam.DoFn):
"""Merge the bootstrap values and fit a T-distribution to get confidence."""
def process(
self, element: Tuple[slicer.SliceKeyType, Iterable[_MetricsDict]],
unsampled_results: Dict[slicer.SliceKeyType, _MetricsDict]
) -> Iterator[Tuple[slicer.SliceKeyType, _MetricsDict]]:
"""Merge the bootstrap values.
Args:
      element: A tuple containing the slice key and an iterable of metrics
        dicts, as output by the GroupByKey step. All of the metrics dicts under
        the same slice key were generated by Poisson bootstrap sampling.
      unsampled_results: Passed in as a side input. A dict mapping each slice
        key to the metrics dict from a run of that slice with no sampling
        (i.e., all examples in the set are represented exactly once).
Yields:
      A tuple of the slice key and the metrics dict, which contains the
      unsampled value as well as the parameters of the fitted t-distribution.
      If the metric is a proto, only the unsampled value is returned.
    Raises:
      ValueError: If the metric keys inside element do not match the metric
        keys in unsampled_results.
"""
slice_key, metrics = element
# metrics should be a list of dicts, but the dataflow runner has a quirk
# that requires specific casting.
metrics = list(metrics)
if len(metrics) == 1:
yield slice_key, metrics[0]
return
# Group the same metrics into one list.
metrics_dict = {}
for metric in metrics:
for metrics_name in metric:
if metrics_name not in metrics_dict:
metrics_dict[metrics_name] = []
metrics_dict[metrics_name].append(metric[metrics_name])
unsampled_metrics_dict = unsampled_results.get(slice_key, {})
# The key set of the two metrics dicts must be identical.
if set(metrics_dict.keys()) != set(unsampled_metrics_dict.keys()):
raise ValueError('Keys of two metrics do not match: sampled_metrics: %s. '
'unsampled_metrics: %s' %
(metrics_dict.keys(), unsampled_metrics_dict.keys()))
metrics_with_confidence = {}
for metrics_name in metrics_dict:
# If metric is a proto, return as is.
unsampled_value = unsampled_metrics_dict[metrics_name]
if isinstance(unsampled_value, message.Message):
metrics_with_confidence[metrics_name] = unsampled_value
else:
metrics_with_confidence[metrics_name] = _calculate_t_distribution(
metrics_dict[metrics_name], unsampled_value)
yield slice_key, metrics_with_confidence
def _calculate_t_distribution( # pylint: disable=invalid-name
sampling_data_list: List[Union[int, float, np.ndarray]],
unsampled_data: Union[int, float, np.ndarray]):
"""Calculate the confidence interval of the data.
Args:
sampling_data_list: A list of numbers or np.ndarrays.
unsampled_data: Individual number or np.ndarray. The format of the
unsampled_data should match the format of the element inside
sampling_data_list.
Returns:
Confidence Interval value stored inside
types.ValueWithTDistribution.
"""
if isinstance(sampling_data_list[0], (np.ndarray, list)):
merged_data = sampling_data_list[0][:]
if isinstance(sampling_data_list[0], np.ndarray):
merged_data = merged_data.astype(object)
for index in range(len(merged_data)):
merged_data[index] = _calculate_t_distribution(
[data[index] for data in sampling_data_list], unsampled_data[index])
return merged_data
else:
# Data has to be numeric. That means throw out nan values.
sampling_data_list = [
data for data in sampling_data_list if not np.isnan(data)
]
if n_samples := len(sampling_data_list):
sample_mean = np.mean(sampling_data_list)
sample_std = np.std(sampling_data_list, ddof=1)
return types.ValueWithTDistribution(sample_mean, sample_std,
n_samples - 1, unsampled_data)
else:
return types.ValueWithTDistribution(
float('nan'), float('nan'), -1, float('nan'))
```
#### File: tensorflow_model_analysis/metrics/sample_metrics.py
```python
from typing import Any, List, Optional, Text, Tuple
import apache_beam as beam
import numpy as np
from tensorflow_model_analysis.metrics import metric_types
from tensorflow_model_analysis.utils import beam_util
from tensorflow_model_analysis.utils import util
FIXED_SIZE_SAMPLE_NAME = 'fixed_size_sample'
# This corresponds to the comments in apache_beam/transforms/combiners.py
_HeapType = Tuple[bool, List[Any]]
class FixedSizeSample(metric_types.Metric):
"""Computes a fixed-size sample per slice."""
def __init__(self,
sampled_key: Text,
size: int,
name: Text = FIXED_SIZE_SAMPLE_NAME,
random_seed: Optional[int] = None):
"""Initializes a FixedSizeSample metric.
Args:
sampled_key: The key whose values should be sampled
size: The number of samples to collect (per slice)
name: Metric name.
random_seed: The random_seed to be used for initializing the per worker
np.random.RandomGenerator in the CombineFn setup. Note that when more
than one worker is used, setting this is not sufficient to guarantee
determinism.
"""
super().__init__(
_fixed_size_sample,
sampled_key=sampled_key,
size=size,
name=name,
random_seed=random_seed)
metric_types.register_metric(FixedSizeSample)
def _fixed_size_sample(
sampled_key: Text,
size: int,
name: Text,
random_seed: Optional[int],
model_names: Optional[List[Text]] = None,
output_names: Optional[List[Text]] = None,
sub_keys: Optional[List[metric_types.SubKey]] = None,
example_weighted: bool = False) -> metric_types.MetricComputations:
"""Returns metrics computations for FixedSizeSample metrcs."""
keys = []
for model_name in model_names or ['']:
for output_name in output_names or ['']:
keys.extend(metric_types.MetricKey(
name,
model_name=model_name,
output_name=output_name,
sub_key=sub_key,
example_weighted=example_weighted) for sub_key in sub_keys or [None])
return [
metric_types.MetricComputation(
keys=keys,
preprocessor=metric_types.FeaturePreprocessor(
feature_keys=[sampled_key]),
combiner=_FixedSizeSampleCombineFn(
metric_keys=keys,
sampled_key=sampled_key,
size=size,
example_weighted=example_weighted,
random_seed=random_seed))
]
class _FixedSizeSampleCombineFn(beam_util.DelegatingCombineFn):
"""A fixed size sample combiner which samples values of a specified key.
This CombineFn is similar to beam.combiners.SampleCombineFn except it makes
use of the numpy random generator which means that it accepts a seed for use
with deterministic testing.
"""
def __init__(self, metric_keys: List[metric_types.MetricKey],
sampled_key: Text, size: int, example_weighted: bool,
random_seed: Optional[int]):
self._metric_keys = metric_keys
self._sampled_key = sampled_key
self._example_weighted = example_weighted
self._random_seed = random_seed
# We delegate to the TopCombineFn rather than subclass because the use of a
# TopCombineFn is an implementation detail.
super().__init__(combine_fn=beam.combiners.TopCombineFn(n=size))
def setup(self):
self._random_generator = np.random.default_rng(self._random_seed)
def add_input(self, heap: _HeapType,
element: metric_types.StandardMetricInputs) -> _HeapType:
# TODO(b/206546545): add support for sampling derived features
sampled_value = util.get_by_keys(element.features, [self._sampled_key])
random_tag = self._random_generator.random()
if self._example_weighted:
# For details, see Weighted Random Sampling over Data Streams:
# https://arxiv.org/abs/1012.0256
weight = element.example_weight
random_tag = random_tag**(1 / weight)
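# Illustrative: with example_weight=4, a uniform draw of 0.5 becomes
# 0.5 ** (1 / 4) ~= 0.84, so heavily weighted examples receive larger keys
# and are more likely to survive the top-n heap (A-Res weighted sampling).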
return super().add_input(heap, (random_tag, sampled_value))
def extract_output(self, heap: _HeapType) -> metric_types.MetricsDict:
# drop random numbers used for sampling
sampled_values = np.array([v for _, v in super().extract_output(heap)])
return {k: sampled_values for k in self._metric_keys}
```
{
"source": "josephworks/PythonWS",
"score": 3
} |
#### File: josephworks/PythonWS/deepdream.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
import numpy as np
import matplotlib as mpl
from IPython.display import clear_output
from matplotlib import pyplot as plt
# from tensorflow.keras.preprocessing import image
# url = 'https://storage.googleapis.com/download.tensorflow.org/example_images/YellowLabradorLooking_new.jpg'
url = 'https://raw.githubusercontent.com/josephworks/files/master/CLARKE_THOMAS.png'
# Download an image and read it into a NumPy array.
def download(url, target_size=None):
name = url.split('/')[-1]
image_path = tf.keras.utils.get_file(name, origin=url)
img = tf.keras.preprocessing.image.load_img(image_path, target_size=target_size)
return img
# Normalize an image
def deprocess(img):
img = 255 * (img + 1.0) / 2.0
return tf.cast(img, tf.uint8)
# Display an image
def show(img):
plt.figure(figsize=(12, 12))
plt.grid(False)
plt.axis('off')
plt.imshow(img)
plt.show()
# Downsizing the image makes it easier to work with.
base_model = tf.keras.applications.InceptionV3(include_top=False, weights='imagenet')
original_img = download(url, target_size=[225, 375])
original_img = np.array(original_img)
show(original_img)
# Maximize the activations of these layers
names = ['mixed3', 'mixed5']
layers = [base_model.get_layer(name).output for name in names]
# Create the feature extraction model
dream_model = tf.keras.Model(inputs=base_model.input, outputs=layers)
def calc_loss(img, model):
# Pass forward the image through the model to retrieve the activations.
# Converts the image into a batch of size 1.
img_batch = tf.expand_dims(img, axis=0)
layer_activations = model(img_batch)
losses = []
for act in layer_activations:
loss = tf.math.reduce_mean(act)
losses.append(loss)
return tf.reduce_sum(losses)
@tf.function
def deepdream(model, img, step_size):
with tf.GradientTape() as tape:
# This needs gradients relative to `img`
# `GradientTape` only watches `tf.Variable`s by default
tape.watch(img)
loss = calc_loss(img, model)
# Calculate the gradient of the loss with respect to the pixels of the input image.
gradients = tape.gradient(loss, img)
# Normalize the gradients.
gradients /= tf.math.reduce_std(gradients) + 1e-8
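# Dividing by the gradient standard deviation makes the update roughly
# unit-scale, so `step_size` controls the ascent step in units of std-devs.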
# In gradient ascent, the "loss" is maximized so that the input image increasingly "excites" the layers.
# You can update the image by directly adding the gradients (because they're the same shape!)
img = img + gradients * step_size
img = tf.clip_by_value(img, -1, 1)
return loss, img
def run_deep_dream_simple(model, img, steps=100, step_size=0.01):
# Convert from uint8 to the range expected by the model.
img = tf.keras.applications.inception_v3.preprocess_input(img)
for step in range(steps):
loss, img = deepdream(model, img, step_size)
if step % 100 == 0:
clear_output(wait=True)
show(deprocess(img))
print("Step {}, loss {}".format(step, loss))
result = deprocess(img)
clear_output(wait=True)
show(result)
return result
dream_img = run_deep_dream_simple(model=dream_model, img=original_img,
steps=800, step_size=0.001)
OCTAVE_SCALE = 1.3
img = tf.constant(np.array(original_img))
base_shape = tf.cast(tf.shape(img)[:-1], tf.float32)
for n in range(3):
new_shape = tf.cast(base_shape * (OCTAVE_SCALE ** n), tf.int32)
img = tf.image.resize(img, new_shape).numpy()
img = run_deep_dream_simple(model=dream_model, img=img, steps=200, step_size=0.001)
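# Each iteration resizes the running image to base_shape * OCTAVE_SCALE ** n
# (a 1.3x step over the previous octave) and dreams again, so patterns are
# layered in at several spatial scales.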
clear_output(wait=True)
show(img)
def random_roll(img, maxroll):
# Randomly shift the image to avoid tiled boundaries.
shift = tf.random.uniform(shape=[2], minval=-maxroll, maxval=maxroll, dtype=tf.int32)
shift_down, shift_right = shift[0], shift[1]
img_rolled = tf.roll(tf.roll(img, shift_right, axis=1), shift_down, axis=0)
return shift_down, shift_right, img_rolled
shift_down, shift_right, img_rolled = random_roll(np.array(original_img), 512)
show(img_rolled)
@tf.function
def get_tiled_gradients(model, img, tile_size=512):
shift_down, shift_right, img_rolled = random_roll(img, tile_size)
# Initialize the image gradients to zero.
gradients = tf.zeros_like(img_rolled)
for x in tf.range(0, img_rolled.shape[0], tile_size):
for y in tf.range(0, img_rolled.shape[1], tile_size):
# Calculate the gradients for this tile.
with tf.GradientTape() as tape:
# This needs gradients relative to `img_rolled`.
# `GradientTape` only watches `tf.Variable`s by default.
tape.watch(img_rolled)
# Extract a tile out of the image.
img_tile = img_rolled[x:x + tile_size, y:y + tile_size]
loss = calc_loss(img_tile, model)
# Update the image gradients for this tile.
gradients = gradients + tape.gradient(loss, img_rolled)
# Undo the random shift applied to the image and its gradients.
gradients = tf.roll(tf.roll(gradients, -shift_right, axis=1), -shift_down, axis=0)
# Normalize the gradients.
gradients /= tf.math.reduce_std(gradients) + 1e-8
return gradients
def run_deep_dream_with_octaves(model, img, steps_per_octave=100, step_size=0.01,
num_octaves=3, octave_scale=1.3):
img = tf.keras.preprocessing.image.img_to_array(img)
img = tf.keras.applications.inception_v3.preprocess_input(img)
for octave in range(num_octaves):
# Scale the image based on the octave
if octave > 0:
new_size = tf.cast(tf.convert_to_tensor(img.shape[:2]), tf.float32) * octave_scale
img = tf.image.resize(img, tf.cast(new_size, tf.int32))
for step in range(steps_per_octave):
gradients = get_tiled_gradients(model, img)
img = img + gradients * step_size
img = tf.clip_by_value(img, -1, 1)
if step % 10 == 0:
clear_output(wait=True)
show(deprocess(img))
print("Octave {}, Step {}".format(octave, step))
clear_output(wait=True)
result = deprocess(img)
show(result)
return result
dream_img = run_deep_dream_with_octaves(model=dream_model, img=original_img, step_size=0.01)
clear_output()
show(original_img)
show(dream_img)
```
{
"source": "joseph-wortmann/lambda-setuptools",
"score": 2
} |
#### File: src/lambda_setuptools/ldist.py
```python
import errno
import os
import re
import shutil
import zipfile
from distutils import log
from distutils.errors import (
DistutilsInternalError,
DistutilsOptionError,
DistutilsSetupError,
)
from lambda_pkg_resources import LAMBDA_EXCLUDES, DistInstaller, ExcludesWorkingSet
from pkg_resources import WorkingSet, parse_requirements
from setuptools import Command
def validate_lambda_function(dist, attr, value):
if not re.compile(r"^([a-zA-Z0-9_]+\.)*[a-zA-Z0-9_]+:[a-zA-Z0-9_]+$").match(value):
raise DistutilsSetupError(
f"{attr} must be in the form of 'my_package.some_module:some_function'"
)
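# Illustrative values: 'my_package.handlers:lambda_handler' passes the check,
# while 'my_package.handlers' (no ':function' part) raises DistutilsSetupError.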
def add_lambda_module_to_py_modules(dist, attr, value):
py_modules = getattr(dist, "py_modules", None)
if not py_modules:
py_modules = []
py_modules.append(value)
setattr(dist, "py_modules", py_modules)
def validate_lambda_package(dist, attr, value):
if not os.path.exists(value) or not os.path.isdir(value):
raise DistutilsSetupError(
"lambda_package either doesn't exist or is not a directory"
)
if os.path.exists(os.path.join(value, "__init__.py")):
raise DistutilsSetupError(f"{attr} {value} cannot contain an __init__.py")
class LDist(Command):
description = "build a AWS Lambda compatible distribution"
user_options = [
(
"exclude-lambda-packages=",
None,
"Excludes the packages that are provided by the AWS Lambda execution environment",
),
(
"include-version=",
None,
"Include the version number on the lambda distribution name",
),
("build-layer=", None, "Build a layer instead of a function distribution"),
(
"layer-dir=",
None,
'The directory to place the layer into. Defaults to "python" if not provided',
),
]
def initialize_options(self):
"""Set default values for options."""
# Each user option must be listed here with their default value.
setattr(self, "exclude_lambda_packages", None)
setattr(self, "include_version", None)
setattr(self, "build_layer", None)
setattr(self, "layer_dir", None)
def finalize_options(self):
exclude_lambda_packages = getattr(self, "exclude_lambda_packages")
if (
exclude_lambda_packages is None
or exclude_lambda_packages == ""
or exclude_lambda_packages == "True"
or exclude_lambda_packages == "true"
or exclude_lambda_packages == "Yes"
or exclude_lambda_packages == "yes"
):
setattr(self, "exclude_lambda_packages", True)
elif (
exclude_lambda_packages == "False"
or exclude_lambda_packages == "false"
or exclude_lambda_packages == "No"
or exclude_lambda_packages == "no"
):
setattr(self, "exclude_lambda_packages", False)
else:
raise DistutilsOptionError(
"exclude-lambda-packages must be True, true, Yes, yes, False, false, No, no or absent"
)
include_version = getattr(self, "include_version")
if (
include_version is None
or include_version == ""
or include_version == "True"
or include_version == "true"
or include_version == "Yes"
or include_version == "yes"
):
setattr(self, "include_version", True)
elif (
include_version == "False"
or include_version == "false"
or include_version == "No"
or include_version == "no"
):
setattr(self, "include_version", False)
else:
raise DistutilsOptionError(
"include-version must be True, true, Yes, yes, False, false, No, no or absent"
)
build_layer = getattr(self, "build_layer")
if (
build_layer == "True"
or build_layer == "true"
or build_layer == "Yes"
or build_layer == "yes"
):
setattr(self, "build_layer", True)
elif (
build_layer is None
or build_layer == ""
or build_layer == "False"
or build_layer == "false"
or build_layer == "No"
or build_layer == "no"
):
setattr(self, "build_layer", False)
else:
raise DistutilsOptionError(
"build-layer must be True, true, Yes, yes, False, false, No, no or absent"
)
layer_dir = getattr(self, "layer_dir")
if layer_dir is None:
setattr(self, "layer_dir", "python")
def run(self):
# We must create a distribution to install first
# This is a short-cut to working with the actual build
# directory, or to using the 'install' command, which
# will generally only install a zipped egg
self.run_command("bdist_wheel")
bdist_wheel_command = self.get_finalized_command("bdist_wheel")
setattr(self, "_dist_dir", bdist_wheel_command.dist_dir)
# Install the package built by bdist_wheel
# (or bdist, or bdist_wheel, depending on how the user called setup.py)
impl_tag, abi_tag, plat_tag = bdist_wheel_command.get_tag()
self._install_dist_package(
os.path.join(
bdist_wheel_command.dist_dir,
f"{bdist_wheel_command.wheel_dist_name}-{impl_tag}-{abi_tag}-{plat_tag}.whl",
)
)
# Use zero (if none specified) or more of the lambda_function, lambda_module or
# lambda_package attributes to create the lambda entry point function
if not getattr(self, "build_layer"):
self._create_lambda_entry_point()
# Now build the lambda package
self._build_lambda_package()
def _build_lambda_package(self):
dist_name = (
f"{self.distribution.get_name()}-{self.distribution.get_version()}.zip"
if getattr(self, "include_version")
else f"{self.distribution.get_name()}.zip"
)
dist_path = os.path.join(self._dist_dir, dist_name)
if os.path.exists(dist_path):
os.remove(dist_path)
log.info(f"creating {dist_path}")
with zipfile.ZipFile(dist_path, "w", zipfile.ZIP_DEFLATED) as zf:
abs_src = os.path.abspath(self._lambda_build_dir)
for root, _, files in os.walk(self._lambda_build_dir):
for filename in files:
absname = os.path.abspath(os.path.join(root, filename))
arcname = absname[len(abs_src) + 1 :]
log.debug(f"zipping {os.path.join(root, filename)} as {arcname}")
zf.write(absname, arcname)
# Set the resulting distribution file path for downstream command use
setattr(self, "dist_name", dist_name)
setattr(self, "dist_path", dist_path)
def _create_lambda_entry_point(self):
self._create_lambda_function()
self._copy_lambda_package()
def _create_lambda_function(self):
lambda_function = getattr(self.distribution, "lambda_function", None)
if not lambda_function:
return
components = lambda_function.split(":")
module = components[0]
function = components[1]
function_lines = [
f"import {module}\n",
"\n",
"\n",
f"handler = {module}.{function}\n",
]
package_name = self.distribution.get_name().replace("-", "_").replace(".", "_")
function_path = os.path.join(
self._lambda_build_dir, f"{package_name}_function.py"
)
log.info(f"creating {function_path}")
with open(function_path, "w") as py:
py.writelines(function_lines)
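# Illustrative result: for lambda_function='my_pkg.app:handler' the generated
# <package>_function.py contains "import my_pkg.app" and
# "handler = my_pkg.app.handler".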
def _copy_lambda_package(self):
lambda_package = getattr(self.distribution, "lambda_package", None)
if not lambda_package:
return
for filename in os.listdir(lambda_package):
filepath = os.path.join(lambda_package, filename)
if os.path.isdir(filepath):
log.debug(f"{filepath} is a directory, skipping lambda copy")
continue
log.info(f"copying {filepath} to {self._lambda_build_dir}")
shutil.copy(filepath, self._lambda_build_dir)
def _install_dist_package(self, wheel_path):
# Get the name of the package that we just built
package_name = self.distribution.get_name()
# Get the dist directory that bdist_wheel put the package in
# Create the lambda build dir
self._lambda_build_dir = os.path.join("build", "ldist-" + package_name)
build_dir = self._lambda_build_dir
if getattr(self, "build_layer"):
build_dir = os.path.join(build_dir, getattr(self, "layer_dir"))
try:
if os.path.exists(self._lambda_build_dir):
shutil.rmtree(self._lambda_build_dir)
log.info(f"creating {self._lambda_build_dir}")
os.makedirs(build_dir)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(self._lambda_build_dir):
pass
else:
raise DistutilsInternalError(
f"{self._lambda_build_dir} already exists and is not a directory"
)
log.info(
f"installing package {package_name} from {self._dist_dir} into {build_dir}"
)
# Extract our wheel into our build dir
with zipfile.ZipFile(wheel_path, "r") as zf:
zf.extractall(build_dir)
# Create the working set to get all recursive dependencies, EXCEPT for the libraries included
# with the lambda environment
working_set = (
ExcludesWorkingSet(entries=[build_dir], excludes=LAMBDA_EXCLUDES)
if getattr(self, "exclude_lambda_packages")
else WorkingSet(entries=[build_dir])
)
dist_installer = DistInstaller(build_dir)
working_set.resolve(
parse_requirements(package_name),
installer=dist_installer.fetch_dist,
replace_conflicting=True,
)
```
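For orientation, a minimal usage sketch of the command above. It is not part of the repository sources shown here: the `ldist` command name is inferred from the module and class name, and the `setup_requires` entry plus the example package and handler names are assumptions.
```python
# Hypothetical setup.py for a project built with lambda-setuptools (illustrative).
from setuptools import setup, find_packages

setup(
    name="my-lambda-app",                  # assumed project name
    version="0.1.0",
    packages=find_packages(),
    setup_requires=["lambda-setuptools"],  # assumed distribution name
    # Validated by validate_lambda_function above: 'module.path:function'
    lambda_function="my_package.handlers:lambda_handler",
)

# Then (assuming the command is registered as 'ldist'):
#   python setup.py ldist
#   python setup.py ldist --build-layer=true --layer-dir=python
```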
{
"source": "joseph-x-li/py-alphabet-texter",
"score": 3
} |
#### File: py-alphabet-texter/alphabet_texter/alphabet_texter.py
```python
import tkinter as tk
from alphabet_lib import alphabet_display, alphabet_graph, alphabet_utils
import random # for string shuffling
class AlphabetTexter(tk.Frame):
MASTER_FONT = "Menlo"
KEY_DICT = {
"(A-Z)": "<KEY>",
"(Z-A)": "<KEY>",
"(A-Z), With Spaces": "a b c d e f g h i j k l m n o p q r s t u v w x y z",
"(Z-A), With Spaces": "z y x w v u t s r q p o n m l k j i h g f e d c b a",
"Random": None,
}
KEY_OPTIONS = [
"(A-Z)",
"(Z-A)",
"(A-Z), With Spaces",
"(Z-A), With Spaces",
"Random",
]
def __init__(self, parent, *args, **kwargs):
tk.Frame.__init__(
self,
parent,
*args,
highlightthickness=1,
highlightbackground="black",
**kwargs,
)
self._parent = parent
self.au = alphabet_utils.AlphabetUtils(key=self.KEY_DICT["(A-Z)"])
self.make_internals(self)
self.grid_columnconfigure(0, weight=1)
self.grid_rowconfigure(1, weight=1)
def make_internals(self, parent_frame):
self.title = tk.Label(
parent_frame,
text="Alphabet Texter",
font=(self.MASTER_FONT, 24),
relief="ridge",
)
self.title.grid(row=0, column=0, sticky="news", ipady=5, ipadx=5)
self.graph = alphabet_graph.AlphabetGraph(
parent_frame,
dpi=100,
key=self.KEY_DICT["(A-Z)"],
interval=150,
)
self.graph.grid(row=1, column=0, sticky="news")
self.display = alphabet_display.AlphabetDisplay(
parent_frame, key=self.KEY_DICT["(A-Z)"]
)
self.display.grid(row=2, column=0, sticky="ns", ipadx=5, ipady=5)
self.input_var = tk.StringVar()
self.input_var.trace("w", self.on_keystroke)
self.text_entry = tk.Entry(
parent_frame, textvariable=self.input_var, font=self.MASTER_FONT, width=26
)
self.text_entry.grid(row=3, column=0, sticky="ns")
self.text_entry.focus()
self.util_frame = tk.Frame(parent_frame)
self.util_frame.grid(row=4, column=0, sticky="news")
for i in range(3):
self.util_frame.grid_columnconfigure(i, weight=1)
self.previous_time_label = tk.Label(
self.util_frame, text=f"Recent Time: -", font=self.MASTER_FONT
)
self.previous_time_label.grid(row=0, column=0, sticky="nws")
self.best_time_label = tk.Label(
self.util_frame, text=f"Best Time: -", font=self.MASTER_FONT
)
self.best_time_label.grid(row=0, column=1, sticky="nws")
self.key_selection = tk.StringVar()
self.key_selection.set(self.KEY_OPTIONS[0])
self.key_menu = tk.OptionMenu(
self.util_frame,
self.key_selection,
*self.KEY_OPTIONS,
command=self.on_set_key,
)
self.key_menu.grid(row=0, column=2, sticky="ns")
self.reset_button = tk.Button(
self.util_frame,
text="Reset",
font=self.MASTER_FONT,
command=self.on_reset,
)
self.reset_button.grid(row=0, column=3, sticky="news")
def on_keystroke(self, *args):
inp = self.input_var.get()
correct, letter_states, time_array = self.au.tell(inp)
self.display.set_colors(letter_states, len(inp))
time_array = [(round(t, 5) if t >= 0.0 else 0.0) for t in time_array]
self.graph.set_times(time_array)
if correct:
self.text_entry.config(state="disabled")
self._set_time_display()
def _set_time_display(self):
recent_time, best_time = self.au.get_scores()
self.previous_time_label.config(text=f"Recent Time: {recent_time}")
self.best_time_label.config(text=f"Best Time: {best_time}")
def on_reset(self):
self.au.reset()
self.text_entry.config(state="normal")
self.text_entry.delete(0, "end")
self.display.reset()
self.graph.reset()
def on_set_key(self, event):
new_key = self.KEY_DICT[self.key_selection.get()]
if new_key is None:
new_key = list(self.KEY_DICT["(A-Z)"])
random.shuffle(new_key)
new_key = "".join(new_key)
# destroy and rebuild the graph
self.graph.destroy()
self.graph = alphabet_graph.AlphabetGraph(
self,
dpi=100,
key=new_key,
interval=150,
)
self.graph.grid(row=1, column=0, sticky="news")
# Set backend time calculations
self.au.set_key(new_key)
self.au.reset_scores()
# Set text display
self.display.set_key(new_key)
self.input_var = tk.StringVar()
self.input_var.trace("w", self.on_keystroke)
self.text_entry.destroy()
self.text_entry = tk.Entry(
self,
textvariable=self.input_var,
font=self.MASTER_FONT,
width=len(new_key),
)
self.text_entry.grid(row=3, column=0, sticky="ns")
self.text_entry.focus()
# Set display numbers
self.on_reset()
self._set_time_display()
def main():
root = tk.Tk()
root.title("py-alphabet-texter")
AlphabetTexter(root).pack(side="top", fill="both", expand=True)
root.mainloop()
if __name__ == "__main__":
main()
```
{
"source": "joseph-x-li/pystreaming",
"score": 3
} |
#### File: video/testimages/imageloader.py
```python
from PIL import Image
import os
TEST_S = 0
TEST_M = 1
TEST_L = 2
IMAG_S = 3
IMAG_M = 4
IMAG_L = 5
lookup = [
"640x480_c.png",
"1280x720_c.png",
"1920x1080_c.png",
"640x480_i.png",
"1280x720_i.png",
"1920x1080_i.png",
]
def loadimage(enum):
"""Load a test image or a test card.
Args:
enum (int): One of
pystreaming.TEST_S
pystreaming.TEST_M
pystreaming.TEST_L
pystreaming.IMAG_S
pystreaming.IMAG_M
pystreaming.IMAG_L
Raises:
IndexError: Raised when received enum is not defined.
Returns:
PIL.Image: Image requested.
"""
try:
truepath = os.path.join(
os.path.dirname(os.path.realpath(__file__)), lookup[enum]
)
return Image.open(truepath)
except IndexError:
raise IndexError(f"Unrecognized image option: {enum}")
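# Example usage (illustrative): loadimage(TEST_M) opens '1280x720_c.png'
# from this package directory and returns it as a PIL.Image.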
```
#### File: pystreaming/tests/test_datstructures.py
```python
from pystreaming.listlib.circularlist import CircularList, Empty
from pystreaming.listlib.circulardict import CircularOrderedDict
def testcircularlist():
try:
cl = CircularList(0)
assert False
except ValueError:
pass
cl = CircularList(1)
assert len(cl) == 0
assert not cl.full()
assert cl.__repr__() == "[None] front: 0 back: 0"
cl.push(1)
assert len(cl) == 1
cl.push(2)
assert len(cl) == 1
cl.push(3)
assert cl.full()
assert len(cl) == 1
assert cl.__repr__() == "[3] front: 0 back: 0"
assert cl.pop() == 3
assert not cl.full()
cl = CircularList(5)
assert cl.__repr__() == "[None, None, None, None, None] front: 0 back: 0"
cl.push(0)
cl.push(1)
cl.push(2)
cl.push(3)
assert cl.__repr__() == "[0, 1, 2, 3, None] front: 0 back: 4"
assert len(cl) == 4
cl.push(4)
cl.push(5)
cl.push(6)
assert cl.__repr__() == "[5, 6, 2, 3, 4] front: 2 back: 2"
assert len(cl) == 5
assert cl.pop() == 2
assert cl.__repr__() == "[5, 6, 2, 3, 4] front: 3 back: 2"
assert len(cl) == 4
assert cl.pop() == 3
assert cl.pop() == 4
assert cl[0] == 5
try:
print(cl[2])
assert False
except IndexError:
pass
cl[1] = 7
try:
cl[2] = 8
assert False
except IndexError:
pass
assert cl.pop() == 5
assert cl.pop() == 7
try:
cl.pop()
assert False
except Empty:
pass
def testcirculardict():
try:
cd = CircularOrderedDict(0)
assert False
except ValueError:
pass
cd = CircularOrderedDict(5)
cd.insert_end(1, 1)
cd.insert_end(2, 2)
cd.insert_end(3, 3)
cd.insert_end(4, 4)
assert len(cd) == 4
cd.insert_end(5, 5)
assert len(cd) == 5
cd.insert_end(6, 6)
assert len(cd) == 5
for k, i in zip(cd.keys(), range(2, 7)):
assert k == i
cd.insert_end(3, 100)
cd[2] = -100
assert cd[2] == -100
try:
cd[7] = 7
assert False
except KeyError:
pass
assert cd.__repr__() == "OrderedDict([(2, -100), (4, 4), (5, 5), (6, 6), (3, 100)])"
cd.delete(2)
assert len(cd) == 4
assert cd.pop_front() == (4, 4)
assert len(cd) == 3
assert cd.pop_front() == (5, 5)
assert len(cd) == 2
assert cd.pop_front() == (6, 6)
assert len(cd) == 1
assert cd.pop_front() == (3, 100)
assert len(cd) == 0
```
{
"source": "josephxsxn/alchemists_notepad",
"score": 3
} |
#### File: alchemists_notepad/Object/Alchemical.py
```python
from Object.AlchemicalColor import AlchemicalColor
from Object.AlchemicalSign import AlchemicalSign
from Object.AlchemicalSize import AlchemicalSize
class Alchemical:
def __init__(self, color, sign, size):
self.color = color
self.sign = sign
self.size = size
def get_color(self):
return self.color
def get_sign(self):
return self.sign
def get_size(self):
return self.size
```
#### File: alchemists_notepad/Object/AlchemicalTriplet.py
```python
from Object.Alchemical import Alchemical
class AlchemicalTriplet():
def __init__(self, alchemical_list):
self.alchemicals = alchemical_list
def get_alchemicals(self):
return self.alchemicals
```
#### File: alchemists_notepad/Routine/AlchemicalCombinations.py
```python
from Object.Alchemical import Alchemical
from Object.AlchemicalTriplet import AlchemicalTriplet
from Object.AlchemicalColor import AlchemicalColor
from Object.AlchemicalSign import AlchemicalSign
from Object.AlchemicalSize import AlchemicalSize
from Object.PotionColor import PotionColor
from Object.PotionSign import PotionSign
class AlchemicalCombinations:
#Takes a Potion and a Dictionary of IngredientProperties
#Returns a dict of simple, potion-only reductions of AlchemicalTriplets for the Ingredients used
def reduce_potion_alchemicals(self, potion, ingredients_prop_dic):
potion_ingredients = potion.get_ingredients()
potion_color = potion.get_color()
potion_sign = potion.get_sign()
#create all simple combinations based only on
#potion results for AlchemicalTriplets
#if not exist in dic then create all new Triplets
ingredient_options_list = {}
if potion_color is not PotionColor.NEUTRAL and potion_sign is not PotionSign.NEUTRAL:
for ingredient in potion_ingredients:
#print(ingredients_prop_dic.keys())
if ingredient in ingredients_prop_dic:
#print('IngredientMatched => ' + str(ingredient))
ingredient_options_list[ingredient]=self.potion_only_filter(ingredients_prop_dic[ingredient].get_alchemical_options(), potion_color, potion_sign)
else:
#print('IngredientNOTMatched => ' + str(ingredient))
ingredient_options_list[ingredient]=self.potion_only_filter(self.inital_alchemical_options(), potion_color, potion_sign)
else:
#If it exists, return itself
for ingredient in potion_ingredients:
#SELF
if ingredient in ingredients_prop_dic:
print('SELF => ' + str(ingredient))
print('Options =>' + str(len(ingredients_prop_dic[ingredient].get_alchemical_options())))
ingredient_options_list[ingredient] = ingredients_prop_dic[ingredient].get_alchemical_options()
#ALL
else:
print('ALL => ' + str(ingredient))
ingredient_options_list[ingredient] = self.inital_alchemical_options()
return ingredient_options_list
#Filters list of AlchemicalTriplets to acceptable Triplets by Input Color & Sign
def potion_only_filter(self, triplet_list, color, sign):
filtered_alchemical_triplets = []
for triplet in triplet_list:
for alchem in triplet.get_alchemicals():
#print('ALCHEM COLOR => ' + str(alchem.get_color().value) + ' ' + str(color.value) + ' SIGN => ' + str(alchem.get_sign().value) + ' ' + str(sign.value))
#if alchem.get_color().value == color.value and alchem.get_sign().value == sign.value:
if alchem.get_color() == color and alchem.get_sign() == sign:
#print('MATCHED')
filtered_alchemical_triplets.append(triplet)
return filtered_alchemical_triplets
#Generates all possible AlchemicalTriplets for given Sign and Color of Potion
#This is used when an Ingredient doesn't exist in our Dictionary yet
def inital_alchemical_options(self):
inital_ingredient_triplets = []
#RED Alchemical - OUTER Alchemical in For Loop
for red_sign in AlchemicalSign:
#Initial RESET for all Alchemicals after collection
red_alchemical = None
blue_alchemical = None
green_alchemical = None
for red_size in AlchemicalSize:
red_alchemical = Alchemical(AlchemicalColor.RED, red_sign, red_size)
#BLUE Alchemical - MIDDLE Alchemical in For Loop
for blue_sign in AlchemicalSign:
for blue_size in AlchemicalSize:
blue_alchemical = Alchemical(AlchemicalColor.BLUE, blue_sign, blue_size)
#Green Alchemical - INNER Alchemical in For Loop
#This is where we collect the Alchemicals into a Triplet
for green_sign in AlchemicalSign:
for green_size in AlchemicalSize:
green_alchemical = Alchemical(AlchemicalColor.GREEN, green_sign, green_size)
inital_ingredient_triplets.append(AlchemicalTriplet([red_alchemical, blue_alchemical, green_alchemical]))
return inital_ingredient_triplets
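# Assuming two signs and two sizes per color (as in the Alchemists board game),
# this enumerates 4 options per color, i.e. 4 * 4 * 4 = 64 candidate triplets.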
```
{
"source": "josephxsxn/diceroller",
"score": 3
} |
#### File: josephxsxn/diceroller/diceroll.py
```python
import random
import optparse
from urllib.parse import urlparse
from http.server import BaseHTTPRequestHandler, HTTPServer
import time
#PARSE CLI or use Defaults
def build_parser():
parser = optparse.OptionParser(usage='Roll Some Dice!')
parser.add_option("-s", "--sides", dest="sides", help="Number of sides on the dice rolled - 6, 10, 20?", type=int, default=10)
parser.add_option("-n","--num",dest="num", help="Number of dice to roll.", type=int, default=1)
parser.add_option("-a","--again",dest="again", help="Reroll any dice equal to or above this number", type=int, default=10)
parser.add_option("-+","--above",dest="above", help="Count all dice rolls equal to or above this number", type=int,default=8)
parser.add_option("-w","--web",dest="web", help="setup rest server on given port", type=int, default=0)
(options, args) = parser.parse_args()
return options
def roll_die(sides):
return random.randint(1, sides)
def roll_multipledice(num, sides):
raw_dice = []
for _ in range(num):
raw_dice.append(roll_die(sides))
return raw_dice
def roll_multipledicemultipletimes(num, sides, again):
master_rawdice = []
again_dice = []
still = True
while still:
if again_dice != []:
num = len(again_dice)
raw_dice=roll_multipledice(num, sides)
master_rawdice.extend(raw_dice)
again_dice=list(filter(lambda x: x >= again, raw_dice))
if again_dice == []:
still = False
return master_rawdice
def score_dice(dicerolls, above):
winners = []
for dice in dicerolls:
if dice >= above:
winners.append(dice)
return winners
def parse_queryargs(path):
options = {}
query = (path).split("&")
for arg in query:
quarg=arg.split("=")
if len(quarg) == 2:
options[quarg[0]] = int(quarg[1])
return options
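# Illustrative: a query string like "num=5&sides=10&again=10&above=8"
# parses to {'num': 5, 'sides': 10, 'again': 10, 'above': 8}.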
class DiceHandler(BaseHTTPRequestHandler):
def do_HEAD(s):
s.send_response(200)
s.send_header("Content-type", "text/plain")
s.end_headers()
def do_GET(s):
s.send_response(200)
s.send_header("Content-type", "text/plain")
s.end_headers()
options = parse_queryargs((urlparse(s.path)[4]))
if len(options) == 4:
raw_rolls = roll_multipledicemultipletimes(int(options['num']), options['sides'], options['again'])
winning_rolls = score_dice(raw_rolls, options['above'])
s.wfile.write(bytes(str(raw_rolls)+"\n", "utf-8"))
s.wfile.write(bytes(str(winning_rolls), "utf-8"))
elif len(options) == 3:
raw_rolls = roll_multipledicemultipletimes(int(options['num']), options['sides'], options['again'])
s.wfile.write(bytes(str(raw_rolls)+"\n", "utf-8"))
else:
s.wfile.write(bytes("missing arguments please provide num, sides, again, and optionally above","utf-8"))
def run_webservice(port):
httpd = HTTPServer(("0.0.0.0", port), DiceHandler)
print (time.asctime(), "Server Starts - %s:%s" % ("0.0.0.0", port))
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
print (time.asctime(), "Server Stops - %s:%s" % ("0.0.0.0", port))
if __name__ == "__main__":
options = build_parser()
print ('running with ' + str(options))
if options.web == 0:
t2 = roll_multipledicemultipletimes(options.num, options.sides, options.again)
print ('Rolled ==> ' + str(t2))
t2 = score_dice(t2, options.above)
print ('Success Rolls => ' + str(t2))
else:
print('running as webserver')
run_webservice(options.web)
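# Example invocations (illustrative):
#   python diceroll.py -n 5 -s 10 -a 10 -+ 8    # roll locally
#   python diceroll.py -w 8000                  # then request, e.g.:
#   http://localhost:8000/?num=5&sides=10&again=10&above=8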
```
{
"source": "josephyaconelli/crazyflie_ros-pwm-control",
"score": 3
} |
#### File: crazyflie_mpc/src/lossfnc_pnngaussian.py
```python
import torch
import numpy as np
class PNNLoss_Gaussian(torch.nn.Module):
'''
Here is a brief aside on why we want and will use this loss. Essentially, we will incorporate this loss function to give a probabilistic nature to the dynamics-learning neural nets. The output of the Probabilistic Neural Net (PNN) or Bayesian Neural Net (BNN) will be both a mean for each trained variable and an associated variance. This loss function will take the mean (u), variance (sig), AND the true trained value (s) to compare against the mean. Stacked variances form the Cov matrix
loss_gaussian = sum_{data} (u - s)^T Cov^-1 (u-s) + log Det(Cov)
Need to add code like this to the implementation:
To bound the variance output for a probabilistic network to be between the upper and lower bounds found during training the network on the training data, we used the following code with automatic differentiation:
logvar = max_logvar - tf.nn.softplus(max_logvar - logvar)
logvar = min_logvar + tf.nn.softplus(logvar - min_logvar)
var = tf.exp(logvar)
with a small regularization penalty on max_logvar so that it does not grow beyond the training distribution’s maximum output variance, and on the negative of min_logvar so that it does not drop below the training distribution’s minimum output variance.
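A PyTorch sketch of the same bounding, with F = torch.nn.functional
(illustrative only; max_logvar and min_logvar would be learnable parameters,
which this class does not currently define):
    logvar = max_logvar - F.softplus(max_logvar - logvar)
    logvar = min_logvar + F.softplus(logvar - min_logvar)
    var = torch.exp(logvar)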
'''
def __init__(self):
super(PNNLoss_Gaussian,self).__init__()
def forward(self, output, target):
'''
output is a vector of length 2d
mean is a vector of length d, which is the first set of outputs of the PNN
var is a vector of variances for each of the respective means
target is a vector of the target values for each of the mean
'''
d2 = output.size()[1]
d = torch.tensor(d2/2, dtype=torch.int32)
mean = output[:,:d]
logvar = output[:,d:]
var = torch.exp(logvar)
b_s = mean.size()[0] # batch size
eps = 1e-9 # Add to variance to avoid 1/0
A = mean - target.expand_as(mean)
B = torch.div(mean - target.expand_as(mean), var.add(eps))
loss = sum(torch.bmm(A.view(b_s, 1, -1), B.view(b_s, -1, 1)).reshape(-1,1)+torch.log(torch.abs(torch.prod(var.add(eps),1)).reshape(-1,1)))
return loss
'''
https://github.com/pytorch/pytorch/blob/master/torch/nn/functional.py
def mse_loss(input, target, size_average=True, reduce=True):
"""mse_loss(input, target, size_average=True, reduce=True) -> Tensor
Measures the element-wise mean squared error.
See :class:`~torch.nn.MSELoss` for details.
"""
return _pointwise_loss(lambda a, b: (a - b) ** 2, torch._C._nn.mse_loss,
input, target, size_average, reduce)
'''
```
{
"source": "josephyaconelli/VAEs-in-Economics",
"score": 2
} |
#### File: VAEs-in-Economics/Notebooks/vaes_net.py
```python
import tensorflow.keras as keras
keras.__version__
from tensorflow.keras import layers
from tensorflow.keras import regularizers
from tensorflow.keras import backend as K
from tensorflow.keras.models import Model
from tensorflow import set_random_seed
from numpy.random import seed
import numpy as np
import pandas as pd
def make_vae( full_data,
plot_types_args = {},
img_shape = (389+1, ),
latent_dim = 1,
dense_width = 1024,
l2_penalty=0.0,
l1_penalty=0.0,
encoder_dropout_rate=0.5,
decoder_dropout_rate=0.5,
entanglement_penalty = 2,
hidden_n = 2,
lr_factor = 0.9,
lr_patience = 30):
class PlotEpoch(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs={}):
if epoch % 100 == 0:
plot_types(encoder = self.model.encoder,
decoder = self.model.decoder,
data = self.model.full_data,
**plot_types_args)
plot_epoch = PlotEpoch()
callback_list = [
keras.callbacks.ReduceLROnPlateau(
monitor = 'val_loss',
factor = lr_factor,
patience = lr_patience,
verbose =1 #true
),
plot_epoch
]
input_img = keras.Input(shape=img_shape)
# The last input indicate to the network whether this is validation
is_validation = input_img[:,-1]
input_data = input_img[:,:-1]
input_data = layers.GaussianNoise(0.03*(1-K.mean(is_validation)))(input_data)
x = layers.Dense(dense_width, activation=layers.PReLU(alpha_regularizer=regularizers.l1_l2(
l1=l1_penalty,l2=l2_penalty)), \
kernel_regularizer=regularizers.l1_l2(
l1=l1_penalty,l2=l2_penalty))(input_data)
x = layers.Dropout(encoder_dropout_rate)(x)
for i in range(hidden_n):
x = layers.Dense(dense_width, activation=layers.PReLU(alpha_regularizer=regularizers.l1_l2(
l1=l1_penalty,l2=l2_penalty)),
kernel_regularizer=regularizers.l1_l2(
l1=l1_penalty,l2=l2_penalty))(x)
x = layers.Dropout(encoder_dropout_rate)(x)
z_mean = layers.Dense(latent_dim)(x)
z_log_var = layers.Dense(latent_dim)(x)
# Reduce sampling variance to near zero on validation (idea credit: <NAME>)
is_validation_change = is_validation*100
z_log_var = keras.layers.Subtract()([z_log_var, is_validation_change])
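# For validation rows the log-variance is shifted down by 100, so
# exp(z_log_var) ~ e^-100 and the sampled z collapses to z_mean.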
def sampling(args):
z_mean, z_log_var = args
epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim),
mean=0., stddev=1.)
return z_mean + K.exp(z_log_var) * epsilon
class CustomVariationalLayer(keras.layers.Layer):
def vae_loss(self, x, z_decoded):
is_validation = x[:,-1]
input_data = x[:,:-1]
x = K.flatten(input_data)
z_decoded = K.flatten(z_decoded)
xent_loss = keras.metrics.mse(x, z_decoded)
kl_loss = -5e-4 * K.mean(
1 + z_log_var - K.square(z_mean)
- entanglement_penalty*K.exp(z_log_var), axis=-1)
# Penalize for variance, but only in training
return K.mean(xent_loss + (1-is_validation)*kl_loss)
def call(self, inputs):
x = inputs[0]
z_decoded = inputs[1]
loss = self.vae_loss(x, z_decoded)
self.add_loss(loss, inputs=inputs)
# We don't use this output.
return x
z = layers.Lambda(sampling)([z_mean, z_log_var])
encoder = Model(input_img,z_mean) # Maybe better if Model(input_data,z_mean)
# This is the input where we will feed `z`.
decoder_input = layers.Input(K.int_shape(z)[1:])
print(decoder_input.shape)
x = layers.Dense(dense_width,
activation=layers.PReLU(alpha_regularizer=regularizers.l1_l2(
l1=l1_penalty,l2=l2_penalty)),
kernel_regularizer=regularizers.l1_l2(
l1=l1_penalty,l2=l2_penalty))(decoder_input)
x = layers.Dropout(decoder_dropout_rate)(x)
for i in range(hidden_n):
x = layers.Dense(dense_width,
activation=layers.PReLU(alpha_regularizer=regularizers.l1_l2(
l1=l1_penalty,l2=l2_penalty)),
kernel_regularizer=regularizers.l1_l2(
l1=l1_penalty,l2=l2_penalty))(x)
x = layers.Dropout(decoder_dropout_rate)(x)
x = layers.Dense(img_shape[0]-1,
kernel_regularizer=regularizers.l1_l2(
l1=l1_penalty,l2=l2_penalty))(x)
# This is our decoder model.
decoder = Model(decoder_input, x)
# We then apply it to `z` to recover the decoded `z`.
z_decoded = decoder(z)
# We call our custom layer on the input and the decoded output,
# to obtain the score. Note that the objective is computed by
# this special final layer.
y = CustomVariationalLayer()([input_img, z_decoded])
vae = Model(input_img, y)
vae.compile(optimizer='adam', loss=None)
vae.encoder = encoder
vae.decoder = decoder
vae.full_data = full_data
vae.callback_list = callback_list
return vae
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
import seaborn as sns
sns.set()
sns.set_style("darkgrid")
def plot_types(encoder, decoder, data,
n_type = 60, each_hight = 20, approx_width=400,
frac_width =0.55,
n_activity = 90, lowest_percentile= 1,
highest_percentile = 99, figsize=(10, 37),
cmap='viridis', n_xlabels=13, spacing = -0.005,
hist_size=0.08, scaler=True):
# definitions for the axes
left, width = 0.05, frac_width
bottom, height = 0.025, 0.65
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom + height + spacing, width, hist_size]
rect_colorbar = [left+width+0.1, bottom + height + spacing +0.01, width, 0.02]
# start with a rectangular Figure
plt.figure(figsize=figsize)
ax_scatter = plt.axes(rect_scatter)
ax_scatter.tick_params(direction='in', top=True, right=True)
ax_histx = plt.axes(rect_histx)
ax_histx.tick_params(direction='in', labelbottom=False)
ax_colorbar = plt.axes(rect_colorbar)
ax_colorbar.tick_params(direction='in', labelbottom=False, labelleft=False)
each_width = np.int(np.ceil(approx_width/n_type))
figure = np.zeros((each_hight*n_activity,n_type*each_width))
# Latent values are linearly spaced between the lowest and highest
# percentiles of the encoded data (grid_x below), so each column shows the
# decoded profile for one point along the one-dimensional latent space.
# We need to add a column of ones to indicate validation
test_examples = data.shape[0]
flag_1 = np.ones((test_examples,1),dtype=data.values.dtype)
data_mat = np.concatenate((data.values,flag_1),axis=-1)
encoded_data=encoder.predict(data_mat)
lowest=np.percentile(encoded_data, lowest_percentile)
highest=np.percentile(encoded_data, highest_percentile)
grid_x = np.linspace(lowest, highest, n_type)
for i, xi in enumerate(grid_x):
z_sample = np.array([[xi]])
x_decoded = decoder.predict(z_sample)
figure[0:n_activity*each_hight,i*each_width : (i + 1)*each_width] = \
np.repeat(x_decoded[0,0:n_activity],each_hight).reshape(n_activity*each_hight,1)
if scaler:
figure=np.transpose(figure)
scaler = MinMaxScaler()
figure=scaler.fit_transform(figure)
figure=np.transpose(figure)
im = ax_scatter.imshow(figure, cmap=cmap)
plt.colorbar(im, ax= ax_colorbar, orientation='horizontal', fraction=1)
# prec = pd.DataFrame(np.percentile(df,[50, 75, 95, 99],axis=0))
# ax_scatter.text(1.02*n_type*each_width,
# 0.8*each_hight -each_hight, '50% 75% 95% 99%', fontsize=14)
for i in range(n_activity):
ax_scatter.text(1.02*n_type*each_width,
0.8*each_hight+i*each_hight,
# '{:5.1f} {:5.1f} {:5.1f} {:5.1f} '.format(prec.iloc[0,i]/60,
# prec.iloc[1,i]/60,
# prec.iloc[2,i]/60,
# prec.iloc[3,i]/60) +
data.columns[i], fontsize=14)
bins=np.append(grid_x-(grid_x[1]-grid_x[0])/2,
grid_x[n_type-1]+(grid_x[1]-grid_x[0])/2)
ax_scatter.set_xticks( np.linspace(0,n_type*each_width,n_xlabels))
ax_scatter.set_xticklabels(np.round(np.linspace(bins[0], bins[n_type], n_xlabels),
decimals=2))
ax_scatter.set_yticks([])
ax_histx.set_xticks( np.linspace(bins[0], bins[n_type], n_xlabels))
sns.distplot(encoded_data,ax=ax_histx,bins=bins,kde=False,
rug=False).set_xlim(bins[0],bins[n_type])
#ax_histx.set_xticklabels(np.round(np.linspace(bins[0], bins[n_type], n_xlabels),
# decimals=2))
plt.savefig('type_plot.png')
plt.show()
def encode_plot2d(
encoder,
decoder,
data,
x_col = 'Work__main_job',
y_col = 'Physical_care_for_hh_children',
lowest_percentile=1,
highest_percentile = 99,
n=70,
step=10):
test_examples = data.shape[0]
flag_1 = np.ones((test_examples,1),dtype=data.values.dtype)
data_mat = np.concatenate((data.values,flag_1),axis=-1)
encoded_data=encoder.predict(data_mat)
lowest=np.percentile(encoded_data, lowest_percentile)
highest=np.percentile(encoded_data, highest_percentile)
grid_x = np.linspace(lowest, highest, n)
filtered=pd.DataFrame((decoder.predict(grid_x)))
scaler = MinMaxScaler()
filtered=pd.DataFrame(scaler.fit_transform(filtered))
filtered.columns = data.columns
sns.kdeplot(data[x_col], data[y_col], cmap="Blues", shade=True, bw=.2,
cut=0.1, legend=True)
sns.lineplot(x = filtered[x_col], y=filtered[y_col], linewidth=1.5,
color= '#8E3179', sort=False)
for i in range(0,n,step):
plt.text(filtered[x_col][i]+0.01, filtered[y_col][i]+0.01, np.round(grid_x[i],1),
horizontalalignment='left', size='small', color='black')
plt.savefig(x_col+'_'+y_col+'.png')
plt.show()
```
{
"source": "josephyancey/ha-wyzebulb",
"score": 2
} |
#### File: wyzeapi/tests/test.py
```python
from .secrets import *
from ..wyzeapi.wyzeapi_exceptions import *
from ..wyzeapi.wyzeapi import WyzeApi
def TestAccessTokenError():
print("Test: TestAccessTokenError")
wyze = WyzeApi(username, password, no_save=True)
bulbs = wyze.list_bulbs()
# Kill access token
wyze._access_token = "Killed"
try:
wyze.list_bulbs()
assert(True)
except WyzeApiError:
print("SUCCESS")
return
print("ERROR")
def TestBadPassword():
print("Test: TestBadPassword")
try:
wyze = WyzeApi(username, "BadPassword", no_save=True)
except WyzeApiError:
print("SUCCESS")
return
print("ERROR")
def TestTurnOffBulbs():
print("Test: TestTurnOffBulbs")
wyze = WyzeApi(username, password, no_save=True)
bulbs = wyze.list_bulbs()
for bulb in bulbs:
bulb.turn_off()
print("SUCCESS")
def TestTurnOnBulbs():
print("Test: TestTurnOnBulbs")
wyze = WyzeApi(username, password, no_save=True)
bulbs = wyze.list_bulbs()
for bulb in bulbs:
bulb.turn_on()
print("SUCCESS")
def TestTurnOffSwitches():
print("Test: TestTurnOffSwitches")
wyze = WyzeApi(username, password, no_save=True)
switches = wyze.list_switches()
for switch in switches:
switch.turn_off()
print("SUCCESS")
def TestTurnOnSwitches():
print("Test: TestTurnOnSwitches")
wyze = WyzeApi(username, password, no_save=True)
switches = wyze.list_switches()
for switch in switches:
switch.turn_on()
print("SUCCESS")
if __name__ == '__main__':
TestAccessTokenError()
TestBadPassword()
TestTurnOnBulbs()
TestTurnOffBulbs()
TestTurnOnSwitches()
TestTurnOffSwitches()
```
#### File: wyzeapi/wyzeapi/wyzeapi_config.py
```python
from configparser import ConfigParser
def parseConfig():
config = ConfigParser()
config.read('wyzeconfig.ini')
try:
access_token = config.get('auth', 'access_token')
device_id = config.get('auth', 'device_id')
return (device_id, access_token)
except:
return (None, None)
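# The expected wyzeconfig.ini layout (illustrative):
#   [auth]
#   access_token = <token returned by the Wyze API>
#   device_id = <locally generated device id>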
def updateConfig(device_id, access_token):
config = ConfigParser()
config.read('wyzeconfig.ini')
if not config.has_section('auth'):
    config.add_section('auth')
config.set('auth', 'access_token', str(access_token))
config.set('auth', 'device_id', str(device_id))
with open('wyzeconfig.ini', 'w') as f:
config.write(f)
```
{
"source": "josephyhu/Basketball-Team-Stats-Tool",
"score": 4
} |
#### File: josephyhu/Basketball-Team-Stats-Tool/app.py
```python
import constants
import random
import copy
# Removed the global ALL, replaced with local all_numbers.
def main_menu():
select_option = True
while select_option:
print("Main Menu:\n")
print("1. Display Stats\n2. Quit\n")
option = input("Select an option: ")
print("\n")
try:
if option.isdigit():
option = int(option)
if option != 1 and option != 2:
raise ValueError("Please enter 1 or 2.")
else:
raise ValueError("Please enter 1 or 2.")
except ValueError as err:
print("Invalid input.")
print("({})\n".format(err))
continue
else:
return option
def teams_menu():
select_team = True
while select_team:
print("Teams Menu:\n")
print("0. Main Menu")
for index in range(len(teams)):
print("{}. {}".format(index + 1, teams[index]))
option = input("\nSelect an option: ")
print("\n")
try:
if option.isdigit():
option = int(option)
if option != 0 and option != 1 and option != 2 and option != 3:
raise ValueError("Please enter 0, 1, 2, or 3.")
else:
raise ValueError("Please enter 0, 1, 2, or 3.")
except ValueError as err:
print("Invalid input.")
print("({})\n".format(err))
continue
else:
return option
def add_players():
# Made a copy of all_numbers.
all = copy.deepcopy(all_numbers)
num_players = 0
team = []
nums = set()
while len(nums) < 6:
nums.add(random.choice(all))
for num in nums:
team.append(players[num]["name"])
num_players += 1
all.remove(num)
return num_players, team
def continue_or_quit():
continue_or_quit = input("\nEnter c to continue or q to quit. ")
print("\n")
while continue_or_quit.lower() != 'c' and continue_or_quit.lower() != 'q':
print("Invalid input.")
continue_or_quit = input("Enter c to continue or q to quit. ")
print("\n")
return continue_or_quit.lower()
def convert_heights():
heights = []
converted_heights = []
for player in players:
heights.append(player["height"].split())
for index in range(len(heights)):
converted_heights.append(int(heights[index][0]))
for index in range(len(players)):
players[index]["height"] = converted_heights[index]
def convert_experiences():
experienced = []
converted_experienced = []
for player in players:
experienced.append(player["experience"])
for index in range(len(experienced)):
if experienced[index] == "YES":
experienced[index] = True
else:
experienced[index] = False
converted_experienced.append(experienced[index])
for index in range(len(players)):
players[index]["experience"] = converted_experienced[index]
def stats():
num_players, team = add_players()
print("Total players: {}\n".format(num_players))
print("Players: {}\n".format(", ".join(team)))
def members_available():
# Made a copy of all_numbers
all = copy.deepcopy(all_numbers)
if len(all) == 0:
all = list(range(0, 18))
if __name__ == "__main__":
all_numbers = list(range(0, 18))
# Made copies of constants.TEAMS and constants.PLAYERS
teams = copy.deepcopy(constants.TEAMS)
players = copy.deepcopy(constants.PLAYERS)
# Called the functions that convert heights and experiences, respectively.
# Not sure what else to do with them for meets expectations grade though.
convert_heights()
convert_experiences()
print("Basketball Team Stats Tool\n\n")
team_selected = False
while True:
option = main_menu()
if option == 1:
while not team_selected:
select_team = teams_menu()
if select_team == 0:
break
elif select_team == 1:
print("Team: {}\n".format(teams[0]))
members_available()
stats()
if continue_or_quit() == 'c':
continue
else:
break
elif select_team == 2:
print("Team: {}\n".format(teams[1]))
members_available()
stats()
if continue_or_quit() == 'c':
continue
else:
break
elif select_team == 3:
print("Team: {}\n".format(teams[2]))
members_available()
stats()
if continue_or_quit() == 'c':
continue
else:
break
elif option == 2:
break
```
{
"source": "JosepHyv/omegaup-cli",
"score": 3
} |
#### File: omegaup-cli/omegaup/user.py
```python
from .utils import *
from . import models
class User:
endpoint = ENTRYPOINT + "/api/user"
def __init__(self, target_session):
self.session = target_session
# Log into account.
def login(self, username_or_email, password):
login_data = get_dict("api-user-login")
login_data["password"] = password
login_data["usernameOrEmail"] = username_or_email
return self.session.post(url = self.endpoint + "/login",
params = login_data)
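# Illustrative usage (assuming `target_session` is a requests.Session):
#   session = requests.Session()
#   response = User(session).login('alice', 'hunter2')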
#addGroup = "https://omegaup.com/api/user/addGroup/"
#associateIdentity = "https://omegaup.com/api/user/associateIdentity/"
#changePassword = "https://omegaup.com/api/user/changePassword/"
#coderOfTheMonth = "https://omegaup.com/api/user/coderOfTheMonth/"
#coderOfTheMonthList = "https://omegaup.com/api/user/coderOfTheMonthList/"
#contestStats = "https://omegaup.com/api/user/contestStats/"
#
## Interesting stuff
#create = "https://omegaup.com/api/user/create/"
#generateOmiUsers = "https://omegaup.com/api/user/generateOmiUsers/"
#
#listAssociatedIdentities = "https://omegaup.com/api/user/listAssociatedIdentities/"
#listUnsolvedProblems = "https://omegaup.com/api/user/listUnsolvedProblems/"
#login = "https://omegaup.com/api/user/login/"
#login = "https://omegaup.com"
#login = "https://omegaup.com"
#login = "https://omegaup.com"
#login = "https://omegaup.com"
#login = "https://omegaup.com"
#login = "https://omegaup.com"
#login = "https://omegaup.com"
#login = "https://omegaup.com"
#login = "https://omegaup.com"
#login = "https://omegaup.com"
#login = "https://omegaup.com"
#login = "https://omegaup.com"
#login = "https://omegaup.com"
#login = "https://omegaup.com"
#login = "https://omegaup.com"
```
{
"source": "joseph-zabaleta/data-structures-and-algorithms",
"score": 4
} |
#### File: challenges/breadth_first/breadth_first.py
```python
from collections import deque
class Vertex:
def __init__(self, value):
self.value = value
self.next = None
def __str__(self):
return self.value
class Edge:
def __init__(self, vertex, weight=1):
self.vertex = vertex
self.weight = weight
class Queue:
def __init__(self):
self.storage = deque()
def enqueue(self, value):
"""Takes any value as an argument and adds a new node with that value to the back of the queue with an O(1) Time Performance."""
self.storage.appendleft(value)
def dequeue(self):
"""Takes no arguments, remove the node from the front of the queue, and returns the node's value."""
return self.storage.pop()
def peek(self):
"""Takes no arguments and returns the value of the node located in the front of the queue, without removing it from the queue."""
return self.storage[-1]
def is_empty(self):
"""Takes no arguments and returns a boolean indicating whether or not the queue is empty."""
return len(self.storage) == 0
class Graph:
def __init__(self):
self.graph = {}
def add_node(self, value):
""" Takes in a value to create a new node/vertex for the graph. Returns the added node.
"""
node = Vertex(value)
self.graph[node] = []
return node
def add_edge(self, vertex1, vertex2, weight=1):
""" Takes in two nodes to be linked together with an edge. Third parameter is the ability to add a weight to the edge.
"""
if vertex1 not in self.graph:
raise KeyError('Vertex1 is not in the graph')
if vertex2 not in self.graph:
raise KeyError('Vertex2 is not in the graph')
edge = Edge(vertex2, weight)
self.graph[vertex1].append(edge)
def get_nodes(self):
""" Returns all vertices/nodes within the graph as a collection
"""
return self.graph.keys()
def get_neighbors(self, vertex):
""" Takes in a vertex/node and returns a collection of edges connected to the given vertex/node as well as the weight of the connection.
"""
collection = []
connections = self.graph.get(vertex, [])
for neighbor in connections:
holder = {}
holder[neighbor] = neighbor.weight
collection.append(holder)
return collection
def size(self):
""" Returns the total number of vertices/nodes in the graph
"""
return len(self.graph) if len(self.graph) > 0 else None
def breadth_first(self, vertex):
""" Takes in a node/vertex and performs a breadth first traversal of the graph. This will return a collection of the nodes/vertices within the graph from a breadth first traversal perspective of the given node/vertex.
"""
nodes = []
holder = set()
breadth = Queue()
holder.add(vertex.value)
breadth.enqueue(vertex)
while not breadth.is_empty():
front = breadth.dequeue()
nodes.append(front.value)
for child in self.graph[front]:
if child.vertex.value not in holder:
holder.add(child.vertex.value)
breadth.enqueue(child.vertex)
return nodes
```
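A minimal usage sketch for the `Graph` above (assuming the classes in this file are defined or importable in the same module); it builds a three-node graph and runs the breadth-first traversal from `A`:

```python
# Hypothetical usage of the Graph/breadth_first code above.
graph = Graph()
a = graph.add_node('A')
b = graph.add_node('B')
c = graph.add_node('C')
graph.add_edge(a, b)
graph.add_edge(a, c)
graph.add_edge(b, c)
print(graph.breadth_first(a))  # expected output: ['A', 'B', 'C']
```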
#### File: challenges/fizz_buzz_tree/fizz_buzz_tree.py
```python
from collections import deque
class Node:
def __init__(self, value, left=None, right=None):
self.value = value
self.left = left
self.right = right
class BinaryTree:
def __init__(self):
self.root = None
def add(self, value):
""" This is the default add method that will add nodes to a tree with Breadth First logic
"""
new_node = Node(value)
breadth = Queue()
breadth.enqueue(self.root)
if not self.root:
self.root = new_node
return
while not breadth.is_empty():
front = breadth.dequeue()
if not front.left:
front.left = new_node
return
elif not front.right:
front.right = new_node
return
if front.left:
breadth.enqueue(front.left)
if front.right:
breadth.enqueue(front.right)
def pre_order(self):
"""This is a Depth First traversal method. It prioritizes printing the `root` first, then looks to print `left` if left is not `None`, and lastly looks `right`."""
collection = []
def walk(root):
if not root:
return
# <<< root >>>
collection.append(root.value)
# <<< left
walk(root.left)
# right >>>
walk(root.right)
# end
walk(self.root)
return collection
def in_order(self):
"""This is a Depth First traversal method. It prioritizes printing the `left` first, then prints the `root`, and lastly looks to print `right`."""
collection = []
def walk(root):
if not root:
return
# <<< left
if root.left == None and root.right == None:
collection.append(root.value)
return
else:
walk(root.left)
# <<< root >>>
collection.append(root.value)
# right >>>
if root.left == None and root.right == None:
collection.append(root.value)
return
else:
walk(root.right)
# Invoke
walk(self.root)
return collection
def post_order(self):
"""This is a Depth First traversal method. It prioritizes print the `left` first, then looks to print the `right` and lastly prints the `root`."""
collection = []
def walk(root):
if not root:
return
# <<< left
if root.left == None and root.right == None:
collection.append(root.value)
return
else:
walk(root.left)
# right >>>
if root.left == None and root.right == None:
collection.append(root.value)
return
else:
walk(root.right)
# <<< root >>>
collection.append(root.value)
# Invoke
walk(self.root)
return collection
def breadth_first(self):
""" This is a Breadth first traversal method that iterates through the tree by going through each level of the tree node by node.
"""
collection = []
breadth = Queue()
breadth.enqueue(self.root)
while not breadth.is_empty():
front = breadth.dequeue()
collection.append(front.value)
if front.left:
breadth.enqueue(front.left)
if front.right:
breadth.enqueue(front.right)
return collection
class Queue:
def __init__(self):
self.storage = deque()
def enqueue(self, value):
"""Takes any value as an argument and adds a new node with that value to the back of the queue with an O(1) Time Performance."""
self.storage.appendleft(value)
def dequeue(self):
"""Takes no arguments, remove the node from the front of the queue, and returns the node's value."""
return self.storage.pop()
def peek(self):
"""Takes no arguments and returns the value of the node located in the front of the queue, without removing it from the queue."""
return self.storage[-1]
def is_empty(self):
"""Takes no arguments and returns a boolean indicating whether or not the queue is empty."""
return len(self.storage) == 0
def fizz_buzz_tree(tree):
"""Takes in a tree as a single argument. Changes values throughout the tree based on Fizzbuzz logic, and returns a new tree in the same order and structure.
"""
collection = tree.breadth_first()
new_collection = []
for i in collection:
if i % 3 == 0 and i % 5 == 0:
i = 'FizzBuzz'
new_collection.append(i)
elif i % 3 == 0:
i = 'Fizz'
new_collection.append(i)
elif i % 5 == 0:
i = 'Buzz'
new_collection.append(i)
else:
i = str(i)
new_collection.append(i)
new_tree = BinaryTree()
for i in new_collection:
new_tree.add(i)
return new_tree
```
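A short usage sketch (assuming the classes and `fizz_buzz_tree` above are available); it builds a four-node tree and prints the transformed values in breadth-first order:

```python
# Hypothetical usage of fizz_buzz_tree.
tree = BinaryTree()
for value in [15, 3, 5, 7]:
    tree.add(value)
new_tree = fizz_buzz_tree(tree)
print(new_tree.breadth_first())  # expected output: ['FizzBuzz', 'Fizz', 'Buzz', '7']
```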
#### File: challenges/insertion_sort/insertion_sort.py
```python
def insertion_sort(list1):
for i in range(1, len(list1)):
j = i - 1
temp = list1[i]
while j >= 0 and temp < list1[j]:
list1[j + 1] = list1[j]
j = j - 1
list1[j + 1] = temp
return list1
```
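A quick sanity check for `insertion_sort` (the function sorts the list in place and also returns it):

```python
# Hypothetical usage of insertion_sort.
values = [8, 3, 5, 1]
print(insertion_sort(values))  # [1, 3, 5, 8]
print(values)                  # [1, 3, 5, 8] -- the input list is sorted in place as well
```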
#### File: challenges/multi_bracket_validation/multi_bracket_validation.py
```python
def multi_bracket_validation(string):
"""Takes in a string argument and returns a boolean representing whether or not the brackets within the string are balanced.
"""
brackets = []
open_brackets = []
bracket_dict = {
")" : "(",
"]" : "[",
"}" : "{"
}
if string.count('{') == string.count('}') and string.count('(') == string.count(')') and string.count('[') == string.count(']'):
for char in string:
if char in ['(', ')', '[', ']', '{', '}']:
brackets.append(char)
if len(brackets) == 0:
return True
if brackets[0] in [')', '}', ']']:
return False
for bracket in brackets:
if bracket in ['(', '[', '{']:
open_brackets.append(bracket)
elif open_brackets:
if bracket_dict[bracket] == open_brackets[len(open_brackets)-1]:
open_brackets.pop()
else:
return False
else:
return False
else:
return False
return True
```
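A few illustrative calls (assuming `multi_bracket_validation` above is importable); strings without any brackets count as balanced:

```python
# Hypothetical usage of multi_bracket_validation.
print(multi_bracket_validation('{[()]}'))              # True
print(multi_bracket_validation('[({}]'))               # False -- bracket counts don't match
print(multi_bracket_validation('no brackets at all'))  # True
```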
#### File: challenges/tree_intersection/tree_intersection.py
```python
from collections import deque
class Node:
def __init__(self, value, left=None, right=None):
self.value = value
self.left = left
self.right = right
class Queue:
def __init__(self):
self.storage = deque()
def enqueue(self, value):
"""Takes any value as an argument and adds a new node with that value to the back of the queue with an O(1) Time Performance."""
self.storage.appendleft(value)
def dequeue(self):
"""Takes no arguments, remove the node from the front of the queue, and returns the node's value."""
return self.storage.pop()
def peek(self):
"""Takes no arguments and returns the value of the node located in the front of the queue, without removing it from the queue."""
return self.storage[-1]
def is_empty(self):
"""Takes no arguments and returns a boolean indicating whether or not the queue is empty."""
return len(self.storage) == 0
def tree_intersection(root1, root2):
"""Takes two binary tree roots and will return a list of intersecting values, or nodes that share the same value at the same position in the tree.
"""
output = []
collection1 = []
collection2 = []
tree1 = Queue()
tree1.enqueue(root1.root)
while not tree1.is_empty():
front = tree1.dequeue()
collection1.append(front.value)
if front.left:
tree1.enqueue(front.left)
if front.right:
tree1.enqueue(front.right)
tree2 = Queue()
tree2.enqueue(root2.root)
while not tree2.is_empty():
front = tree2.dequeue()
collection2.append(front.value)
if front.left:
tree2.enqueue(front.left)
if front.right:
tree2.enqueue(front.right)
if len(collection1) > len(collection2):
for i in range(len(collection2)):
if collection1[i] == collection2[i]:
output.append(collection1[i])
else:
for i in range(len(collection1)):
if collection1[i] == collection2[i]:
output.append(collection1[i])
return output
```
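A usage sketch for `tree_intersection`. The function reads each argument through a `.root` attribute, so the example below wraps plain `Node` roots in a minimal, hypothetical container:

```python
# Hypothetical usage of tree_intersection with a minimal tree wrapper.
class SimpleTree:
    def __init__(self, root):
        self.root = root

t1 = SimpleTree(Node(1, Node(2), Node(3)))
t2 = SimpleTree(Node(1, Node(4), Node(3)))
print(tree_intersection(t1, t2))  # [1, 3] -- values matching at the same breadth-first position
```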
#### File: data_structures/hashtable/hashtable.py
```python
class Node:
def __init__(self, data):
self.data = data
self.next = None
class LinkedList:
def __init__(self):
self.head = None
def add(self, data):
node = Node(data)
if not self.head:
self.head = node
else:
current = self.head
while current.next:
current = current.next
current.next = node
def display(self):
collection = []
current = self.head
while current:
collection.append(current.data[0])
current = current.next
return collection
class Hashmap:
"""
This class is used to implement a hashmap
It has four available methods: Add, Get, Contains, Hash
"""
def __init__(self, size):
self.size = size
self.map = [None] * self.size
def add(self, key, value):
"""Add is reponsible for adding data to the hashmap datas structure
"""
hashed_key = self.hash(key)
if not self.map[hashed_key]:
self.map[hashed_key] = LinkedList()
self.map[hashed_key].add([key, value])
def get(self, key):
"""Get is responsible for taking in a key argument and returning the value for that key in the hashmap
"""
index = self.hash(key)
if self.map[index]:
# walk the chain with a local pointer so the bucket's head is not mutated
current = self.map[index].head
while current:
if current.data[0] == key:
return current.data[1]
current = current.next
return None
def contains(self, key):
"""Contains is reponsible for returning a bool for wether or not the provided key is within the data structure
"""
index = self.hash(key)
if self.map[index]:
collection = self.map[index].display()
if key in collection:
return True
else:
pass
return False
def hash(self, key):
"""
Hash is responsible for splitting the key into characters, converting them to ASCII values, summing them, multiplying the sum by a prime number, and taking the modulus by the size of the hashmap to return a valid index at which to store that key.
"""
total = 0
for char in key:
total += ord(char)
total *= 19
hashed_key = total % self.size
return hashed_key
```
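A small usage sketch for the `Hashmap` above. 'listen' and 'silent' contain the same letters, so they hash to the same bucket and the second `add` exercises the `LinkedList` chaining:

```python
# Hypothetical usage of Hashmap with a deliberate collision.
hashmap = Hashmap(1024)
hashmap.add('listen', 'to music')
hashmap.add('silent', 'night')
print(hashmap.get('listen'))       # 'to music'
print(hashmap.get('silent'))       # 'night'
print(hashmap.contains('listen'))  # True
print(hashmap.contains('nope'))    # False
```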
#### File: tests/challenges/--test_fifo_animal_shelter.py
```python
import pytest
from dsa.challenges.fifo_animal_shelter.fifo_animal_shelter import (
Node,
Dog,
Cat,
AnimalShelter,
PseudoQueue
)
def test_AnimalShelter_enqueue_something():
shelter = AnimalShelter()
shelter.enqueue('something')
actual = shelter.peek()
expected = 'something'
assert actual == expected
def test_AnimalShelter_enqueue_Dog():
shelter = AnimalShelter()
oliver = Dog('Oliver')
shelter.enqueue(oliver)
actual = shelter.peek()
expected = oliver
assert actual == expected
def test_AnimalShelter_enqueue_Cat():
shelter = AnimalShelter()
cheeto = Cat('Cheeto')
oliver = Dog('Oliver')
shelter.enqueue(cheeto)
shelter.enqueue(oliver)
actual = shelter.peek()
expected = cheeto
assert actual == expected
def test_AnimalShelter_dequeue_no_pref():
shelter = AnimalShelter()
cheeto = Cat('Cheeto')
oliver = Dog('Oliver')
shelter.enqueue(cheeto)
shelter.enqueue(oliver)
actual = shelter.dequeue()
expected = None
assert actual == expected
def test_AnimalShelter_dequeue_pref_not_catOrDog():
shelter = AnimalShelter()
cheeto = Cat('Cheeto')
oliver = Dog('Oliver')
shelter.enqueue(cheeto)
shelter.enqueue(oliver)
actual = shelter.dequeue('bird')
expected = None
assert actual == expected
def test_AnimalShelter_dequeue_pref_dog():
shelter = AnimalShelter()
cheeto = Cat('Cheeto')
oliver = Dog('Oliver')
shelter.enqueue(cheeto)
shelter.enqueue(oliver)
actual = shelter.dequeue('dog')
expected = 'cat'
assert actual == expected
def test_PseudoQueue_enqueue_cat():
shelter = PseudoQueue()
cheeto = Cat('Cheeto')
oliver = Dog('Oliver')
shelter.enqueue(cheeto)
shelter.enqueue(oliver)
actual = shelter.storage1.peek().kind
expected = 'dog'
assert actual == expected
# Alternate test suite for a second AnimalShelter implementation (uses Bird and str(shelter)):
from dsa.challenges.fifo_animal_shelter.fifo_animal_shelter import (
Dog,
Cat,
Bird,
AnimalShelter
)
def test_AnimalShelter_enqueue_Dog():
shelter = AnimalShelter()
oliver = Dog('Oliver')
shelter.enqueue(oliver)
actual = str(shelter)
expected = '[dog: Oliver] <- None'
assert actual == expected
def test_AnimalShelter_enqueue_Dog_Cat():
shelter = AnimalShelter()
oliver = Dog('Oliver')
cheeto = Cat('Cheeto')
shelter.enqueue(oliver)
shelter.enqueue(cheeto)
actual = str(shelter)
expected = '[dog: Oliver] <- [cat: Cheeto] <- None'
assert actual == expected
def test_AnimalShelter_enqueue_Exception():
with pytest.raises(Exception):
shelter = AnimalShelter()
cookie = Bird('Cookie')
actual = shelter.enqueue(cookie)
expected = 'We can only accept a cat or a dog.'
assert actual == expected
def test_AnimalShelter_dequeue_dog_first():
shelter = AnimalShelter()
oliver = Dog('Oliver')
cheeto = Cat('Cheeto')
shelter.enqueue(oliver)
shelter.enqueue(cheeto)
actual = shelter.dequeque('dog')
expected = 'Oliver'
assert actual == expected
def test_AnimalShelter_dequeue_cat_first():
shelter = AnimalShelter()
oliver = Dog('Oliver')
cheeto = Cat('Cheeto')
shelter.enqueue(cheeto)
shelter.enqueue(oliver)
actual = shelter.dequeque('cat')
expected = 'Cheeto'
assert actual == expected
def test_AnimalShelter_dequeue_dog_not_first():
shelter = AnimalShelter()
oliver = Dog('Oliver')
cheeto = Cat('Cheeto')
sneakers = Cat('Sneakers')
shelter.enqueue(cheeto)
shelter.enqueue(sneakers)
shelter.enqueue(oliver)
actual = shelter.dequeque('dog')
expected = 'Oliver'
assert actual == expected
def test_AnimalShelter_dequeue_cat_not_first():
shelter = AnimalShelter()
oliver = Dog('Oliver')
poncho = Dog('Poncho')
cheeto = Cat('Cheeto')
shelter.enqueue(oliver)
shelter.enqueue(poncho)
shelter.enqueue(cheeto)
actual = shelter.dequeque('cat')
expected = 'Cheeto'
assert actual == expected
def test_AnimalShelter_dequeue_alternating():
shelter = AnimalShelter()
oliver = Dog('Oliver')
cheeto = Cat('Cheeto')
poncho = Dog('Poncho')
sneakers = Cat('Sneakers')
peso = Dog('Peso')
shelter.enqueue(oliver)
shelter.enqueue(poncho)
shelter.enqueue(cheeto)
shelter.enqueue(sneakers)
shelter.enqueue(peso)
actual = shelter.dequeque('cat')
expected = 'Cheeto'
assert actual == expected
```
#### File: tests/challenges/--test_ll_merge.py
```python
import pytest
from dsa.challenges.ll_merge.ll_merge import LinkedList, Node, merge_list
def test_LinkedList_instance():
assert LinkedList()
def test_LinkedList_str():
ll = LinkedList()
ll.insert("a")
ll.insert("b")
actual = str(ll)
expected = "{ b } -> { a } -> NULL"
assert actual == expected
def test_LinkedList_repr():
ll = LinkedList()
actual = repr(ll)
expect = "LinkedList: None"
assert actual == expect
def test_LinkedList_head():
ll = LinkedList()
actual = ll.head
expected = None
assert actual == expected
def test_LinkedList_insert():
ll = LinkedList()
ll.insert("a")
ll.insert("b")
assert ll.head.value == "b"
assert ll.head.next.value == "a"
def test_LinkedList_insert_before(ll_list):
ll_list.insertBefore("a", "d")
actual = str(ll_list)
expected = "{ c } -> { b } -> { d } -> { a } -> NULL"
assert actual == expected
def test_LinkedList_insert_before_false(ll_list):
with pytest.raises(ValueError):
ll_list.insertAfter("f", "d")
def test_LinkedList_insert_after(ll_list):
ll_list.insertAfter("b", "d")
actual = str(ll_list)
expected = "{ c } -> { b } -> { d } -> { a } -> NULL"
assert actual == expected
def test_LinkedList_insert_Exception(ll_list):
with pytest.raises(ValueError):
ll_list.insertAfter("f", "d")
def test_LinkedList_includes_true(ll_list):
actual = ll_list.includes("c")
expected = True
assert actual == expected
def test_LinkedList_includes_false(ll_list):
actual = ll_list.includes("d")
expected = False
assert actual == expected
def test_LinkedList_append(ll_list):
ll_list.append("d")
actual = str(ll_list)
expected = "{ c } -> { b } -> { a } -> { d } -> NULL"
assert actual == expected
def test_LinkedList_kth_from_end_0():
ll = LinkedList()
ll.insert(2)
ll.insert(8)
ll.insert(3)
ll.insert(1)
actual = ll.kth_from_end(0)
expected = 2
assert actual == expected
def test_LinkedList_kth_from_end_2():
ll = LinkedList()
ll.insert(2)
ll.insert(8)
ll.insert(3)
ll.insert(1)
actual = ll.kth_from_end(2)
expected = 3
assert actual == expected
def test_LinkedList_kth_from_end_same_length():
ll = LinkedList()
ll.insert(2)
ll.insert(8)
actual = ll.kth_from_end(2)
expected = 8
assert actual == expected
def test_LinkedList_kth_from_end_IndexError_Exception():
ll = LinkedList()
ll.insert(2)
ll.insert(8)
ll.insert(3)
ll.insert(1)
with pytest.raises(IndexError):
ll.kth_from_end(6)
def test_LinkedList_kth_from_end_ValueError_Exception():
ll = LinkedList()
ll.insert(2)
ll.insert(8)
ll.insert(3)
ll.insert(1)
with pytest.raises(ValueError):
ll.kth_from_end(-6)
def test_node_exception():
with pytest.raises(TypeError):
Node("Test", "This must be a node not a string")
def test_merge_list_list1_none():
list1 = LinkedList()
list2 = LinkedList()
list2.insert("c")
list2.insert("b")
list2.insert("a")
actual = str(merge_list(list1, list2))
expected = "{ a } -> { b } -> { c } -> NULL"
assert actual == expected
def test_merge_list_list2_none():
list1 = LinkedList()
list2 = LinkedList()
list1.insert("c")
list1.insert("b")
list1.insert("a")
actual = str(merge_list(list1, list2))
expected = "{ a } -> { b } -> { c } -> NULL"
assert actual == expected
def test_merge_list_equal_lists():
list1 = LinkedList()
list2 = LinkedList()
list1.insert("e")
list1.insert("c")
list1.insert("a")
list2.insert("f")
list2.insert("d")
list2.insert("b")
actual = str(merge_list(list1, list2))
expected = "{ a } -> { b } -> { c } -> { d } -> { e } -> { f } -> NULL"
assert actual == expected
def test_merge_list_list1_shorter():
list1 = LinkedList()
list2 = LinkedList()
list1.insert("3")
list1.insert("1")
list2.insert("4")
list2.insert("9")
list2.insert("5")
actual = str(merge_list(list1, list2))
expected = "{ 1 } -> { 5 } -> { 3 } -> { 9 } -> { 4 } -> NULL"
assert actual == expected
def test_merge_list_list1_longer():
list1 = LinkedList()
list2 = LinkedList()
list1.insert("3")
list1.insert("1")
list1.insert("4")
list2.insert("9")
list2.insert("5")
actual = str(merge_list(list1, list2))
expected = "{ 4 } -> { 5 } -> { 1 } -> { 9 } -> { 3 } -> NULL"
assert actual == expected
@pytest.fixture
def ll_list():
"""Sets up a linked list instance along with adds a few nodes for testing"""
ll = LinkedList()
ll.insert("a")
ll.insert("b")
ll.insert("c")
return ll
@pytest.fixture
def ll_list_merge():
"""Sets up two linked list instances along with some nodes for testing"""
list1 = LinkedList()
list1.insert("c")
list1.insert("b")
list1.insert("a")
```
#### File: tests/challenges/--test_repeated_word.py
```python
import pytest
from dsa.challenges.repeated_word.repeated_word import word_most_often, repeated_word
@pytest.mark.parametrize(
"test_input,expected",
[
("Once upon a time, there was a brave princess who...", "a"),
("It was the best of times, it was the worst of times, it was the age of wisdom, it was the age of foolishness, it was the epoch of belief, it was the epoch of incredulity, it was the season of Light, it was the season of Darkness, it was the spring of hope, it was the winter of despair, we had everything before us, we had nothing before us, we were all going direct to Heaven, we were all going direct the other way – in short, the period was so far like the present period, that some of its noisiest authorities insisted on its being received, for good or for evil, in the superlative degree of comparison only...", "it"),
("It was a queer, sultry summer, the summer they electrocuted the Rosenbergs, and I didn’t know what I was doing in New York...", "summer"),
],
)
def test_all(test_input, expected):
actual = repeated_word(test_input)
assert actual == expected
def test_random_string_one():
string = 'Something that is random about this assignment is that this string is crazy!'
actual = repeated_word(string)
expected = 'is'
assert actual == expected
def test_random_string_two():
string = "Random something going on here what is random about this"
actual = repeated_word(string)
expected = 'random'
assert actual == expected
``` |
{
"source": "joseph-zabaleta/i-dont-know",
"score": 4
} |
#### File: i-dont-know/src/history.py
```python
import json
from datetime import date
def get_full_info():
"""
This method returns an array with 3 elements; each element is a list whose
position 0 is the title of the list:
list at position 0: number of orders by user
list at position 1: orders by food category
list at position 2: all the orders placed
"""
list_return = []
list_users = []  # internal working list
list_orders = []  # internal working list
list_users_return = []  # returned in list_return at position 0: number of orders by user
list_orders_return = []  # returned in list_return at position 1: orders by food category
list_history_return = []  # returned in list_return at position 2: all the orders placed
order_history = get_history()
if not order_history :
return list_return
for order in order_history["orders"]:
list_users.append(order["user"])
list_orders.append(order["order"])
unique_users_names = get_unique_list_values(list_users)
unique_food_names = get_unique_list_values(list_orders)
# Fill list_users_return (list.count works directly; no tuple conversion needed)
list_users_return.append('Number of orders by user')
for user in unique_users_names:
list_users_return.append(user + " has ordered " + str(list_users.count(user)) + " times.")
# Fill list_orders_return
list_orders_return.append("Orders by food category")
for food in unique_food_names:
list_orders_return.append(food + " has been ordered " + str(list_orders.count(food)) + " times.")
# Filling list_history_return
list_history_return = get_history_list(True)
list_history_return.insert(0, "History of all the orders")
list_return.append(list_users_return)
list_return.append(list_orders_return)
list_return.append(list_history_return)
return list_return
def get_unique_list_values(list_to_review):
"""Helper function, takes in a list as a single argument and returns a unique list.
"""
unique_list = []
# traverse for all elements
for item in list_to_review:
# check if exists in unique_list or not
if item not in unique_list:
unique_list.append(item)
return unique_list
def get_users():
"""
This function returns a list with the names of the users who have placed orders.
"""
list_users = []
order_history = get_history()
if not order_history :
return list_users
for order in order_history["orders"]:
list_users.append(order["user"])
return get_unique_list_values(list_users)
def display_orders_by_user():
"""
Function that displays the number of orders placed by each user, and the number of times
each food category has been ordered.
"""
list_users = []
list_orders = []
order_history = get_history()
if not order_history :
print('There are no orders saved yet...')
return
for order in order_history["orders"]:
list_users.append(order["user"])
list_orders.append(order["order"])
unique_users_names = get_unique_list_values(list_users)
unique_food_names = get_unique_list_values(list_orders)
# list.count works directly; no tuple conversion needed
print("Number of orders by user... \n")
for user in unique_users_names:
print(" " + user + " has ordered " + str(list_users.count(user)) + " times.")
print("\n\n")
print("Number of orders by type... \n")
for food in unique_food_names:
print(" " + food + " has been ordered " + str(list_orders.count(food)) + " times.")
def display_orders_history():
order_list = get_history_list(True)
if len(order_list) == 0:
print('There are no orders saved yet...')
return
print("\n\n")
print('These are the orders so far...\n\n')
for order in order_list:
print(order)
def get_history_list(reversed = None):
"""
This method returns all the orders as a list, one order per element,
reversed when the `reversed` flag is truthy.
"""
try:
order_list = []
with open('./assets/orders_history.txt') as json_file:
history = json.load(json_file)
for order in history["orders"]:
order_info = "On " + order['date'] + ", " + order['user'] + " ordered " + order['order']
order_list.append(order_info)
if reversed : order_list.reverse()
return order_list
except:
return []
def get_history():
"""
Returns all the ordering information in json style
"""
try:
with open('./assets/orders_history.txt') as json_file:
history = json.load(json_file)
return history
except:
return None
def add_order_to_history(user, food, order_date = None):
if not order_date : order_date = date.today()
order_history = get_history()
if not order_history:
order_history = {}
order_history["orders"] = []
order_history["orders"].append({
"user" : user,
"date" : str(order_date),
"order" : food
})
else:
order_history["orders"].append({
"user" : user,
"date" : str(order_date),
"order" : food
})
with open('./assets/orders_history.txt', 'w') as outfile:
json.dump(order_history, outfile)
def load_dummy_data():
add_order_to_history('Skyler', 'Indian','2020-03-29')
add_order_to_history('JB', 'Tai','2020-04-15')
add_order_to_history('JB', 'Pizza','2020-04-15')
add_order_to_history('Ahmad', 'Mexican','2020-04-18')
add_order_to_history('Skyler', 'Italian','2020-05-03')
add_order_to_history('JB', 'Hamburguers','2020-05-14')
add_order_to_history('Aliya', 'Car<NAME>','2020-05-19')
add_order_to_history('Skyler', 'Sushi','2020-05-22')
add_order_to_history('JB', 'Pizza','2020-05-24')
add_order_to_history('Ahmad', 'Tacos','2020-05-29')
add_order_to_history('JB', 'Tacos','2020-06-02')
add_order_to_history('Skyler', 'Hamburguers','2020-06-07')
add_order_to_history('Ahmad', 'Sushi','2020-06-09')
def load_test_info():
"""
Cleans all the ordering information in json file orders_history.
"""
try:
with open('./assets/orders_history.txt', 'w') as outfile:
pass
load_dummy_data()
except:
return None
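# Usage sketch (assumes ./assets/orders_history.txt is writable from the current
# working directory):
#   add_order_to_history('Skyler', 'Indian', '2020-03-29')
#   get_history()       # -> {'orders': [{'user': 'Skyler', 'date': '2020-03-29', 'order': 'Indian'}]}
#   get_history_list()  # -> ['On 2020-03-29, Skyler ordered Indian']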
``` |
{
"source": "JosephZheng1998/CNN-Emotion-Detection",
"score": 2
} |
#### File: CNN-Emotion-Detection/ensembles/train_cnn.py
```python
import os
import numpy as np
from PIL import Image
import torch
import torchvision
from torchvision import transforms
import torchvision.models as models
from torchvision.datasets import ImageFolder
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader, random_split
from sklearn.metrics import roc_auc_score
from sklearn.metrics import confusion_matrix
import seaborn as sns
import time
import pandas as pd
import matplotlib.pyplot as plt
import splitfolders
from training import train_model, training_plot
def preprocessing():
batch_size = 64
train_transform = transforms.Compose([transforms.Resize(224),
transforms.RandomRotation(45),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
val_transform = transforms.Compose([transforms.Resize(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
train_dataset = ImageFolder(root='train/', transform=train_transform)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=16, pin_memory=True)
dev_dataset = ImageFolder(root='val_test/val/', transform=val_transform)
dev_loader = DataLoader(dev_dataset, batch_size=batch_size, shuffle=False, num_workers=16, pin_memory=True)
test_dataset = ImageFolder(root='val_test/test/', transform=val_transform)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=16, pin_memory=True)
print('train dataset: {} images {} classes'.format(len(train_dataset), len(train_dataset.classes)))
print('dev dataset: {} images {} classes'.format(len(dev_dataset), len(dev_dataset.classes)))
print('test dataset: {} images {} classes'.format(len(test_dataset), len(test_dataset.classes)))
return train_loader, dev_loader, test_loader
if __name__ == '__main__':
# run only once to set up validation and test folders
if not os.path.isdir('val_test'):
splitfolders.ratio('test', output='val_test', seed=1337, ratio=(0, 0.5, 0.5), group_prefix=None)
# check for GPU
cuda = torch.cuda.is_available()
device = torch.device('cuda' if cuda else 'cpu')
print(device)
train_loader, dev_loader, test_loader = preprocessing()
num_epochs = 30
num_classes = len(train_loader.dataset.classes)  # the dataset is reachable through the loader returned by preprocessing()
lr = 1e-2
weight_decay = 5e-4
momentum = 0.9
start_epoch = 0
train_losses = []
train_accuracy = []
valid_losses = []
valid_accuracy = []
test_losses = []
test_accuracy = []
empty_cache = True
# change this to train other SOTA CNNs
model_name = 'DenseNet121'
model = models.densenet121(pretrained=True)
model.classifier = nn.Linear(1024, num_classes)
model.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=lr, weight_decay=weight_decay, momentum=momentum)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.5, patience=1, verbose=True)
scaler = torch.cuda.amp.GradScaler()
if os.path.exists('{}.model'.format(model_name)):
print('Found pretrained model!')
saved_model = torch.load('{}.model'.format(model_name))
start_epoch = saved_model['train_epochs']
model.load_state_dict(saved_model['model'])
criterion.load_state_dict(saved_model['criterion'])
optimizer.load_state_dict(saved_model['optimizer'])
scheduler.load_state_dict(saved_model['scheduler'])
scaler.load_state_dict(saved_model['scaler'])
train_losses = saved_model['train_losses']
train_accuracy = saved_model['train_accuracy']
valid_losses = saved_model['valid_losses']
valid_accuracy = saved_model['valid_accuracy']
test_losses = saved_model['test_losses']
test_accuracy = saved_model['test_accuracy']
print('Training', model_name)
train_losses, train_accuracy, valid_losses, valid_accuracy, test_losses, test_accuracy = train_model(model, model_name, train_loader, dev_loader, test_loader, optimizer, criterion, scheduler, scaler, device, start_epoch, num_epochs, train_losses, train_accuracy, valid_losses, valid_accuracy, test_losses, test_accuracy, empty_cache)
training_plot(train_losses, valid_losses, test_losses, 'Loss')
training_plot(train_accuracy, valid_accuracy, test_accuracy, 'Accuracy')
``` |
{
"source": "joseph-zhong/dilation-tensorflow",
"score": 2
} |
#### File: joseph-zhong/dilation-tensorflow/utils.py
```python
import os
import cv2
import numpy as np
from datasets import CONFIG
#import numba
# this function is the same as the one in the original repository
# basically it performs upsampling for datasets having zoom > 1
# @numba.jit(nopython=True)
def interp_map(prob, zoom, width, height):
channels = prob.shape[2]
zoom_prob = np.zeros((height, width, channels), dtype=np.float32)
for c in range(channels):
for h in range(height):
for w in range(width):
r0 = h // zoom
r1 = r0 + 1
c0 = w // zoom
c1 = c0 + 1
rt = float(h) / zoom - r0
ct = float(w) / zoom - c0
v0 = rt * prob[r1, c0, c] + (1 - rt) * prob[r0, c0, c]
v1 = rt * prob[r1, c1, c] + (1 - rt) * prob[r0, c1, c]
zoom_prob[h, w, c] = (1 - ct) * v0 + ct * v1
return zoom_prob
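# Shape note (illustrative, not from the original repository): the bilinear
# lookup above reads prob[r0 + 1, c0 + 1], so the input map needs one extra row
# and column beyond height // zoom and width // zoom. For example, a (5, 5, 3)
# map upsampled with zoom=2 to width=8, height=8 yields an (8, 8, 3) result:
#   interp_map(np.zeros((5, 5, 3), dtype=np.float32), 2, 8, 8).shape  # (8, 8, 3)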
# predict function, mostly kept as it was in the original repository
def predict(image, input_tensor, model, ds, sess, test=False):
image = image.astype(np.float32) - CONFIG[ds]['mean_pixel']
conv_margin = CONFIG[ds]['conv_margin']
input_dims = (1,) + CONFIG[ds]['input_shape']
batch_size, input_height, input_width, num_channels = input_dims
model_in = np.zeros(input_dims, dtype=np.float32)
image_size = image.shape
output_height = input_height - 2 * conv_margin
output_width = input_width - 2 * conv_margin
image = cv2.copyMakeBorder(image, conv_margin, conv_margin,
conv_margin, conv_margin,
cv2.BORDER_REFLECT_101)
# import matplotlib.pyplot as plt
# plt.imshow(image)
# plt.show()
# np.save('/home/josephz/ws/git/ml/framework/scripts/outs/tf_add_const', image)
# exit()
num_tiles_h = image_size[0] // output_height + (1 if image_size[0] % output_height else 0)
num_tiles_w = image_size[1] // output_width + (1 if image_size[1] % output_width else 0)
row_prediction = []
for h in range(num_tiles_h):
col_prediction = []
for w in range(num_tiles_w):
offset = [output_height * h,
output_width * w]
tile = image[offset[0]:offset[0] + input_height,
offset[1]:offset[1] + input_width, :]
margin = [0, input_height - tile.shape[0],
0, input_width - tile.shape[1]]
tile = cv2.copyMakeBorder(tile, margin[0], margin[1],
margin[2], margin[3],
cv2.BORDER_REFLECT_101)
model_in[0] = tile
prob = sess.run(model, feed_dict={input_tensor: tile[None, ...]})[0]
# assert prob.shape == (1024, 1024, 19)
col_prediction.append(prob)
col_prediction = np.concatenate(col_prediction, axis=1) # previously axis=2
row_prediction.append(col_prediction)
prob = np.concatenate(row_prediction, axis=0)
if test:
return prob
else:
assert prob.shape[:-1] == image_size[:-1]
if CONFIG[ds]['zoom'] > 1:
prob = interp_map(prob, CONFIG[ds]['zoom'], image_size[1], image_size[0])
prediction = np.argmax(prob, axis=2)
color_image = CONFIG[ds]['palette'][prediction.ravel()].reshape(image_size)
return color_image
# predict function, no tiles
def predict_no_tiles(image, input_tensor, model, ds, sess, test=False):
image = image.astype(np.float32) - CONFIG[ds]['mean_pixel']
if not os.path.isfile('/home/josephz/ws/git/ml/framework/scripts/dilation/outs/tf/add_const.npy'):
np.save('/home/josephz/ws/git/ml/framework/scripts/dilation/outs/tf/add_const', image)
conv_margin = CONFIG[ds]['conv_margin']
# input_dims = (1,) + CONFIG[ds]['input_shape']
# batch_size, input_height, input_width, num_channels = input_dims
# model_in = np.zeros(input_dims, dtype=np.float32)
image_size = image.shape
image = cv2.copyMakeBorder(image, conv_margin, conv_margin,
conv_margin, conv_margin,
cv2.BORDER_REFLECT_101)
prob = sess.run(model, feed_dict={input_tensor: image[None, ...]})[0]
if test:
return prob
else:
assert prob.shape[:-1] == image_size[:-1]
if CONFIG[ds]['zoom'] > 1:
prob = interp_map(prob, CONFIG[ds]['zoom'], image_size[1], image_size[0])
prediction = np.argmax(prob, axis=2)
color_image = CONFIG[ds]['palette'][prediction.ravel()].reshape(image_size)
outp = '/home/josephz/ws/git/ml/framework/scripts/dilation/outs/tf/semseg.png'
if not os.path.isfile(outp):
import imageio
imageio.imwrite(outp, color_image)
import matplotlib.pyplot as plt
plt.imshow(color_image)
plt.show()
return color_image
``` |
{
"source": "joseph-zhong/LipReading",
"score": 2
} |
#### File: models/face/prnet.py
```python
import os
import numpy as np
import tensorflow as tf
import tensorflow.contrib.layers as tcl
from tensorflow.contrib.framework import arg_scope
from skimage.io import imread, imsave
from skimage.transform import estimate_transform, warp
import src.utils.utility as _util
class PRN:
''' Joint 3D Face Reconstruction and Dense Alignment with Position Map Regression Network
Args:
is_dlib(bool, optional): If true, dlib is used for detecting faces.
prefix(str, optional): If run at another folder, the absolute path is needed to load the data.
'''
def __init__(self, is_dlib=False):
# resolution of input and output image size.
self.resolution_inp = 256
self.resolution_op = 256
# ---- load detectors
if is_dlib:
import dlib
detector_path = _util.getRelWeightsPath('dlib', 'mmod_human_face_detector.dat')
self.face_detector = dlib.cnn_face_detection_model_v1(detector_path)
# ---- load PRN
self.pos_predictor = PosPrediction(self.resolution_inp, self.resolution_op)
prn_path = _util.getRelWeightsPath('prnet', 'net/256_256_resfcn256_weight')
assert os.path.isfile(prn_path + '.data-00000-of-00001'), "please download PRN trained model first."
self.pos_predictor.restore(prn_path)
# uv file: 2 x 68
self.uv_kpt_ind = np.loadtxt(_util.getRelWeightsPath('prnet', 'uv', 'uv_kpt_ind.txt')).astype(np.int32)
# get kpt: get valid vertices in the pos map
self.face_ind = np.loadtxt(_util.getRelWeightsPath('prnet', 'uv', 'face_ind.txt')).astype(np.int32)
# ntri x 3.
self.triangles = np.loadtxt(_util.getRelWeightsPath('prnet', 'uv', 'triangles.txt')).astype(np.int32)
self.uv_coords = self.generate_uv_coords()
# Cache Position map.
self.pos = None
def generate_uv_coords(self):
resolution = self.resolution_op
uv_coords = np.meshgrid(range(resolution), range(resolution))
uv_coords = np.transpose(np.array(uv_coords), [1, 2, 0])
uv_coords = np.reshape(uv_coords, [resolution ** 2, -1])
uv_coords = uv_coords[self.face_ind, :]
uv_coords = np.hstack((uv_coords[:, :2], np.zeros([uv_coords.shape[0], 1])))
return uv_coords
def dlib_detect(self, image):
return self.face_detector(image, 1)
def net_forward(self, image):
''' The core of our method: regress the position map of a given image.
Args:
image: (256,256,3) array. value range: 0~1
Returns:
pos: the 3D position map. (256, 256, 3) array.
'''
return self.pos_predictor.predict(image)
def process(self, input, image_info=None):
''' process image with crop operation.
Args:
input: (h,w,3) array or str(image path). image value range:1~255.
image_info(optional): the bounding box information of faces. if None, will use dlib to detect face.
Returns:
pos: the 3D position map. (256, 256, 3).
'''
if isinstance(input, str):
try:
image = imread(input)
except IOError:
print("error opening file: ", input)
return None
else:
image = input
if image.ndim < 3:
image = np.tile(image[:, :, np.newaxis], [1, 1, 3])
if image_info is not None:
# REVIEW josephz: Deprecated behavior, accepting landmarks to infer tight bbox.
# if np.max(image_info.shape) > 4: # key points to get bounding box
# kpt = image_info
# if kpt.shape[0] > 3:
# kpt = kpt.T
# left = np.min(kpt[0, :])
# right = np.max(kpt[0, :])
# top = np.min(kpt[1, :])
# bottom = np.max(kpt[1, :])
# else: # bounding box
bbox = image_info
left = bbox[0]
right = bbox[1]
top = bbox[2]
bottom = bbox[3]
old_size = (right - left + bottom - top) / 2
center = np.array([right - (right - left) / 2.0, bottom - (bottom - top) / 2.0])
size = int(old_size * 1.6)
else:
detected_faces = self.dlib_detect(image)
if len(detected_faces) == 0:
print('warning: no detected face')
return None
d = detected_faces[
0].rect ## only use the first detected face (assume that each input image only contains one face)
left = d.left()
right = d.right()
top = d.top()
bottom = d.bottom()
old_size = (right - left + bottom - top) / 2
center = np.array([right - (right - left) / 2.0, bottom - (bottom - top) / 2.0 + old_size * 0.14])
size = int(old_size * 1.58)
# crop image
src_pts = np.array([[center[0] - size / 2, center[1] - size / 2], [center[0] - size / 2, center[1] + size / 2],
[center[0] + size / 2, center[1] - size / 2]])
DST_PTS = np.array([[0, 0], [0, self.resolution_inp - 1], [self.resolution_inp - 1, 0]])
tform = estimate_transform('similarity', src_pts, DST_PTS)
image = image / 255.
cropped_image = warp(image, tform.inverse, output_shape=(self.resolution_inp, self.resolution_inp))
# run our net
# st = time()
cropped_pos = self.net_forward(cropped_image)
# print 'net time:', time() - st
# restore
cropped_vertices = np.reshape(cropped_pos, [-1, 3]).T
z = cropped_vertices[2, :].copy() / tform.params[0, 0]
cropped_vertices[2, :] = 1
vertices = np.dot(np.linalg.inv(tform.params), cropped_vertices)
vertices = np.vstack((vertices[:2, :], z))
pos = np.reshape(vertices.T, [self.resolution_op, self.resolution_op, 3])
# Cache position map.
self.pos = pos
return pos, image
def get_landmarks(self, pos):
'''
Args:
pos: the 3D position map. shape = (256, 256, 3).
Returns:
kpt: 68 3D landmarks. shape = (68, 3).
'''
kpt = pos[self.uv_kpt_ind[1, :], self.uv_kpt_ind[0, :], :]
return kpt
def get_vertices(self, pos):
'''
Args:
pos: the 3D position map. shape = (256, 256, 3).
Returns:
vertices: the vertices(point cloud). shape = (num of points, 3). n is about 40K here.
'''
all_vertices = np.reshape(pos, [self.resolution_op ** 2, -1])
vertices = all_vertices[self.face_ind, :]
return vertices
def get_colors_from_texture(self, texture):
'''
Args:
texture: the texture map. shape = (256, 256, 3).
Returns:
colors: the corresponding colors of vertices. shape = (num of points, 3). n is 45128 here.
'''
all_colors = np.reshape(texture, [self.resolution_op ** 2, -1])
colors = all_colors[self.face_ind, :]
return colors
def get_colors(self, image, vertices):
'''
Args:
pos: the 3D position map. shape = (256, 256, 3).
Returns:
colors: the corresponding colors of vertices. shape = (num of points, 3). n is 45128 here.
'''
[h, w, _] = image.shape
vertices[:, 0] = np.minimum(np.maximum(vertices[:, 0], 0), w - 1) # x
vertices[:, 1] = np.minimum(np.maximum(vertices[:, 1], 0), h - 1) # y
ind = np.round(vertices).astype(np.int32)
colors = image[ind[:, 1], ind[:, 0], :] # n x 3
return colors
def resBlock(x, num_outputs, kernel_size=4, stride=1, activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm,
scope=None):
assert num_outputs % 2 == 0 # num_outputs must be divided by channel_factor(2 here)
with tf.variable_scope(scope, 'resBlock'):
shortcut = x
if stride != 1 or x.get_shape()[3] != num_outputs:
shortcut = tcl.conv2d(shortcut, num_outputs, kernel_size=1, stride=stride,
activation_fn=None, normalizer_fn=None, scope='shortcut')
x = tcl.conv2d(x, num_outputs / 2, kernel_size=1, stride=1, padding='SAME')
x = tcl.conv2d(x, num_outputs / 2, kernel_size=kernel_size, stride=stride, padding='SAME')
x = tcl.conv2d(x, num_outputs, kernel_size=1, stride=1, activation_fn=None, padding='SAME', normalizer_fn=None)
x += shortcut
x = normalizer_fn(x)
x = activation_fn(x)
return x
class resfcn256(object):
def __init__(self, resolution_inp=256, resolution_op=256, channel=3, name='resfcn256'):
self.name = name
self.channel = channel
self.resolution_inp = resolution_inp
self.resolution_op = resolution_op
def __call__(self, x, is_training=True):
with tf.variable_scope(self.name) as scope:
with arg_scope([tcl.batch_norm], is_training=is_training, scale=True):
with arg_scope([tcl.conv2d, tcl.conv2d_transpose], activation_fn=tf.nn.relu,
normalizer_fn=tcl.batch_norm,
biases_initializer=None,
padding='SAME',
weights_regularizer=tcl.l2_regularizer(0.0002)):
size = 16
# x: s x s x 3
se = tcl.conv2d(x, num_outputs=size, kernel_size=4, stride=1) # 256 x 256 x 16
se = resBlock(se, num_outputs=size * 2, kernel_size=4, stride=2) # 128 x 128 x 32
se = resBlock(se, num_outputs=size * 2, kernel_size=4, stride=1) # 128 x 128 x 32
se = resBlock(se, num_outputs=size * 4, kernel_size=4, stride=2) # 64 x 64 x 64
se = resBlock(se, num_outputs=size * 4, kernel_size=4, stride=1) # 64 x 64 x 64
se = resBlock(se, num_outputs=size * 8, kernel_size=4, stride=2) # 32 x 32 x 128
se = resBlock(se, num_outputs=size * 8, kernel_size=4, stride=1) # 32 x 32 x 128
se = resBlock(se, num_outputs=size * 16, kernel_size=4, stride=2) # 16 x 16 x 256
se = resBlock(se, num_outputs=size * 16, kernel_size=4, stride=1) # 16 x 16 x 256
se = resBlock(se, num_outputs=size * 32, kernel_size=4, stride=2) # 8 x 8 x 512
se = resBlock(se, num_outputs=size * 32, kernel_size=4, stride=1) # 8 x 8 x 512
pd = tcl.conv2d_transpose(se, size * 32, 4, stride=1) # 8 x 8 x 512
pd = tcl.conv2d_transpose(pd, size * 16, 4, stride=2) # 16 x 16 x 256
pd = tcl.conv2d_transpose(pd, size * 16, 4, stride=1) # 16 x 16 x 256
pd = tcl.conv2d_transpose(pd, size * 16, 4, stride=1) # 16 x 16 x 256
pd = tcl.conv2d_transpose(pd, size * 8, 4, stride=2) # 32 x 32 x 128
pd = tcl.conv2d_transpose(pd, size * 8, 4, stride=1) # 32 x 32 x 128
pd = tcl.conv2d_transpose(pd, size * 8, 4, stride=1) # 32 x 32 x 128
pd = tcl.conv2d_transpose(pd, size * 4, 4, stride=2) # 64 x 64 x 64
pd = tcl.conv2d_transpose(pd, size * 4, 4, stride=1) # 64 x 64 x 64
pd = tcl.conv2d_transpose(pd, size * 4, 4, stride=1) # 64 x 64 x 64
pd = tcl.conv2d_transpose(pd, size * 2, 4, stride=2) # 128 x 128 x 32
pd = tcl.conv2d_transpose(pd, size * 2, 4, stride=1) # 128 x 128 x 32
pd = tcl.conv2d_transpose(pd, size, 4, stride=2) # 256 x 256 x 16
pd = tcl.conv2d_transpose(pd, size, 4, stride=1) # 256 x 256 x 16
pd = tcl.conv2d_transpose(pd, 3, 4, stride=1) # 256 x 256 x 3
pd = tcl.conv2d_transpose(pd, 3, 4, stride=1) # 256 x 256 x 3
pos = tcl.conv2d_transpose(pd, 3, 4, stride=1,
activation_fn=tf.nn.sigmoid) # , padding='SAME', weights_initializer=tf.random_normal_initializer(
# 0, 0.02))
return pos
@property
def vars(self):
return [var for var in tf.global_variables() if self.name in var.name]
class PosPrediction():
def __init__(self, resolution_inp=256, resolution_op=256):
# -- hyper settings
self.resolution_inp = resolution_inp
self.resolution_op = resolution_op
self.MaxPos = resolution_inp * 1.1
# network type
self.network = resfcn256(self.resolution_inp, self.resolution_op)
# net forward
self.x = tf.placeholder(tf.float32, shape=[None, self.resolution_inp, self.resolution_inp, 3])
self.x_op = self.network(self.x, is_training=False)
self.sess = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)))
def restore(self, model_path):
tf.train.Saver(self.network.vars).restore(self.sess, model_path)
def predict(self, image):
pos = self.sess.run(self.x_op,
feed_dict={self.x: image[np.newaxis, :, :, :]})
pos = np.squeeze(pos)
return pos * self.MaxPos
def predict_batch(self, images):
pos = self.sess.run(self.x_op,
feed_dict={self.x: images})
return pos * self.MaxPos
```
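A hedged usage sketch for the `PRN` wrapper above (assumes the PRNet weights and, optionally, the dlib detector are available locally; `face.jpg` is a placeholder path):

```python
# Hypothetical usage of PRN: regress the position map for one image and read the
# 68 landmarks and dense vertices from it.
prn = PRN(is_dlib=True)
result = prn.process('face.jpg')   # returns None when no face is detected
if result is not None:
    pos, normalized_image = result
    landmarks = prn.get_landmarks(pos)  # shape (68, 3)
    vertices = prn.get_vertices(pos)    # dense point cloud
```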
#### File: src/scripts/generate_labels.py
```python
import glob
import json
import os
import sys
import tqdm as _tqdm
import src.utils.utility as _util
import src.utils.cmd_line as _cmd
from src.utils.data.caption import extract_captions, prune_and_filter_captions
_logger = _util.getLogger("CMD Line")
def _getSharedLogger(verbosity=_util.DEFAULT_VERBOSITY):
global _logger
if _logger is None:
_logger = _util.getLogger(os.path.basename(__file__).split('.')[0], verbosity=verbosity)
return _logger
def _gen_from_dataset(dataset_path, cap_ext):
assert os.path.isdir(dataset_path)
raise NotImplementedError
def _gen_from_raw(raw_path, cap_ext):
assert os.path.isdir(raw_path)
chars = set()
for video in _tqdm.tqdm(glob.glob(os.path.join(raw_path, '*' + cap_ext))):
captions = extract_captions(video)
captions = prune_and_filter_captions(captions)
for caption in captions.values():
chars.update(caption)
return sorted(chars)
def generate_labels(data, output="labels.json", cap_ext=".vtt", use_raw=True):
"""
Generates an ordered list of labels, unique characters used in video captions, from
the specified data.
:param data: Either dataset or raw data name.
:param output: File in data directory to which the labels should be written, or - for stdout.
:param cap_ext: File extension for subtitle files.
:param use_raw: True to use raw, else uses dataset.
"""
data_path = _util.getRelRawPath(data) if use_raw else _util.getRelDatasetsPath(data)
if use_raw:
labels = _gen_from_raw(data_path, cap_ext)
else:
labels = _gen_from_dataset(data_path, cap_ext)
if output == "-":
json.dump(labels, sys.stdout)
else:
with open(os.path.join(data_path, output), "w") as out:
json.dump(labels, out)
def main():
global _logger
args = _cmd.parseArgsForClassOrScript(generate_labels)
varsArgs = vars(args)
verbosity = varsArgs.pop('verbosity', _util.DEFAULT_VERBOSITY)
_getSharedLogger(verbosity=verbosity).info("Passed arguments: '{}'".format(varsArgs))
generate_labels(**varsArgs)
if __name__ == '__main__':
main()
```
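A hedged usage sketch for `generate_labels` (the data name below is hypothetical; any directory of raw `.vtt` captions under the project's raw-data path would work). With `output='-'` the label list is dumped to stdout as JSON:

```python
# Hypothetical direct call, bypassing the command-line wrapper.
generate_labels('StephenColbert', output='-', cap_ext='.vtt', use_raw=True)
```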
#### File: src/scripts/train.py
```python
import os
import time
import numpy as np
import torch
import torch.utils.data as _data
import tensorboardX
import src.data.data_loader as _data_loader
import src.models.lipreader.better_model as _better_model
import src.train.train_better_model as _train
import src.utils.cmd_line as _cmd
import src.utils.utility as _util
import src.models.lipreader.analysis as _analysis
_logger = None
def _getSharedLogger(verbosity=_util.DEFAULT_VERBOSITY):
global _logger
if _logger is None:
_logger = _util.getLogger(os.path.basename(__file__).split('.')[0], verbosity=verbosity)
return _logger
def _get_datasets(dataset_name, train_split, sentence_dataset,
threshold=0.8,
labels='labels.json', rand=None, refresh=False, include_test=True):
# REVIEW josephz: If we can load from pickles, we should not even do this split. We could have a helper factory thingy?
# Load dataset video IDs and shuffle predictably.
train_ids, val_ids, test_ids = _data_loader.split_dataset(dataset_name, train_split=train_split, rand=rand)
train_dataset = _data_loader.FrameCaptionDataset(dataset_name, 'train', train_ids,
labels=labels, threshold=threshold, sentence_dataset=sentence_dataset, refresh=refresh)
val_dataset = _data_loader.FrameCaptionDataset(dataset_name, 'val', val_ids,
labels=labels, threshold=threshold, sentence_dataset=sentence_dataset, refresh=refresh)
if include_test:
test_dataset = _data_loader.FrameCaptionDataset(dataset_name, 'test', test_ids,
labels=labels, threshold=threshold, sentence_dataset=sentence_dataset, refresh=refresh)
print()
print("Dataset Information:")
print("\tTrain Dataset Size:", len(train_dataset))
print("\tVal Dataset Size:", len(val_dataset))
if include_test:
print("\tTest Dataset Size:", len(test_dataset))
print()
return (train_dataset, val_dataset, test_dataset) if include_test else (train_dataset, val_dataset)
def _init_models(
char2idx,
num_layers,
frame_dim,
hidden_size,
char_dim,
enable_ctc,
rnn_type,
attention_type,
attn_hidden_size,
bidirectional,
rnn_dropout,
device
):
encoder = _better_model.VideoEncoder(frame_dim, hidden_size,
rnn_type=rnn_type, num_layers=num_layers, bidirectional=bidirectional, rnn_dropout=rnn_dropout,
enable_ctc=enable_ctc, vocab_size=len(char2idx), char2idx=char2idx, device=device).to(device)
decoding_step = _better_model.CharDecodingStep(encoder,
char_dim=char_dim, vocab_size=len(char2idx), char2idx=char2idx, rnn_dropout=rnn_dropout, attention_type=attention_type,
attn_hidden_size=attn_hidden_size, device=device).to(device)
return encoder, decoding_step
def restore(net, save_file):
"""Restores the weights from a saved file
This does more than the simple Pytorch restore. It checks that the names
of variables match, and if they don't doesn't throw a fit. It is similar
to how Caffe acts. This is especially useful if you decide to change your
network architecture but don't want to retrain from scratch.
Args:
net(torch.nn.Module): The net to restore
save_file(str): The file path
"""
net_state_dict = net.state_dict()
restore_state_dict = torch.load(save_file)
restored_var_names = set()
print('\tRestoring:')
for var_name in restore_state_dict.keys():
if var_name in net_state_dict:
var_size = net_state_dict[var_name].size()
restore_size = restore_state_dict[var_name].size()
if var_size != restore_size:
print('\t\tShape mismatch for var', var_name, 'expected', var_size, 'got', restore_size)
else:
if isinstance(net_state_dict[var_name], torch.nn.Parameter):
# backwards compatibility for serialized parameters
net_state_dict[var_name] = restore_state_dict[var_name].data
try:
net_state_dict[var_name].copy_(restore_state_dict[var_name])
print(str(var_name) + ' -> \t' + str(var_size) + ' = ' + str(int(np.prod(var_size) * 4 / 10**6)) + 'MB')
restored_var_names.add(var_name)
except Exception as ex:
print('\t\tWhile copying the parameter named {}, whose dimensions in the model are'
' {} and whose dimensions in the checkpoint are {}, ...'.format(
var_name, var_size, restore_size))
raise ex
ignored_var_names = sorted(list(set(restore_state_dict.keys()) - restored_var_names))
unset_var_names = sorted(list(set(net_state_dict.keys()) - restored_var_names))
if len(ignored_var_names) == 0:
print('\t\tRestored all variables')
else:
print('\t\tDid not restore:\n\t' + '\n\t'.join(ignored_var_names))
if len(unset_var_names) == 0:
print('\t\tNo new variables')
else:
print('\t\tInitialized but did not modify:\n\t' + '\n\t'.join(unset_var_names))
print('\tRestored %s' % save_file)
def train(
data="StephenColbert/medium_no_vtx1",
labels="labels.json",
sentence_dataset=False,
occlussion_threshold=0.8,
train_split=0.8,
num_workers=1,
refresh=False,
patience=10,
batch_size=4,
learning_rate=1e-4,
annealings=2,
enable_ctc=False,
grad_norm=50,
tr_epochs=50,
max_tfr=0.9,
min_tfr=0.0,
num_layers=1,
frame_dim=68*3,
hidden_size=700,
char_dim=300,
rnn_type='LSTM',
attention_type='1_layer_nn',
attn_hidden_size=-1,
bidirectional=False,
rnn_dropout=0.0,
seed=123456,
cuda=False,
):
""" Runs the primary training loop.
:param data:
:param labels:
:param sentence_dataset:
:param occlussion_threshold:
:param train_split:
:param num_workers:
:param patience:
:param batch_size:
:param learning_rate:
:param annealings: Number of times to anneal learning rate before training is finished.
:param enable_ctc:
:param max_tfr:
:param grad_norm:
:param num_layers:
:param frame_dim:
:param hidden_size:
:param char_dim:
:param rnn_type:
:param attention_type:
:param attn_hidden_size:
:param bidirectional:
:param rnn_dropout:
:param seed:
:param cuda:
"""
# Setup seed.
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
rand = np.random.RandomState(seed=seed)
# Setup device.
# REVIEW josephz: Is there a clean way to use multiple or different GPUs?
device = torch.device('cuda') if cuda else torch.device('cpu')
print("Device: ", device)
# Init Data.
print("Initializing dataset '{}'".format(data))
train_dataset, val_dataset, test_dataset = _get_datasets(data, train_split, sentence_dataset,
threshold=occlussion_threshold, labels=labels, rand=rand, refresh=refresh, include_test=True)
train_loader = _data.DataLoader(train_dataset, batch_size=batch_size, num_workers=num_workers, collate_fn=_data_loader._collate_fn)
val_loader = _data.DataLoader(val_dataset, batch_size=batch_size, num_workers=num_workers, collate_fn=_data_loader._collate_fn)
test_loader = _data.DataLoader(test_dataset, batch_size=batch_size, num_workers=num_workers, collate_fn=_data_loader._collate_fn)
# Init Models.
print("Initializing model")
encoder, decoding_step = _init_models(train_dataset.char2idx, num_layers, frame_dim, hidden_size, char_dim,
enable_ctc, rnn_type, attention_type, attn_hidden_size, bidirectional, rnn_dropout, device)
# Initialize Logging.
weights_dir = _util.getRelWeightsPath(data, use_existing=False)
tensorboard_writer = tensorboardX.SummaryWriter(weights_dir)
_getSharedLogger().info("Writing Tensorboard logs to '%s'", weights_dir)
print()
print("Try visualizing by running the following:")
print(f"\ttensorboard --logdir='{weights_dir}'")
print("Then open the following URL in your local browser. "
"\n\tIf you're running on a remote machine see `README_TENSORBOARD.md` for help...")
# REVIEW josephz: Multi-input support doesn't seem ready yet: https://github.com/lanpa/tensorboardX/issues/256
# tensorboard_writer.add_graph(encoder,
# torch.autograd.Variable(
# torch.tensor([torch.zeros(batch_size, 100, 68, 3), torch.zeros(batch_size,))))
# tensorboard_writer.add_graph(decoding_step,
# torch.autograd.Variable(
# torch.tensor(torch.zeros(batch_size,), torch.zeros(num_layers, batch_size, hidden_size), torch.zeros(batch_size,), torch.zeros(batch_size, 100,
# hidden_size))))
# Train.
val_cers = []
train_decoder_losses = []
train_ctc_losses = []
best_val_cer = 1.0
best_val_cer_idx = -1
# Initial evaluation
print("Initial evaluation...")
decoder_loss, val_correct, val_count = _train.eval(encoder, decoding_step, val_loader, device, train_dataset.char2idx)
val_cer = (val_count - val_correct).float() / val_count
print("\tCER: ", str(val_cer))
encoder_path = os.path.join(weights_dir, "best_encoder.pth")
decoder_path = os.path.join(weights_dir, "best_decoder.pth")
num_epochs = 0
num_annealings = 0
print("Beginning training loop")
ts = time.time()
while val_cer < best_val_cer or num_annealings < annealings:
print("Epoch {}:".format(num_epochs + 1))
if num_epochs - best_val_cer_idx > patience:
# If the model does not improve after our set 'patience' number of epochs, we will reduce the learning rate.
num_annealings += 1
learning_rate /= 5
print(f'\tAnnealing to {learning_rate}')
restore(encoder, encoder_path)
restore(decoding_step, decoder_path)
# Must set best val CER to here, or else this will also trigger next loop
# if val CER does not go down.
best_val_cer_idx = num_epochs
# Apply linear teacher-forcing ratio decay.
curr_tfr = max(min_tfr, max_tfr - num_epochs / tr_epochs)
assert 0.0 <= curr_tfr <= 1.0
print(f'\tCurrent Teacher Forcing Ratio: {curr_tfr}')
avg_decoder_loss, avg_ctc_loss = _train.train(encoder, decoding_step, train_loader,
opt=torch.optim.Adam(list(encoder.parameters()) + list(decoding_step.parameters()), lr=learning_rate),
device=device,
char2idx=train_dataset.char2idx,
teacher_forcing_ratio=curr_tfr,
grad_norm=grad_norm)
print(f'\tAVG Decoder Loss: {avg_decoder_loss}')
print(f'\tAVG CTC Loss: {avg_ctc_loss}')
tensorboard_writer.add_scalar(os.path.join(data, 'avg decoder loss'), avg_decoder_loss, global_step=num_epochs)
tensorboard_writer.add_scalar(os.path.join(data, 'avg CTC loss'), avg_ctc_loss, global_step=num_epochs)
decoder_loss, val_correct, val_count = _train.eval(encoder, decoding_step, val_loader, device, train_dataset.char2idx)
_, train_correct, train_count = _train.eval(encoder, decoding_step, train_loader, device, train_dataset.char2idx)
val_cer = (val_count - val_correct).float() / val_count
train_cer = (train_count - train_correct).float() / train_count
encoder.save_best_model(val_cer, encoder_path)
decoding_step.save_best_model(val_cer, decoder_path)
print(f'\tTrain CER: {train_cer}')
print(f'\tVal CER: {val_cer}')
# ANALYSIS
encoder.eval()
decoding_step.eval()
with torch.no_grad():
# CER
_, test_correct, test_count = _train.eval(encoder, decoding_step, test_loader, device, train_dataset.char2idx)
test_cer = (test_count - test_correct).float() / test_count
print(f'\tTest CER: {test_cer}')
# Sample teacher forcing output
print('Some teacher-forcing outputs:')
_analysis.print_samples(encoder, decoding_step, test_loader, device, train_dataset.char2idx, max_=10)
# confusion matrix
print('drawing confusion matrix:')
try:
_analysis.get_confusion_matrix(encoder, decoding_step, test_loader, device, test_dataset.char2idx, num_epochs)
except:
print('oops something wrong happened in drawing confusion matrix')
# inference
print('Some student-forcing outputs with beam search:')
for frames, frame_lens, chars, char_lens in test_loader:
frames, frame_lens, chars, char_lens = frames[:2], frame_lens[:2], chars[:2], char_lens[:2]
frames, frame_lens, chars, char_lens = frames.to(device), frame_lens.to(device), chars.to(device), char_lens.to(device)
pred, gt = _analysis.inference(encoder, decoding_step, frames, frame_lens, chars, char_lens, device,
test_dataset.char2idx, beam_width=10, max_label_len=100)
for gt_, pred_ in zip(gt, pred):
print(f'GT\t: {gt_}')
print(f'Pred\t: {pred_}')
break
tensorboard_writer.add_scalars(os.path.join(data, 'CER'), {"Train": train_cer, "Val": val_cer}, global_step=num_epochs)
tensorboard_writer.add_scalar(os.path.join(data, 'learning rate'), learning_rate, global_step=num_epochs)
val_cers.append(val_cer)
train_decoder_losses.append(avg_decoder_loss)
train_ctc_losses.append(avg_ctc_loss)
if val_cer < best_val_cer:
best_val_cer = val_cer
best_val_cer_idx = num_epochs
num_epochs += 1
te = time.time()
total_time = te - ts
print()
print("Training complete: Took '{}' seconds, or '{}' per epoch".format(total_time, total_time / num_epochs))
print("Training Statistics")
print("\tBest Val CER: '{}'".format(np.min(val_cers)))
print("\tBest Decoder Loss: '{}'".format(np.min(train_decoder_losses)))
print("\tBest CTC Loss: '{}'".format(np.min(train_ctc_losses)))
print()
def main():
global _logger
args = _cmd.parseArgsForClassOrScript(train)
varsArgs = vars(args)
verbosity = varsArgs.pop('verbosity', _util.DEFAULT_VERBOSITY)
_getSharedLogger(verbosity=verbosity).info("Passed arguments: '{}'".format(varsArgs))
train(**varsArgs)
if __name__ == '__main__':
main()
```
#### File: utils/data/caption.py
```python
import os
import re
import collections
import unicodedata
import pycaption
import src.utils.time as _time
import src.utils.utility as _util
_patterns = (
r"\(.*\)",
r"<[^>]*>",
r"\[.*\]",
r"\{.*\}",
r"stephen:",
r">>",
)
_conditions = (
lambda x: len(x.split()) <= 2,
# lambda x: len(x) <= 8,
)
def _getSharedLogger():
return _util.getLogger(os.path.basename(__file__).split('.')[0])
def extract_captions(cap_fname, lang='en-US'):
""" Reads a list of captions and returns an ordered dictionary of {(start_time, end_time) -> "caption"}
with time in units of seconds.
:param cap_fname: VTT subtitle file to read from. pycaption produces Caption sets with text and times in microseconds, which are converted to seconds here.
:param lang: Caption language to extract. Defaults to 'en-US'.
"""
assert os.path.isfile(cap_fname)
_getSharedLogger().info("Reading captions from '%s'", cap_fname)
reader = pycaption.WebVTTReader()
res = collections.OrderedDict()
with open(cap_fname) as fin:
captions_raw = fin.read()
assert reader.detect(captions_raw), "Malformed file: '{}'".format(cap_fname)
caption_set = reader.read(captions_raw)
assert not caption_set.is_empty(), "Empty VTT file: '{}'".format(cap_fname)
# REVIEW josephz: We'll need to check what other possibilities there are.
assert lang in caption_set.get_languages()
captions = caption_set.get_captions(lang=lang)
assert len(captions) > 0
_getSharedLogger().info("Detected '%s' captions...", len(captions))
for c in captions:
cap_raw = c.get_text()
start = _time.micros_to_sec(c.start)
end = _time.micros_to_sec(c.end)
res[(start, end)] = cap_raw.strip()
assert len(res) == len(captions)
return res
def prune_and_filter_captions(captions, patterns=None, conditions=None, union=True):
""" Cleans a dictionary of time-sequenced captions based on regex patterns to prune invalid tokens within
captions, as well as filtering conditions to delete captions entirely.
i.e. The following regex patterns will match on any characters encapsulated in opening and closing parentheses.
```
patterns = [
r"\(.*\)",
]
```
Furthermore, when any of the following conditions match the caption, it will be deleted.
```
filter = [
lambda x: len(x.split()) <= 2,
lambda x: len(x) <= 8,
]
```
:param captions: Dictionary of captions to prune and filter.
:param patterns: Regex patterns to prune caption tokens. Should be lowercase only.
:param conditions: Boolean conditions to filter captions.
"""
if conditions is None:
conditions = _conditions
if patterns is None:
patterns = _patterns
regex = re.compile("|".join(patterns))
# Remove matched patterns within captions.
for k, cap_raw in captions.items():
cap_raw = regex.sub('', cap_raw).strip()
cap_raw = cap_raw.replace('\n', ' ')
cap_raw = cap_raw.lower()
cap_raw = unicodedata.normalize(u'NFKD', cap_raw).encode('ascii', 'ignore').decode('utf8')
captions[k] = cap_raw
# Filter captions based on caption condition filters.
fn = any if union else all
res = collections.OrderedDict(
(k, cap) for k, cap in captions.items()
if not fn(cond(cap) for cond in conditions))
return res
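# Illustrative usage sketch (not part of the original module): extract captions from a
# VTT file and clean them with the default patterns/conditions. The file path below is
# hypothetical.
if __name__ == '__main__':
    caps = extract_captions('example_video.en.vtt')
    caps = prune_and_filter_captions(caps)
    for (start, end), text in caps.items():
        print('[{:.2f}s - {:.2f}s] {}'.format(start, end, text))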
```
#### File: utils/data/video.py
```python
import os
import imageio
import collections
import numpy as np
class VideoReader:
def __init__(self, vid_path, start=0, seq_len=1, fps=29.97):
""" The VideoReader serves a generator of frame-sequences as needed. To increase performance, sequential
frame access is enforced, and frames are cached into a buffer to allow for quicker repeated and
sequential accesses within the allocated buffer.
TODO: Buffer implementation. Could be implemented by taking a cache_size param for max(caption_size) and
# keeping track of lo/hi indices for when to update. See `_updateCache` for a draft.
Note memory usage may grow significantly with high-resolution video or large cache sizes, calculated by the
following formula:
```
bytes = seq_len * height * width * channels
```
A 1080p (1920x1080) video with a sequence length of 30 (approximately 1 second of 30fps footage) equates to:
```
30 * 1080 * 1920 * 3 bytes ~ 186MB
```
:param vid_path: Path of the video to read from.
:param start: The starting frame index to begin reading from.
:param seq_len: The length of the sequence of frames to serve.
:param fps: Frame rate of the video, used to convert seconds to frame indices. Defaults to 29.97.
"""
assert os.path.isfile(vid_path)
reader = imageio.get_reader(vid_path)
vid_len = reader.get_length()
# State.
self.lo = start
self._reader = reader
self.cache = collections.deque(maxlen=seq_len)
self.buf = np.empty(shape=(seq_len,), dtype=np.ndarray)
# For convenience.
self._seq_len = seq_len
self._vid_len = vid_len
self._fps = fps
def get_frame_idx(self, seconds):
return int(seconds * self._fps)
def getNumFrames(self):
return self._vid_len
def genFrames(self, lo, hi):
# assert isinstance(self.buf, np.ndarray) and self.buf.ndim == 1 and len(self.buf) == self._seq_len
# assert isinstance(self.cache, collections.deque) and self.cache.maxlen == len(self.cache) == self._seq_len
# self._updateCache(lo, hi)
# Populate ndarray vector with cache.
# REVIEW josephz: Can this be improved with a buffer?
assert self.lo <= lo <= hi
self.lo = lo
return [self._reader.get_data(x) for x in range(lo, min(hi, self.getNumFrames()))]
# for x in range(lo, hi):
# yield self._reader.get_data(x)
def _updateCache(self, lo, hi):
raise NotImplementedError
# assert isinstance(self.cache, collections.deque) and self.cache.maxlen == self._seq_len
# assert self.lo <= lo < hi < self._vid_len
#
# # The subsequent sequence may jump ahead. If so, we only wish to load the minimum number of
# # frames to catch-up since our previous sequence.
# assert lo + len(self.cache) <= hi + self._seq_len
# cacheRange = range(max(lo + len(self.cache), hi), hi + self._seq_len)
# self.cache.extend(self._reader.get_data(x) for x in cacheRange)
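# Illustrative usage sketch (not part of the original module): read about one second of
# frames corresponding to a caption time range. The video path below is hypothetical.
if __name__ == '__main__':
    reader = VideoReader('example_video.mp4', seq_len=30, fps=29.97)
    lo = reader.get_frame_idx(12.0)  # caption starts at 12.0s
    hi = reader.get_frame_idx(13.0)  # caption ends at 13.0s
    frames = reader.genFrames(lo, hi)
    print('Read {} frames, first frame shape: {}'.format(len(frames), frames[0].shape))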
``` |
{
"source": "joseph-zhong/VideoSummarization",
"score": 2
} |
#### File: src/scripts/eval.py
```python
import os
import matplotlib
import seaborn as sns
import torch
from torch import nn
from tqdm import tqdm
sns.set()
matplotlib.use('Agg')
import src.utils.utility as _util
import src.utils.cmd_line as _cmd
import src.data.msrvtt as _data
import src.model.models as _models
import src.train.train_test_utils as _train
from extern.coco_caption.pycocotools.coco import COCO
from src.data.caption import vocab
_logger = _util.get_logger(__file__)
def evaluate(raw: str, dataset: str, mode: str, weights_path: str, batch_size: int = 64, use_cuda: bool = False) -> None:
dataset_dir = _util.get_dataset_by_name(dataset, mode)
raw_dir = _util.get_raw_dataset_by_name(raw, mode)
model, run, args, weights_path = _util.get_params_by_weights_path(weights_path)
a_feature_size = int(args["a_feature_size"])
projected_size = int(args["projected_size"])
mid_size = int(args["mid_size"])
hidden_size = int(args["hidden_size"])
max_frames = int(args["max_frames"])
max_words = int(args["max_words"])
banet = _models.BANet(a_feature_size, projected_size, mid_size, hidden_size, max_frames, max_words, use_cuda=use_cuda)
pretrained_path = os.path.join(weights_path, "weights.pth")
weights = torch.load(pretrained_path)
banet.load_state_dict(weights)
if use_cuda:
banet.cuda()
print("Computing metrics...")
eval_loader = _data.get_eval_dataloader(dataset, mode, batch_size=batch_size)
test_reference_txt_path = os.path.join(dataset_dir, 'reference.json')
test_prediction_txt_path = os.path.join(dataset_dir, 'prediction.txt')
reference = COCO(test_reference_txt_path)
_train.eval_step(eval_loader, banet, test_prediction_txt_path, reference, use_cuda=use_cuda)
# Must switch to a new loader which provides captions.
eval_loader = _data.get_dataloader(dataset, mode, batch_size=batch_size)
for i, (videos, captions, cap_lens, video_ids) in tqdm(enumerate(eval_loader, start=1), total=len(eval_loader)):
if use_cuda:
videos = videos.cuda()
video_encoded = banet.encoder(videos)
tokens = banet.decoder.sample(video_encoded)
# vid_paths = [os.path.join(raw_dir, "{}.mp4".format(video_id)) for video_id in video_ids]
for j in range(len(tokens)):
# vid = imageio.get_reader(vid_paths[j]).iter_data()
print('[vid_id={}]'.format(video_ids[j]))
print("gt :", vocab().decode(captions[j]))
print("pred:", vocab().decode(tokens.data[j].squeeze()))
print()
# First few frames are black sometimes
# next(vid)
# next(vid)
# next(vid)
# next(vid)
# plt.imshow(next(vid))
def main():
global _logger
args = _cmd.parseArgsForClassOrScript(evaluate)
varsArgs = vars(args)
verbosity = varsArgs.pop('verbosity', _util.DEFAULT_VERBOSITY)
_logger.info("Passed arguments: '{}'".format(varsArgs))
evaluate(**varsArgs)
if __name__ == '__main__':
main()
```
#### File: src/scripts/train.py
```python
import os
import pickle
import shutil
import inspect
import torch
import numpy as np
import tensorboard_logger as _tb_logger
import tqdm
import src.data.msrvtt as _data
import src.model.models as _models
import src.train.train_test_utils as _train
import src.utils.utility as _util
import src.utils.cmd_line as _cmd
from src.data.caption import Vocabulary, Token, vocab
from extern.coco_caption.pycocotools.coco import COCO
_logger = _util.get_logger(__file__)
def train(
# General training hyperparameters.
dataset: str,
num_epochs: int=100,
batch_size: int=128,
# Learning rate schedulers.
learning_rate: float=3e-4,
ss_factor: int=24,
min_ss: float=0.6,
# Representation hyperparameters.
projected_size: int=500,
hidden_size: int=1024, # Hidden size of the recurrent cells.
mid_size: int=128, # Dimension of the boundary detection layer.
# REVIEW josephz: Remove this?
# frame_shape: tuple=(3, 224, 224), # Video frame shape.
a_feature_size: int=2048, # Appearance model feature-dimension size.
# REVIEW josephz: Remove this?
# m_feature_size=4096, # Motion model feature-dimension size.
# Maximum-size hyperparameters.
# frame_sample_rate: int=10, # Sample rate of video frames.
max_frames: int=30, # Maximum length of the video-frame sequence.
max_words: int=30, # Maximum length of the caption-word sequence.
# Misc hyperparameters.
ckpt_freq: int=3,
use_cuda: bool=False,
use_ckpt: bool=False,
use_argmax: bool=False,
seed: int=0,
):
"""
Args:
dataset (str): Dataset to train on.
num_epochs (int): Number of epochs to train for.
batch_size (int): Batch size to train with.
learning_rate (float): Learning rate.
ss_factor (int): Scheduled Sampling factor, to compute a teacher-forcing ratio.
min_ss (float): Minimum teacher-forcing ratio.
projected_size (int): Projection size for the Encoder-Decoder model.
hidden_size (int): Hidden state size for the recurrent network in the encoder.
mid_size (int): Hidden state size for the Boundary Detector network in the encoder.
a_feature_size: Input feature size for the Encoder network.
max_frames (int): Maximum length of the video-frame sequence.
max_words (int): Maximum length of the caption-word sequence.
ckpt_freq (int): Frequency to compute evaluation metrics and save checkpoint.
use_cuda (bool): Flag whether to use CUDA devices.
use_ckpt (bool): Flag on whether to load checkpoint if possible.
use_argmax (bool): Flag on whether to use greedy or multinomial sampling during decoding.
seed (int): Random seed.
Effects:
We will have several outputs:
- Checkpoints (model weights)
- Logs (tensorboard logs)
"""
# Set seeds.
torch.random.manual_seed(seed)
np.random.seed(seed)
# Prepare output paths.
# REVIEW josephz: This is unbelievably hacky, but we want an easy way to allow the user to set and track
# hyperparameters using the cmd_line interface? This should probably be abstracted in utility.py.
hparams = locals()
params = {arg_name: hparams[arg_name] for arg_name in inspect.signature(train).parameters.keys()}
ckpt_path = _util.get_weights_path_by_param(reuse=False, **params)
print("Saving checkpoints to '{ckpt_path}', you may visualize in tensorboard with the following: \n\n\t`tensorboard --logdir={ckpt_path}`\n".format(
ckpt_path=ckpt_path))
# Setup logging paths.
log_path = os.path.join(ckpt_path, 'logs')
_util.mkdir(log_path)
_tb_logger.configure(log_path, flush_secs=10)
# REVIEW josephz: Todo, clean this up.
banet_pth_path_fmt = os.path.join(ckpt_path, '{:04d}_{:04d}.pth')
best_banet_pth_path = os.path.join(ckpt_path, 'weights.pth')
optimizer_pth_path = os.path.join(ckpt_path, 'optimizer.pth')
best_optimizer_pth_path = os.path.join(ckpt_path, 'best_optimizer.pth')
# Load Vocabulary.
vocab_size = len(vocab())
# Load Reference for COCO.
# val_dir = _util.get_dataset_by_name(dataset, mode='val')
# val_reference_txt_path = os.path.join(val_dir, 'reference.json')
# val_prediction_txt_path = os.path.join(val_dir, 'prediction.txt')
# reference = COCO(val_reference_txt_path)
eval_mode = 'val'
eval_dir = _util.get_dataset_by_name(dataset, mode=eval_mode)
test_reference_txt_path = os.path.join(eval_dir, 'reference.json')
test_prediction_txt_path = os.path.join(eval_dir, 'prediction.txt')
reference = COCO(test_reference_txt_path)
print("Evaluating on '{}'".format(eval_dir))
# Initialize the model.
banet = _models.BANet(
a_feature_size, projected_size, mid_size, hidden_size, max_frames, max_words,
use_cuda=use_cuda)
# Load model weights if possible.
if use_ckpt:
pretrained_path = os.path.join(_util.get_raw_dataset_by_name('MSRVTT'), 'pretrained_weights.pth')
weights = torch.load(pretrained_path)
# REVIEW josephz: Figure out how to do the decoder weights partially:
# https://discuss.pytorch.org/t/how-to-load-part-of-pre-trained-model/1113/6
del weights['decoder.word_embed.weight']
del weights['decoder.word_restore.bias']
del weights['decoder.word_restore.weight']
banet.load_state_dict(weights, strict=False)
if use_cuda:
banet.cuda()
# Initialize loss and optimizer.
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(banet.parameters(), lr=learning_rate)
if os.path.exists(optimizer_pth_path) and use_ckpt:
optimizer.load_state_dict(torch.load(optimizer_pth_path))
# Initialize Dataloaders.
train_loader = _data.get_train_dataloader(dataset, batch_size=batch_size)
eval_loader = _data.get_eval_dataloader(dataset, eval_mode, batch_size=batch_size)
num_train_steps = len(train_loader)
num_eval_steps = len(eval_loader)
# Begin Training Loop.
print("Training Configuration:")
print("\tLearning Rate: '{0:.4f}'".format(learning_rate))
print("\tScheduled Sampling:")
print("\t\tMax Teacher Forcing Rate: '{0:.4f}'".format(min_ss))
print("\t\tScheduled Factor: '{0:.4f}'".format(ss_factor))
print("\tBatch Size: '{}'".format(batch_size))
print("\tEpochs: '{}'".format(num_epochs))
print("\tDataset: '{}'".format(dataset))
print("\tCheckpoint Path: '{}'".format(ckpt_path))
best_meteor = 0
loss_count = 0
for epoch in range(num_epochs):
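# Inverse-sigmoid decay for scheduled sampling: epsilon (the teacher-forcing ratio)
# starts near 1.0 and decays towards min_ss as the epoch count grows.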
epsilon = max(min_ss, ss_factor / (ss_factor + np.exp(epoch / ss_factor)))
print('epoch:%d\tepsilon:%.8f' % (epoch, epsilon))
_tb_logger.log_value('epsilon', epsilon, epoch)
for i, (videos, captions, cap_lens, video_ids) in tqdm.tqdm(enumerate(train_loader, start=1), total=num_train_steps):
if use_cuda:
videos = videos.cuda()
targets = captions.cuda()
else:
targets = captions
# Zero the gradients and run the encoder-decoder model.
optimizer.zero_grad()
outputs, video_encoded = banet(videos, targets, teacher_forcing_ratio=epsilon, use_argmax=use_argmax)
# NOTE: Usually the last batch is less than the selected batch_size, so we dynamically
# compute the correct batch_size to use here, rather than throwing away the last
# training batch.
bsz = len(targets)
# Un-pad and flatten the outputs and labels.
outputs = torch.cat([outputs[j][:cap_lens[j]] for j in range(bsz)], dim=0)
targets = torch.cat([targets[j][:cap_lens[j]] for j in range(bsz)], dim=0)
outputs = outputs.view(-1, vocab_size)
targets = targets.view(-1)
# Compute loss for back-propagation.
# assert all(targets > 0) and all(outputs > 0)
loss = criterion(outputs, targets)
loss_val = loss.item()
_tb_logger.log_value('loss', loss_val, epoch * num_train_steps + i)
loss_count += loss_val
# REVIEW josephz: Is there grad_norm?
loss.backward()
optimizer.step()
eval_steps = 25
if i % eval_steps == 0 or bsz < batch_size:
loss_count /= eval_steps if bsz == batch_size else (i % eval_steps or eval_steps)  # avoid a zero divisor on the final short batch
perplexity = np.exp(loss_count)
print('Epoch [%d/%d]:\n\tStep [%d/%d]\n\tLoss: %.4f\n\tPerplexity: %5.4f' %
(epoch, num_epochs, i, num_train_steps, loss_count, perplexity))
_tb_logger.log_value('perplexity', perplexity, epoch * num_train_steps + i)
loss_count = 0
tokens = banet.decoder.sample(video_encoded)
for j in range(5):
we = vocab().decode(tokens.data[j].squeeze())
gt = vocab().decode(captions[j].squeeze())
print('\t\t[vid_id={}]'.format(video_ids[j]))
print('\t\t\tWE: %s\n\t\t\tGT: %s' % (we, gt))
# Finally, compute evaluation metrics and save the best models.
if epoch % ckpt_freq == 0:
# Save epoch checkpoint.
banet_pth_path = banet_pth_path_fmt.format(epoch, num_epochs)
print("Saving checkpoints to '{}'".format(banet_pth_path))
torch.save(banet.state_dict(), banet_pth_path)
torch.save(optimizer.state_dict(), optimizer_pth_path)
# Compute evaluation.
banet.eval()
print("Computing Metrics:...")
metrics = _train.eval_step(eval_loader, banet, test_prediction_txt_path, reference, use_cuda=use_cuda)
for k, v in metrics.items():
_tb_logger.log_value(k, v, epoch)
if k == 'METEOR' and v > best_meteor:
# Save the best model based on the METEOR metric.
# For reference, see https://www.cs.cmu.edu/~alavie/papers/BanerjeeLavie2005-final.pdf
print("Saving best checkpoint of metric: '{}'".format(best_meteor))
shutil.copy2(banet_pth_path, best_banet_pth_path)
shutil.copy2(optimizer_pth_path, best_optimizer_pth_path)
best_meteor = v
banet.train()
def main():
global _logger
args = _cmd.parseArgsForClassOrScript(train)
varsArgs = vars(args)
verbosity = varsArgs.pop('verbosity', _util.DEFAULT_VERBOSITY)
_logger.info("Passed arguments: '{}'".format(varsArgs))
train(**varsArgs)
if __name__ == '__main__':
main()
``` |
{
"source": "josephzxy/py-vsys",
"score": 3
} |
#### File: py-vsys/py_vsys/chain.py
```python
from __future__ import annotations
import enum
from typing import Dict, Any, TYPE_CHECKING, List
# https://stackoverflow.com/a/39757388
if TYPE_CHECKING:
from py_vsys import api
class ChainID(enum.Enum):
"""
ChainID is the enum class for chain ID.
"""
MAIN_NET = "M"
TEST_NET = "T"
class Chain:
"""
Chain is the class for the narrowly-defined chain.
It contains handy methods for querying chain-related data (e.g. height, last block, etc.).
"""
def __init__(self, node_api: api.NodeAPI, chain_id: ChainID = ChainID.TEST_NET):
"""
Args:
node_api (api.NodeAPI): The NodeAPI object the chain uses.
chain_id (ChainID, optional): The chain's ID. Defaults to ChainID.TEST_NET.
"""
self._api = node_api
self._chain_id = chain_id
@property
def api(self) -> api.NodeAPI:
"""
api returns the NodeAPI object that the chain uses.
Returns:
api.NodeAPI: The NodeAPI object that the chain uses.
"""
return self._api
@property
def chain_id(self) -> ChainID:
"""
chain_id returns the ID of the chain.
Returns:
ChainID: ID of the chain.
"""
return self._chain_id
@property
async def height(self) -> int:
"""
height queries & returns the height of the chain.
Returns:
int: The height of the chain.
"""
resp = await self.api.blocks.get_height()
return resp["height"]
@property
async def last_block(self) -> Dict[str, Any]:
"""
last_block queries & returns the last_block of the chain.
Returns:
Dict[str, Any]: The last block data of the chain.
"""
return await self.api.blocks.get_last()
async def get_block_at(self, height: int) -> Dict[str, Any]:
"""
get_block_at gets the block at the given height.
Args:
height (int): The height of the block.
Returns:
Dict[str, Any]: The block.
"""
return await self.api.blocks.get_block_at(height)
async def get_blocks_within(
self, start_height: int, end_height: int
) -> List[Dict[str, Any]]:
"""
get_blocks_within gets the blocks that fall in the given range.
NOTE that the max length of the range is 100.
Args:
start_height (int): The start height.
end_height (int): The end height.
Returns:
List[Dict[str, Any]]: The blocks.
"""
return await self.api.blocks.get_blocks_within(start_height, end_height)
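# Illustrative usage sketch (not part of the original module). The `node_api` argument
# stands in for an existing api.NodeAPI instance, which is assumed here.
async def _example_query_chain(node_api) -> None:
    chain = Chain(node_api, chain_id=ChainID.TEST_NET)
    height = await chain.height
    # NOTE: get_blocks_within may span at most 100 blocks.
    blocks = await chain.get_blocks_within(max(1, height - 5), height)
    print(height, len(blocks))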
```
#### File: py_vsys/contract/lock_ctrt.py
```python
from __future__ import annotations
from typing import TYPE_CHECKING, Dict, Any, Optional
from loguru import logger
# https://stackoverflow.com/a/39757388
if TYPE_CHECKING:
from py_vsys import chain as ch
from py_vsys import account as acnt
from py_vsys import data_entry as de
from py_vsys import tx_req as tx
from py_vsys import model as md
from py_vsys.contract import tok_ctrt_factory as tcf
from . import CtrtMeta, Ctrt, BaseTokCtrt
class LockCtrt(Ctrt):
"""
LockCtrt is the class for VSYS Lock contract.
"""
CTRT_META = CtrtMeta.from_b58_str(
"<KEY>"
)
class FuncIdx(Ctrt.FuncIdx):
"""
FuncIdx is the enum class for function indexes of a contract.
"""
LOCK = 0
class StateVar(Ctrt.StateVar):
"""
StateVar is the enum class for state variables of a contract.
"""
MAKER = 0
TOKEN_ID = 1
class StateMapIdx(Ctrt.StateMapIdx):
"""
StateMapIdx is the enum class for state map indexes.
"""
CONTRACT_BALANCE = 0
CONTRACT_LOCK_TIME = 1
class DBKey(Ctrt.DBKey):
"""
DBKey is the class for DB key of a contract used to query data.
"""
@classmethod
def for_maker(cls) -> LockCtrt.DBKey:
"""
for_maker returns the LockCtrt.DBKey object for querying the maker.
Returns:
LockCtrt.DBKey: The LockCtrt.DBKey object.
"""
b = LockCtrt.StateVar.MAKER.serialize()
return cls(b)
@classmethod
def for_token_id(cls) -> LockCtrt.DBKey:
"""
for_token_id returns the LockCtrt.DBKey object for querying the token_id.
Returns:
LockCtrt.DBKey: The LockCtrt.DBKey object.
"""
b = LockCtrt.StateVar.TOKEN_ID.serialize()
return cls(b)
@classmethod
def for_contract_balance(cls, addr: str) -> LockCtrt.DBKey:
"""
for_contract_balance returns the LockCtrt.DBKey object for querying the contract balance.
Args:
addr (str): The account address.
Returns:
LockCtrt.DBKey: The LockCtrt.DBKey object.
"""
b = LockCtrt.StateMap(
idx=LockCtrt.StateMapIdx.CONTRACT_BALANCE,
data_entry=de.Addr(md.Addr(addr)),
).serialize()
return cls(b)
@classmethod
def for_contract_lock_time(cls, addr: str) -> LockCtrt.DBKey:
"""
for_contract_lock_time returns the LockCtrt.DBKey object for querying the contract lock time.
Args:
addr (str): The account address.
Returns:
LockCtrt.DBKey: The LockCtrt.DBKey object.
"""
b = LockCtrt.StateMap(
idx=LockCtrt.StateMapIdx.CONTRACT_LOCK_TIME,
data_entry=de.Addr(md.Addr(addr)),
).serialize()
return cls(b)
def __init__(self, ctrt_id: str, chain: ch.Chain) -> None:
"""
Args:
ctrt_id (str): The id of the contract.
chain (ch.Chain): The object of the chain where the contract is on.
"""
super().__init__(ctrt_id, chain)
self._tok_id: Optional[md.TokenID] = None
self._tok_ctrt: Optional[BaseTokCtrt] = None
@classmethod
async def register(
cls,
by: acnt.Account,
tok_id: str,
ctrt_description: str = "",
fee: int = md.RegCtrtFee.DEFAULT,
) -> LockCtrt:
"""
register registers a Lock Contract
Args:
by (acnt.Account): The action taker.
tok_id (str): The id of the token to lock.
ctrt_description (str, optional): The description of the contract. Defaults to "".
fee (int, optional): The fee to pay for this action. Defaults to md.RegCtrtFee.DEFAULT.
Returns:
LockCtrt: The LockCtrt object of the registered Lock contract.
"""
data = await by._register_contract(
tx.RegCtrtTxReq(
data_stack=de.DataStack(
de.TokenID(md.TokenID(tok_id)),
),
ctrt_meta=cls.CTRT_META,
timestamp=md.VSYSTimestamp.now(),
description=md.Str(ctrt_description),
fee=md.RegCtrtFee(fee),
)
)
logger.debug(data)
return cls(
data["contractId"],
chain=by.chain,
)
@property
async def maker(self) -> md.Addr:
"""
maker queries & returns the maker of the contract.
Returns:
md.Addr: The address of the maker of the contract.
"""
raw_val = await self._query_db_key(self.DBKey.for_maker())
return md.Addr(raw_val)
@property
async def tok_id(self) -> md.TokenID:
"""
tok_id queries & returns the token_id of the contract.
Returns:
md.TokenID: The token_id of the contract.
"""
if not self._tok_id:
raw_val = await self._query_db_key(self.DBKey.for_token_id())
self._tok_id = md.TokenID(raw_val)
return self._tok_id
@property
async def tok_ctrt(self) -> BaseTokCtrt:
"""
tok_ctrt returns the token contract instance for the token used in the contract.
Returns:
BaseTokCtrt: The token contract instance.
"""
if not self._tok_ctrt:
tok_id = await self.tok_id
self._tok_ctrt = await tcf.from_tok_id(tok_id, self.chain)
return self._tok_ctrt
@property
async def unit(self) -> int:
"""
unit returns the unit of the token specified in this contract.
Returns:
int: The token unit.
"""
tc = await self.tok_ctrt
return await tc.unit
async def get_ctrt_bal(self, addr: str) -> md.Token:
"""
get_ctrt_bal queries & returns the balance of the token within this contract
belonging to the user address.
Args:
addr (str): The account address.
Returns:
md.Token: The balance of the token.
"""
raw_val = await self._query_db_key(self.DBKey.for_contract_balance(addr))
unit = await self.unit
return md.Token(data=raw_val, unit=unit)
async def get_ctrt_lock_time(self, addr: str) -> md.VSYSTimestamp:
"""
get_ctrt_lock_time queries & returns the lock time of the token locked in this contract
belonging to the user address.
Args:
addr (str): The account address.
Returns:
md.VSYSTimestamp: The lock time of the token in Unix timestamp.
"""
raw_val = await self._query_db_key(self.DBKey.for_contract_lock_time(addr))
return md.VSYSTimestamp(raw_val)
async def lock(
self,
by: acnt.Account,
expire_at: int,
attachment: str = "",
fee: int = md.ExecCtrtFee.DEFAULT,
) -> Dict[str, Any]:
"""
lock locks the user's deposited tokens in the contract until the given timestamp.
Args:
by (acnt.Account): The action taker.
expire_at (int): Unix timestamp. When the lock will expire.
attachment (str, optional): The attachment of this action. Defaults to "".
fee (int, optional): The fee to pay for this action. Defaults to md.ExecCtrtFee.DEFAULT.
Returns:
Dict[str, Any]: The response returned by the Node API.
"""
data = await by._execute_contract(
tx.ExecCtrtFuncTxReq(
ctrt_id=self._ctrt_id,
func_id=self.FuncIdx.LOCK,
data_stack=de.DataStack(
de.Timestamp(md.VSYSTimestamp.from_unix_ts(expire_at)),
),
timestamp=md.VSYSTimestamp.now(),
attachment=md.Str(attachment),
fee=md.ExecCtrtFee(fee),
)
)
logger.debug(data)
return data
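# Illustrative usage sketch (not part of the original module). `account` stands in for an
# existing acnt.Account instance and `tok_id_str` for a token ID string; both are assumptions.
async def _example_register_and_lock(account, tok_id_str: str) -> None:
    import time
    # Register a new Lock contract for the given token.
    lc = await LockCtrt.register(account, tok_id_str)
    # Tokens must first be deposited into the contract through the token contract's
    # deposit action; lock() then freezes the deposited balance until `expire_at`.
    resp = await lc.lock(account, expire_at=int(time.time()) + 3600)
    print(resp)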
```
#### File: py-vsys/py_vsys/model.py
```python
from __future__ import annotations
import abc
import time
from typing import Any, NamedTuple, Union
import base58
from py_vsys import chain as ch
from py_vsys import words as wd
from py_vsys.utils.crypto import hashes as hs
class Model(abc.ABC):
"""
Model is the base class for data models that provides self-validation methods
and other handy methods (e.g. converting bytes to a base58 string).
NOTE that the validate() method is deliberately called within the constructor so as
to avoid accidental malformed data as much as possible.
"""
def __init__(self, data: Any) -> None:
"""
Args:
data (Any): The data to contain.
"""
self.data = data
self.validate()
@abc.abstractmethod
def validate(self) -> None:
"""
validate validates the containing data.
"""
def __str__(self) -> str:
"""
E.g. Str('hello')
"""
cls_name = self.__class__.__name__
return f"{cls_name}({self.data})"
__repr__ = __str__
def __eq__(self, other: Model) -> bool:
return self.__class__ == other.__class__ and self.data == other.data
class Bytes(Model):
"""
Bytes is the data model for bytes.
"""
def __init__(self, data: bytes = b"") -> None:
"""
Args:
data (bytes, optional): The data to contain. Defaults to b"".
"""
self.data = data
self.validate()
@property
def b58_str(self) -> str:
"""
b58_str returns the base58 string representation of the containing data.
Returns:
str: The base58 string representation.
"""
return base58.b58encode(self.data).decode("latin-1")
def validate(self) -> None:
cls_name = self.__class__.__name__
if not isinstance(self.data, bytes):
raise TypeError(f"Data in {cls_name} must be bytes")
@classmethod
def from_b58_str(cls, s: str) -> Bytes:
"""
from_b58_str creates a Bytes object from the given base58-encoded string.
Args:
s (str): the input base58 string.
Returns:
Bytes: the Bytes instance.
"""
return cls(base58.b58decode(s))
@classmethod
def from_str(cls, s: str) -> Bytes:
"""
from_str creates a Bytes object from the given string.
Args:
s (str): the input string.
Returns:
Bytes: the Bytes instance.
"""
return cls(s.encode("latin-1"))
class Str(Model):
"""
Str is the data model for string.
"""
def __init__(self, data: str = "") -> None:
"""
Args:
data (str, optional): The data to contain. Defaults to "".
"""
self.data = data
self.validate()
@classmethod
def from_bytes(cls, b: bytes) -> Str:
"""
from_bytes parses the given bytes and creates a Str.
Args:
b (bytes): The bytes to parse.
Returns:
Str: The Str instance.
"""
return cls(b.decode("latin-1"))
@property
def bytes(self) -> bytes:
"""
bytes returns the bytes representation of the containing data.
Returns:
bytes: The bytes representation.
"""
return self.data.encode("latin-1")
@property
def b58_str(self) -> str:
"""
b58_str returns the base58 string representation of the containing data.
Returns:
str: The base58 string representation.
"""
return base58.b58encode(self.data).decode("latin-1")
def validate(self) -> None:
cls_name = self.__class__.__name__
if not isinstance(self.data, str):
raise TypeError(f"Data in {cls_name} must be a str")
class Seed(Str):
WORD_CNT = 15
def validate(self) -> None:
super().validate()
cls_name = self.__class__.__name__
words = self.data.split(" ")
if len(words) != self.WORD_CNT:
raise ValueError(
f"Data in {cls_name} must consist exactly {self.WORD_CNT} words"
)
for w in words:
if w not in wd.WORDS_SET:
raise ValueError(f"Data in {cls_name} contains invalid words")
class B58Str(Str):
"""
B58Str is the data model for base58 string.
"""
@classmethod
def from_bytes(cls, b: bytes) -> B58Str:
"""
from_bytes parses the given bytes and creates a B58Str.
Args:
b (bytes): The bytes to parse.
Returns:
B58Str: The B58Str instance.
"""
return cls(base58.b58encode(b).decode("latin-1"))
@property
def bytes(self) -> bytes:
"""
bytes returns the bytes representation of the containing data.
Returns:
bytes: The bytes representation.
"""
return base58.b58decode(self.data)
def validate(self) -> None:
super().validate()
cls_name = self.__class__.__name__
try:
self.bytes
except ValueError:
raise ValueError(f"Data in {cls_name} must be base58-decodable")
class FixedSizeB58Str(B58Str):
"""
FixedSizeB58Str is the data model for fixed-size base58 string.
"""
BYTES_LEN = 0
def validate(self) -> None:
super().validate()
cls_name = self.__class__.__name__
if not len(self.bytes) == self.BYTES_LEN:
raise ValueError(
f"Data in {cls_name} must be exactly {self.BYTES_LEN} bytes after base58 decode"
)
class Addr(FixedSizeB58Str):
"""
Addr is the data model for an address.
"""
VER = 5
VER_BYTES_LEN = 1
CHAIN_ID_BYTES_LEN = 1
PUB_KEY_HASH_BYTES_LEN = 20
CHECKSUM_BYTES_LEN = 4
BYTES_LEN = (
VER_BYTES_LEN + CHAIN_ID_BYTES_LEN + PUB_KEY_HASH_BYTES_LEN + CHECKSUM_BYTES_LEN
)
@property
def version(self) -> int:
"""
version returns the version of the address.
Returns:
int: The version.
"""
return self.bytes[0]
@property
def chain_id(self) -> str:
"""
chain_id returns the chain ID of the address.
Returns:
str: The chain ID.
"""
return chr(self.bytes[1])
@property
def pub_key_hash(self) -> bytes:
"""
pub_key_hash returns the hash of the public key of the address.
Returns:
bytes: The hash.
"""
prev_len = self.VER_BYTES_LEN + self.CHAIN_ID_BYTES_LEN
b = self.bytes[prev_len:]
return b[: self.PUB_KEY_HASH_BYTES_LEN]
@property
def checksum(self) -> bytes:
"""
checksum returns the checksum of the address.
Returns:
bytes: The checksum.
"""
return self.bytes[-self.CHECKSUM_BYTES_LEN :]
def must_on(self, chain: ch.Chain):
"""
must_on asserts that the address must be on the given chain.
Args:
chain (ch.Chain): The chain object.
Raises:
ValueError: If the address is not on the given chain.
"""
if self.chain_id != chain.chain_id.value:
raise ValueError(
f"Addr is not on the chain. The Addr has chain_id '{self.chain_id}' while the chain expects '{chain.chain_id.value}'"
)
def validate(self) -> None:
super().validate()
cls_name = self.__class__.__name__
if self.version != self.VER:
raise ValueError(f"Data in {cls_name} has invalid address version")
chain_id_valid = any([self.chain_id == c.value for c in ch.ChainID])
if not chain_id_valid:
raise ValueError(f"Data in {cls_name} has invalid chain_id")
def ke_bla_hash(b: bytes) -> bytes:
return hs.keccak256_hash(hs.blake2b_hash(b))
cl = self.CHECKSUM_BYTES_LEN
if self.checksum != ke_bla_hash(self.bytes[:-cl])[:cl]:
raise ValueError(f"Data in {cls_name} has invalid checksum")
@classmethod
def from_bytes_md(cls, b: Bytes) -> Addr:
"""
from_bytes_md constructs an Addr object from the given Bytes object.
Args:
b (Bytes): The given Bytes object.
Returns:
Addr: The Addr object.
"""
return cls(b.b58_str)
class CtrtID(FixedSizeB58Str):
"""
CtrtID is the data model for contract ID.
"""
BYTES_LEN = 26
class TokenID(FixedSizeB58Str):
"""
TokenID is the data model for token ID.
"""
BYTES_LEN = 30
MAINNET_VSYS_TOK_ID = "TWatCreEv7ayv6iAfLgke6ppVV33kDjFqSJn8yicf"
TESTNET_VSYS_TOK_ID = "TWuKDNU1SAheHR99s1MbGZLPh1KophEmKk1eeU3mW"
@property
def is_vsys_tok(self) -> bool:
return self.is_testnet_vsys_tok or self.is_mainnet_vsys_tok
@property
def is_mainnet_vsys_tok(self) -> bool:
return self.data == self.MAINNET_VSYS_TOK_ID
@property
def is_testnet_vsys_tok(self) -> bool:
return self.data == self.TESTNET_VSYS_TOK_ID
class TXID(FixedSizeB58Str):
"""
TXID is the data model for transaction ID.
"""
BYTES_LEN = 32
class PubKey(FixedSizeB58Str):
"""
PubKey is the data model for public key.
"""
BYTES_LEN = 32
class PriKey(FixedSizeB58Str):
"""
PriKey is the data model for private key.
"""
BYTES_LEN = 32
class Int(Model):
"""
Int is the data model for an integer.
"""
def __init__(self, data: int = 0) -> None:
"""
Args:
data (int, optional): The data to contain. Defaults to 0.
"""
self.data = data
self.validate()
def validate(self) -> None:
cls_name = self.__class__.__name__
if not isinstance(self.data, int):
raise TypeError(f"Data in {cls_name} must be an int")
class NonNegativeInt(Int):
"""
NonNegativeInt is the data model for a non-negative integer.
"""
def validate(self) -> None:
super().validate()
cls_name = self.__class__.__name__
if not self.data >= 0:
raise ValueError(f"Data in {cls_name} must be non negative")
class TokenIdx(NonNegativeInt):
"""
TokenIdx is the data model for token index.
"""
pass
class Nonce(NonNegativeInt):
"""
Nonce is the data model for nonce (used with seed for an account).
"""
pass
class VSYSTimestamp(NonNegativeInt):
"""
VSYSTimestamp is the data model for the timestamp used in VSYS.
"""
SCALE = 1_000_000_000
@classmethod
def from_unix_ts(cls, ux_ts: Union[int, float]) -> VSYSTimestamp:
"""
from_unix_ts creates a new VSYSTimestamp from the given UNIX timestamp at seconds.
Args:
ux_ts (Union[int, float]): The UNIX timestamp.
Raises:
TypeError: If the type of the given UNIX timestamp is neither int nor float.
ValueError: If the given UNIX timestamp is not positive.
Returns:
VSYSTimestamp: The VSYSTimestamp.
"""
if not (isinstance(ux_ts, int) or isinstance(ux_ts, float)):
raise TypeError("ux_ts must be an int or float")
return cls(int(ux_ts * cls.SCALE))
@classmethod
def now(cls) -> VSYSTimestamp:
"""
now creates a new VSYSTimestamp for current time.
Returns:
VSYSTimestamp: The VSYSTimestamp.
"""
return cls(int(time.time() * cls.SCALE))
@property
def unix_ts(self) -> float:
return self.data / self.SCALE
def validate(self) -> None:
super().validate()
cls_name = self.__class__.__name__
if not (self.data == 0 or self.data >= self.SCALE):
raise ValueError(
f"Data in {cls_name} must be either be 0 or equal or greater than {self.SCALE}"
)
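# Example (illustrative): VSYSTimestamp stores time at nanosecond resolution, so
#   VSYSTimestamp.from_unix_ts(1_600_000_000).data == 1_600_000_000 * VSYSTimestamp.SCALE
#   VSYSTimestamp(1_600_000_000 * VSYSTimestamp.SCALE).unix_ts == 1_600_000_000.0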
class Token(NonNegativeInt):
"""
Token is the data model for tokens.
"""
def __init__(self, data: int = 0, unit: int = 0) -> None:
"""
Args:
data (int, optional): The data to contain. Defaults to 0.
unit (int, optional): The unit of the token. Defaults to 0.
"""
super().__init__(data)
self.unit = unit
@classmethod
def one(cls, unit: int) -> Token:
"""
one creates a new Token where the amount is equal to ONE.
Args:
unit (int): The unit of the token.
Returns:
Token: The Token.
"""
return cls.for_amount(1, unit)
@property
def amount(self) -> float:
"""
amount returns the amount of Token the Token object represents.
Returns:
float: The amount of Token.
"""
return self.data / self.unit
@classmethod
def for_amount(cls, amount: Union[int, float], unit: int) -> Token:
"""
for_amount creates a new Token where the amount is equal to the given amount.
Args:
amount (Union[int, float]): The amount.
unit (int): The unit of the token.
Returns:
Token: The Token.
"""
data = amount * unit
if int(data) < data:
raise ValueError(
f"Invalid amount for {cls.__name__}: {amount}. The minimal valid amount granularity is {1 / unit}"
)
return cls(int(data), unit)
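# Example (illustrative): with unit=100 (i.e. 2 decimal places),
#   Token.for_amount(1.5, unit=100).data == 150
#   Token(150, unit=100).amount == 1.5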
class VSYS(NonNegativeInt):
"""
VSYS is the data model for VSYS(the native token on VSYS blockchain).
"""
UNIT = 1_00_000_000
@property
def amount(self) -> float:
"""
amount returns the amount of VSYS coins the VSYS object represents.
Returns:
float: The amount of VSYS coins.
"""
return self.data / self.UNIT
@classmethod
def for_amount(cls, amount: Union[int, float]) -> VSYS:
"""
for_amount creates a new VSYS where the amount is equal to the given amount.
Args:
amount (Union[int, float]): The amount.
Returns:
VSYS: The VSYS.
"""
data = amount * cls.UNIT
if int(data) < data:
raise ValueError(
f"Invalid amount for {cls.__name__}: {amount}. The minimal valid amount granularity is {1 / cls.UNIT}"
)
return cls(int(data))
def __mul__(self, factor: Union[int, float]) -> VSYS:
"""
__mul__ defines the behaviour of the '*' operator.
E.g.
v1 = VSYS.for_amount(1)
v20 = v1 * 20
v2 = v20 * 0.1
Args:
factor (Union[int, float]): The factor to multiply.
Returns:
VSYS: The result of the multiplication.
"""
return self.__class__(int(self.data * factor))
class Fee(VSYS):
"""
Fee is the data model for transaction fee.
"""
DEFAULT = int(VSYS.UNIT * 0.1)
def __init__(self, data: int = 0) -> None:
"""
Args:
data (int, optional): The data to contain. Defaults to VSYS.UNIT * 0.1.
"""
if data == 0:
data = self.DEFAULT
super().__init__(data)
def validate(self) -> None:
super().validate()
cls_name = self.__class__.__name__
if not self.data >= self.DEFAULT:
raise ValueError(
f"Data in {cls_name} must be equal or greater than {self.DEFAULT}"
)
class PaymentFee(Fee):
"""
PaymentFee is the data model for the fee of a transaction where the type is Payment.
"""
pass
class LeasingFee(Fee):
"""
LeasingFee is the data model for the fee of a transaction where the type is Leasing.
"""
pass
class LeasingCancelFee(Fee):
"""
LeasingCancelFee is the data model for the fee of a transaction where the type is Leasing Cancel.
"""
pass
class RegCtrtFee(Fee):
"""
RegCtrtFee is the data model for the fee of a transaction where the type is Register Contract.
"""
DEFAULT = VSYS.UNIT * 100
class ExecCtrtFee(Fee):
"""
ExecCtrtFee is the data model for the fee of a transaction where the type is Execute Contract.
"""
DEFAULT = int(VSYS.UNIT * 0.3)
class ContendSlotsFee(Fee):
"""
ContendSlotsFee is the data model for the fee of a transaction where the type is Contend Slots.
"""
DEFAULT = VSYS.UNIT * 50_000
class DBPutFee(Fee):
"""
DBPutFee is the data model for the fee of a transaction where the type is DB Put.
"""
DEFAULT = VSYS.UNIT
class Bool(Model):
"""
Bool is the data model for a boolean value.
"""
def __init__(self, data: bool = False) -> None:
"""
Args:
data (bool, optional): The data to contain. Defaults to False.
"""
self.data = data
self.validate()
def validate(self) -> None:
cls_name = self.__class__.__name__
if not isinstance(self.data, bool):
raise TypeError(f"Data in {cls_name} must be a bool")
class KeyPair(NamedTuple):
"""
KeyPair is the data model for a key pair(public / private keys).
"""
pub: PubKey
pri: PriKey
```
#### File: test/func_test/test_api.py
```python
import aiohttp
import pytest
import py_vsys as pv
class TestAPIGrp:
"""
TestAPIGrp tests pv.APIGrp
"""
class MockAPIGrp(pv.APIGrp):
"""
MockAPIGrp is the test subclass of pv.APIGrp
"""
PREFIX = "TEST"
async def test_make_url(self, host: str) -> None:
"""
test_make_url tests pv.APIGrp._make_url
Args:
host (str): The node api host.
"""
sess = aiohttp.ClientSession(base_url=host)
obj = self.MockAPIGrp(sess)
edpt = "EDPT"
assert obj._make_url(edpt) == self.MockAPIGrp.PREFIX + edpt
async def test_get(self, host: str) -> None:
"""
test_get tests pv.APIGrp._get
Args:
host (str): The node api host.
"""
self.MockAPIGrp.PREFIX = "/blocks"
sess = aiohttp.ClientSession(base_url=host)
edpt = "/height"
resp = await self.MockAPIGrp(sess)._get(edpt)
assert resp["height"] > 0
async def test_post(self, host: str) -> None:
"""
test_post tests pv.APIGrp._post
Args:
host (str): The node api host.
"""
self.MockAPIGrp.PREFIX = "/utils"
sess = aiohttp.ClientSession(base_url=host)
edpt = "/hash/fast"
raw = "hello"
resp = await self.MockAPIGrp(sess)._post(edpt, raw)
assert resp["hash"] == "4PNCZERNLKAqwSYHhZpb7B4GE34eiYDPXGgeNKWNNaBp"
```
#### File: func_test/test_ctrt/test_lock_ctrt.py
```python
import asyncio
import time
import pytest
import py_vsys as pv
from test.func_test import conftest as cft
class TestLockCtrt:
"""
TestLockCtrt is the collection of functional tests of Lock contract.
"""
TOK_MAX = 100
TOK_UNIT = 1
@pytest.fixture
async def new_tok_ctrt(self, acnt0: pv.Account) -> pv.TokCtrtWithoutSplit:
"""
new_tok_ctrt is the fixture that registers a new token contract without split instance.
Args:
acnt0 (pv.Account): The account of nonce 0.
Returns:
pv.TokCtrtWithoutSplit: The token contract instance.
"""
tc = await pv.TokCtrtWithoutSplit.register(acnt0, self.TOK_MAX, self.TOK_UNIT)
await cft.wait_for_block()
await tc.issue(acnt0, self.TOK_MAX)
await cft.wait_for_block()
return tc
@pytest.fixture
async def new_ctrt(
self, acnt0: pv.Account, new_tok_ctrt: pv.TokCtrtWithoutSplit
) -> pv.LockCtrt:
"""
new_ctrt is the fixture that registers a new Lock contract.
Args:
acnt0 (pv.Account): The account of nonce 0.
Returns:
pv.LockCtrt: The LockCtrt instance.
"""
tc = new_tok_ctrt
lc = await pv.LockCtrt.register(acnt0, tc.tok_id.data)
await cft.wait_for_block()
return lc
async def test_register(
self,
acnt0: pv.Account,
new_tok_ctrt: pv.TokCtrtWithoutSplit,
new_ctrt: pv.LockCtrt,
) -> pv.LockCtrt:
"""
test_register tests the method register.
Args:
acnt0 (pv.Account): The account of nonce 0.
new_tok_ctrt (pv.TokCtrtWithoutSplit): The fixture that registers a new Token contract.
new_ctrt (pv.LockCtrt): The fixture that registers a new Lock contract.
Returns:
pv.LockCtrt: The LockCtrt instance.
"""
tc = new_tok_ctrt
lc = new_ctrt
assert (await lc.maker).data == acnt0.addr.data
assert (await lc.tok_id) == tc.tok_id
assert (await lc.get_ctrt_bal(acnt0.addr.data)).amount == 0
assert (await lc.get_ctrt_lock_time(acnt0.addr.data)).unix_ts == 0
return lc
async def test_lock(
self,
acnt0: pv.Account,
new_tok_ctrt: pv.TokCtrtWithoutSplit,
new_ctrt: pv.LockCtrt,
):
"""
test_lock tests the method lock.
Args:
acnt0 (pv.Account): The account of nonce 0.
new_tok_ctrt (pv.TokCtrtWithoutSplit): The fixture that registers a new Token contract.
new_ctrt (pv.LockCtrt): The fixture that registers a new Lock contract.
"""
tc = new_tok_ctrt
lc = new_ctrt
api = acnt0.api
resp = await tc.deposit(acnt0, lc.ctrt_id.data, self.TOK_MAX)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
assert (await lc.get_ctrt_bal(acnt0.addr.data)).amount == self.TOK_MAX
later = int(time.time()) + cft.AVG_BLOCK_DELAY * 3
resp = await lc.lock(acnt0, later)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
assert (await lc.get_ctrt_lock_time(acnt0.addr.data)).unix_ts == later
# withdraw before the expiration will fail
resp = await tc.withdraw(acnt0, lc.ctrt_id.data, self.TOK_MAX)
await cft.wait_for_block()
await cft.assert_tx_status(api, resp["id"], "Failed")
assert (await lc.get_ctrt_bal(acnt0.addr.data)).amount == self.TOK_MAX
await asyncio.sleep(later - int(time.time()) + cft.AVG_BLOCK_DELAY)
# withdraw after the expiration will succeed
resp = await tc.withdraw(acnt0, lc.ctrt_id.data, self.TOK_MAX)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
assert (await lc.get_ctrt_bal(acnt0.addr.data)).amount == 0
@pytest.mark.whole
async def test_as_whole(
self,
acnt0: pv.Account,
new_tok_ctrt: pv.TokCtrtWithoutSplit,
new_ctrt: pv.LockCtrt,
):
"""
test_as_whole tests methods of LockCtrt as a whole so as to reduce resource consumption.
Args:
acnt0 (pv.Account): The account of nonce 0.
new_tok_ctrt (pv.TokCtrtWithoutSplit): The fixture that registers a new Token contract.
new_ctrt (pv.LockCtrt): The fixture that registers a new Lock contract.
"""
tc = new_tok_ctrt
lc = new_ctrt
lc = await self.test_register(acnt0, tc, lc)
await self.test_lock(acnt0, tc, lc)
```
#### File: func_test/test_ctrt/test_nft_ctrt.py
```python
import abc
import pytest
import py_vsys as pv
from test.func_test import conftest as cft
class TestNFTCtrt:
"""
TestNFTCtrt is the collection of functional tests of NFT contract.
"""
@pytest.fixture
async def new_ctrt(self, acnt0: pv.Account) -> pv.NFTCtrt:
"""
new_ctrt is the fixture that registers a new NFT contract.
Args:
acnt0 (pv.Account): The account of nonce 0.
Returns:
pv.NFTCtrt: The NFTCtrt instance.
"""
nc = await pv.NFTCtrt.register(acnt0)
await cft.wait_for_block()
return nc
@pytest.fixture
async def new_ctrt_with_tok(
self, new_ctrt: pv.NFTCtrt, acnt0: pv.Account
) -> pv.NFTCtrt:
"""
new_ctrt_with_tok is the fixture that registers a new NFT contract and issues an NFT token right after it.
Args:
new_ctrt (pv.NFTCtrt): The fixture that registers a new NFT contract.
acnt0 (pv.Account): The account of nonce 0.
Returns:
pv.NFTCtrt: The NFTCtrt instance.
"""
nc = new_ctrt
await nc.issue(acnt0)
await cft.wait_for_block()
return nc
@pytest.fixture
async def new_atomic_swap_ctrt(
self,
new_ctrt_with_tok: pv.NFTCtrt,
acnt0: pv.Account,
) -> pv.AtomicSwapCtrt:
"""
new_atomic_swap_ctrt is the fixture that registers a new atomic swap contract.
Args:
new_ctrt_with_tok (pv.NFTCtrt): The fixture that registers a new NFT contract and issues an NFT token right after it.
acnt0 (pv.Account): The account of nonce 0.
Returns:
pv.AtomicSwapCtrt: The AtomicSwapCtrt instance.
"""
nc = new_ctrt_with_tok
tok_id = pv.Ctrt.get_tok_id(nc.ctrt_id, pv.TokenIdx(0))
ac = await pv.AtomicSwapCtrt.register(acnt0, tok_id.data)
await cft.wait_for_block()
assert (await ac.maker) == acnt0.addr
assert (await ac.tok_id) == tok_id
return ac
async def test_register(self, acnt0: pv.Account) -> pv.NFTCtrt:
"""
test_register tests the method register.
Args:
acnt0 (pv.Account): The account of nonce 0.
Returns:
pv.NFTCtrt: The registered NFTCtrt.
"""
nc = await pv.NFTCtrt.register(acnt0)
await cft.wait_for_block()
assert (await nc.issuer) == acnt0.addr
assert (await nc.maker) == acnt0.addr
return nc
async def test_issue(self, new_ctrt: pv.NFTCtrt, acnt0: pv.Account):
"""
test_issue tests the method issue.
Args:
new_ctrt (pv.NFTCtrt): The fixture that registers a new NFT contract.
acnt0 (pv.Account): The account of nonce 0.
"""
nc = new_ctrt
api = nc.chain.api
resp = await nc.issue(acnt0)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
tok_id = pv.Ctrt.get_tok_id(nc.ctrt_id, pv.TokenIdx(0))
tok_bal = await cft.get_tok_bal(api, acnt0.addr.data, tok_id.data)
assert tok_bal == 1
async def test_send(
self, new_ctrt_with_tok: pv.NFTCtrt, acnt0: pv.Account, acnt1: pv.Account
):
"""
test_send tests the method send
Args:
new_ctrt_with_tok (pv.NFTCtrt): The fixture that registers a new NFT contract and issues an NFT token right after it.
acnt0 (pv.Account): The account of nonce 0.
acnt1 (pv.Account): The account of nonce 1.
"""
nc = new_ctrt_with_tok
api = nc.chain.api
tok_id = pv.Ctrt.get_tok_id(nc.ctrt_id, pv.TokenIdx(0))
tok_bal_acnt0 = await cft.get_tok_bal(api, acnt0.addr.data, tok_id.data)
assert tok_bal_acnt0 == 1
tok_bal_acnt1 = await cft.get_tok_bal(api, acnt1.addr.data, tok_id.data)
assert tok_bal_acnt1 == 0
resp = await nc.send(acnt0, acnt1.addr.data, 0)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
tok_bal_acnt0 = await cft.get_tok_bal(api, acnt0.addr.data, tok_id.data)
assert tok_bal_acnt0 == 0
tok_bal_acnt1 = await cft.get_tok_bal(api, acnt1.addr.data, tok_id.data)
assert tok_bal_acnt1 == 1
async def test_transfer(
self, new_ctrt_with_tok: pv.NFTCtrt, acnt0: pv.Account, acnt1: pv.Account
):
"""
test_transfer tests the method transfer.
Args:
new_ctrt_with_tok (pv.NFTCtrt): The fixture that registers a new NFT contract and issues an NFT token right after it.
acnt0 (pv.Account): The account of nonce 0.
acnt1 (pv.Account): The account of nonce 1.
"""
nc = new_ctrt_with_tok
api = nc.chain.api
tok_id = pv.Ctrt.get_tok_id(nc.ctrt_id, pv.TokenIdx(0))
tok_bal_acnt0 = await cft.get_tok_bal(api, acnt0.addr.data, tok_id.data)
assert tok_bal_acnt0 == 1
tok_bal_acnt1 = await cft.get_tok_bal(api, acnt1.addr.data, tok_id.data)
assert tok_bal_acnt1 == 0
resp = await nc.transfer(acnt0, acnt0.addr.data, acnt1.addr.data, 0)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
tok_bal_acnt0 = await cft.get_tok_bal(api, acnt0.addr.data, tok_id.data)
assert tok_bal_acnt0 == 0
tok_bal_acnt1 = await cft.get_tok_bal(api, acnt1.addr.data, tok_id.data)
assert tok_bal_acnt1 == 1
async def test_deposit_withdraw(
self,
new_ctrt_with_tok: pv.NFTCtrt,
new_atomic_swap_ctrt: pv.AtomicSwapCtrt,
acnt0: pv.Account,
):
"""
test_deposit_withdraw tests the method deposit & withdraw.
Args:
new_ctrt_with_tok (pv.NFTCtrt): The fixture that registers a new NFT contract and issues an NFT token right after it.
new_atomic_swap_ctrt (pv.AtomicSwapCtrt): The fixture that registers a new atomic swap contract.
acnt0 (pv.Account): The account of nonce 0.
"""
nc = new_ctrt_with_tok
api = nc.chain.api
tok_id = pv.Ctrt.get_tok_id(nc.ctrt_id, pv.TokenIdx(0))
ac = new_atomic_swap_ctrt
tok_bal = await cft.get_tok_bal(api, acnt0.addr.data, tok_id.data)
assert tok_bal == 1
resp = await nc.deposit(acnt0, ac.ctrt_id.data, 0)
await cft.wait_for_block()
tx_info = await api.tx.get_info(resp["id"])
assert tx_info["status"] == "Success"
tok_bal = await cft.get_tok_bal(api, acnt0.addr.data, tok_id.data)
assert tok_bal == 0
deposited_tok_bal = await ac.get_ctrt_bal(acnt0.addr.data)
assert deposited_tok_bal.amount == 1
await nc.withdraw(acnt0, ac.ctrt_id.data, 0)
await cft.wait_for_block()
tok_bal = await cft.get_tok_bal(api, acnt0.addr.data, tok_id.data)
assert tok_bal == 1
deposited_tok_bal = await ac.get_ctrt_bal(acnt0.addr.data)
assert deposited_tok_bal.amount == 0
async def test_supersede(
self, new_ctrt: pv.NFTCtrt, acnt0: pv.Account, acnt1: pv.Account
):
"""
test_supersede tests the method supersede.
Args:
new_ctrt (pv.NFTCtrt): The fixture that registers a new NFT contract.
acnt0 (pv.Account): The account of nonce 0.
acnt1 (pv.Account): The account of nonce 1.
"""
nc = new_ctrt
api = nc.chain.api
assert (await nc.issuer) == acnt0.addr
resp = await nc.supersede(acnt0, acnt1.addr.data)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
assert (await nc.issuer) == acnt1.addr
@pytest.mark.whole
async def test_as_whole(
self,
new_ctrt_with_tok: pv.NFTCtrt,
new_atomic_swap_ctrt: pv.AtomicSwapCtrt,
acnt0: pv.Account,
acnt1: pv.Account,
):
"""
test_as_whole tests methods of NFTCtrt as a whole so as to reduce resource consumption.
Args:
new_ctrt_with_tok (pv.NFTCtrt): The fixture that registers a new NFT contract and issues an NFT token right after it.
new_atomic_swap_ctrt (pv.AtomicSwapCtrt): The fixture that registers a new atomic swap contract.
acnt0 (pv.Account): The account of nonce 0.
acnt1 (pv.Account): The account of nonce 1.
"""
nc = await self.test_register(acnt0)
await self.test_issue(nc, acnt0)
nc = new_ctrt_with_tok
ac = new_atomic_swap_ctrt
await self.test_send(nc, acnt0, acnt1)
await self.test_transfer(nc, acnt1, acnt0)
await self.test_deposit_withdraw(nc, ac, acnt0)
await self.test_supersede(nc, acnt0, acnt1)
class _TestNFTCtrtV2Base(TestNFTCtrt):
"""
_TestNFTCtrtV2Base is the collection of general functional tests of NFT contract V2.
"""
@pytest.fixture
@abc.abstractmethod
async def new_ctrt(self, acnt0: pv.Account, acnt1: pv.Account) -> pv.NFTCtrtV2Base:
"""
new_ctrt is the fixture that registers a new NFT contract V2 instance.
Args:
acnt0 (pv.Account): The account of nonce 0.
acnt1 (pv.Account): The account of nonce 1.
Returns:
pv.NFTCtrtV2Base: The pv.NFTCtrtV2Base instance.
"""
@pytest.fixture
@abc.abstractmethod
async def new_atomic_swap_ctrt(
self,
new_ctrt_with_tok: pv.NFTCtrtV2Blacklist,
acnt0: pv.Account,
) -> pv.AtomicSwapCtrt:
"""
new_atomic_swap_ctrt is the fixture that registers a new atomic swap contract.
Args:
new_ctrt_with_tok (pv.NFTCtrtV2Base): The fixture that registers a new NFT contract and issues an NFT token right after it.
acnt0 (pv.Account): The account of nonce 0.
Returns:
pv.AtomicSwapCtrt: The AtomicSwapCtrt instance.
"""
@pytest.fixture
def arbitrary_ctrt_id(self) -> str:
"""
arbitrary_ctrt_id is the fixture that returns an arbitrary contract ID
Returns:
str: The contract ID.
"""
return "CF5Zkj2Ycx72WrBnjrcNHvJRVwsbNX1tjgT"
async def test_supersede(
self, new_ctrt: pv.NFTCtrtV2Whitelist, acnt0: pv.Account, acnt1: pv.Account
):
"""
test_supersede tests the method supersede.
Args:
new_ctrt (pv.NFTCtrtV2Whitelist): The fixture that registers a new NFT contract V2 with whitelist.
acnt0 (pv.Account): The account of nonce 0.
acnt1 (pv.Account): The account of nonce 1.
"""
nc = new_ctrt
api = nc.chain.api
assert (await nc.issuer) == acnt0.addr
assert (await nc.regulator) == acnt0.addr
resp = await nc.supersede(acnt0, acnt1.addr.data, acnt1.addr.data)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
assert (await nc.issuer) == acnt1.addr
assert (await nc.regulator) == acnt1.addr
async def test_update_list_user(
self, new_ctrt: pv.NFTCtrtV2Whitelist, acnt0: pv.Account, acnt1: pv.Account
):
"""
test_update_list_user tests the method update_list_user.
Args:
new_ctrt (pv.NFTCtrtV2Whitelist): The fixture that registers a new NFT contract V2 with whitelist.
acnt0 (pv.Account): The account of nonce 0.
acnt1 (pv.Account): The account of nonce 1.
"""
nc = new_ctrt
api = nc.chain.api
in_list = await nc.is_user_in_list(acnt1.addr.data)
assert in_list == False
resp = await nc.update_list_user(
by=acnt0,
addr=acnt1.addr.data,
val=True,
)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
in_list = await nc.is_user_in_list(acnt1.addr.data)
assert in_list == True
resp = await nc.update_list_user(
by=acnt0,
addr=acnt1.addr.data,
val=False,
)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
in_list = await nc.is_user_in_list(acnt1.addr.data)
assert in_list == False
async def test_update_list_ctrt(
self, new_ctrt: pv.NFTCtrtV2Whitelist, acnt0: pv.Account, arbitrary_ctrt_id: str
):
"""
test_update_list_ctrt tests the method update_list_ctrt.
Args:
new_ctrt (pv.NFTCtrtV2Whitelist): The fixture that registers a new NFT contract V2 with whitelist.
acnt0 (pv.Account): The account of nonce 0.
arbitrary_ctrt_id (str): An arbitrary contract ID
"""
nc = new_ctrt
api = nc.chain.api
target_ctrt_id = arbitrary_ctrt_id
in_list = await nc.is_ctrt_in_list(target_ctrt_id)
assert in_list == False
resp = await nc.update_list_ctrt(
by=acnt0,
addr=target_ctrt_id,
val=True,
)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
in_list = await nc.is_ctrt_in_list(target_ctrt_id)
assert in_list == True
resp = await nc.update_list_ctrt(
by=acnt0,
addr=target_ctrt_id,
val=False,
)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
in_list = await nc.is_ctrt_in_list(target_ctrt_id)
assert in_list == False
async def test_register(self, acnt0: pv.Account) -> pv.NFTCtrtV2Whitelist:
"""
test_register tests the method register.
Args:
acnt0 (pv.Account): The account of nonce 0.
Returns:
pv.NFTCtrtV2Whitelist: The registered NFTCtrtV2Whitelist
"""
nc: pv.NFTCtrtV2Whitelist = await pv.NFTCtrtV2Whitelist.register(acnt0)
await cft.wait_for_block()
assert (await nc.issuer) == acnt0.addr
assert (await nc.maker) == acnt0.addr
assert (await nc.regulator) == acnt0.addr
return nc
@pytest.mark.whole
async def test_as_whole(
self,
new_ctrt_with_tok: pv.NFTCtrtV2Whitelist,
new_atomic_swap_ctrt: pv.AtomicSwapCtrt,
acnt0: pv.Account,
acnt1: pv.Account,
arbitrary_ctrt_id: str,
):
"""
        test_as_whole tests methods of NFTCtrtV2Whitelist as a whole so as to reduce resource consumption.
Args:
new_ctrt_with_tok (pv.NFTCtrtV2Whitelist): The fixture that registers a new NFT contract and issues an NFT token right after it.
new_atomic_swap_ctrt (pv.AtomicSwapCtrt): The fixture that registers a new atomic swap contract.
acnt0 (pv.Account): The account of nonce 0.
acnt1 (pv.Account): The account of nonce 1.
arbitrary_ctrt_id (str): An arbitrary contract ID
"""
nc = await self.test_register(acnt0)
await self.test_update_list_user(nc, acnt0, acnt1)
await self.test_update_list_ctrt(nc, acnt0, arbitrary_ctrt_id)
await self.test_issue(nc, acnt0)
nc = new_ctrt_with_tok
ac = new_atomic_swap_ctrt
await self.test_send(nc, acnt0, acnt1)
await self.test_transfer(nc, acnt1, acnt0)
await self.test_deposit_withdraw(nc, ac, acnt0)
await self.test_supersede(nc, acnt0, acnt1)
class TestNFTCtrtV2Whitelist(_TestNFTCtrtV2Base):
"""
TestNFTCtrtV2Whitelist is the collection of functional tests of NFT contract V2 with whitelist.
"""
@pytest.fixture
async def new_ctrt(
self, acnt0: pv.Account, acnt1: pv.Account
) -> pv.NFTCtrtV2Whitelist:
"""
new_ctrt is the fixture that registers a new NFT contract V2 with whitelist.
Args:
acnt0 (pv.Account): The account of nonce 0.
acnt1 (pv.Account): The account of nonce 1.
Returns:
pv.NFTCtrtV2Whitelist: The NFTCtrtV2Whitelist instance.
"""
nc = await pv.NFTCtrtV2Whitelist.register(acnt0)
await cft.wait_for_block()
await nc.update_list_user(acnt0, acnt0.addr.data, True)
await nc.update_list_user(acnt0, acnt1.addr.data, True)
return nc
@pytest.fixture
async def new_atomic_swap_ctrt(
self,
new_ctrt_with_tok: pv.NFTCtrtV2Whitelist,
acnt0: pv.Account,
) -> pv.AtomicSwapCtrt:
"""
new_atomic_swap_ctrt is the fixture that registers a new atomic swap contract.
Args:
new_ctrt_with_tok (pv.NFTCtrtV2Whitelist): The fixture that registers a new NFT contract and issues an NFT token right after it.
acnt0 (pv.Account): The account of nonce 0.
Returns:
pv.AtomicSwapCtrt: The AtomicSwapCtrt instance.
"""
nc = new_ctrt_with_tok
api = nc.chain.api
tok_id = pv.Ctrt.get_tok_id(nc.ctrt_id, pv.TokenIdx(0))
ac = await pv.AtomicSwapCtrt.register(acnt0, tok_id.data)
await cft.wait_for_block()
assert (await ac.maker) == acnt0.addr
assert (await ac.tok_id) == tok_id
resp = await nc.update_list_ctrt(acnt0, ac.ctrt_id.data, True)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
return ac
class TestNFTCtrtV2Blacklist(_TestNFTCtrtV2Base):
"""
TestNFTCtrtV2Blacklist is the collection of functional tests of NFT contract V2 with blacklist.
"""
@pytest.fixture
async def new_ctrt(self, acnt0: pv.Account) -> pv.NFTCtrtV2Blacklist:
"""
new_ctrt is the fixture that registers a new NFT contract V2 with blacklist.
Args:
acnt0 (pv.Account): The account of nonce 0.
Returns:
pv.NFTCtrtV2Blacklist: The NFTCtrtV2Blacklist instance.
"""
nc = await pv.NFTCtrtV2Blacklist.register(acnt0)
await cft.wait_for_block()
return nc
@pytest.fixture
async def new_atomic_swap_ctrt(
self,
new_ctrt_with_tok: pv.NFTCtrtV2Blacklist,
acnt0: pv.Account,
) -> pv.AtomicSwapCtrt:
"""
new_atomic_swap_ctrt is the fixture that registers a new atomic swap contract.
Args:
new_ctrt_with_tok (pv.NFTCtrtV2Blacklist): The fixture that registers a new NFT contract and issues an NFT token right after it.
acnt0 (pv.Account): The account of nonce 0.
Returns:
pv.AtomicSwapCtrt: The AtomicSwapCtrt instance.
"""
nc = new_ctrt_with_tok
tok_id = pv.Ctrt.get_tok_id(nc.ctrt_id, pv.TokenIdx(0))
ac = await pv.AtomicSwapCtrt.register(acnt0, tok_id.data)
await cft.wait_for_block()
assert (await ac.maker) == acnt0.addr
assert (await ac.tok_id) == tok_id
return ac
```
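A note on the structure used throughout these test modules: the leading underscore in `_TestNFTCtrtV2Base` keeps pytest from collecting the base class directly (the default `python_classes` pattern only matches `Test*`), while the `TestNFTCtrtV2Whitelist` / `TestNFTCtrtV2Blacklist` subclasses re-declare the `new_ctrt` and `new_atomic_swap_ctrt` fixtures so every inherited test runs against their own contract variant. The snippet below is a minimal, self-contained sketch of that fixture-override mechanism using plain pytest; all names in it are invented for illustration and are not part of py_vsys.

```python
import pytest


class _TestBase:
    """Not collected directly: the name does not match pytest's Test* pattern."""

    @pytest.fixture
    def ctrt(self):
        return "base-contract"

    def test_ctrt_kind(self, ctrt):
        # Inherited by every subclass and re-run with that subclass's fixture.
        assert ctrt.endswith("contract")


class TestWhitelistVariant(_TestBase):
    @pytest.fixture
    def ctrt(self):
        # Overriding the fixture re-targets all inherited tests.
        return "whitelist-contract"


class TestBlacklistVariant(_TestBase):
    @pytest.fixture
    def ctrt(self):
        return "blacklist-contract"
```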
#### File: func_test/test_ctrt/test_pay_chan_ctrt.py
```python
import asyncio
import time
from typing import Tuple
import pytest
import py_vsys as pv
from test.func_test import conftest as cft
class TestPayChanCtrt:
"""
TestPayChanCtrt is the collection of functional tests of Payment Channel Contract.
"""
TOK_MAX = 100
TOK_UNIT = 1
INIT_LOAD = TOK_MAX // 2
@pytest.fixture
async def new_tok_ctrt(self, acnt0: pv.Account) -> pv.TokCtrtWithoutSplit:
"""
new_tok_ctrt is the fixture that registers a new token contract without split instance.
Args:
acnt0 (pv.Account): The account of nonce 0.
Returns:
pv.TokCtrtWithoutSplit: The token contract instance.
"""
tc = await pv.TokCtrtWithoutSplit.register(acnt0, self.TOK_MAX, self.TOK_UNIT)
await cft.wait_for_block()
await tc.issue(acnt0, self.TOK_MAX)
await cft.wait_for_block()
return tc
@pytest.fixture
async def new_ctrt(
self, acnt0: pv.Account, new_tok_ctrt: pv.TokCtrtWithoutSplit
) -> pv.PayChanCtrt:
"""
new_ctrt is the fixture that registers a new Payment Channel contract.
Args:
acnt0 (pv.Account): The account of nonce 0.
new_tok_ctrt (pv.TokCtrtWithoutSplit): The fixture that registers a new Token contract.
Returns:
pv.PayChanCtrt: The PayChanCtrt instance.
"""
tc = new_tok_ctrt
api = acnt0.api
pc = await pv.PayChanCtrt.register(acnt0, tc.tok_id.data)
await cft.wait_for_block()
resp = await tc.deposit(acnt0, pc.ctrt_id.data, self.TOK_MAX)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
return pc
@pytest.fixture
async def new_ctrt_with_chan(
self,
acnt0: pv.Account,
acnt1: pv.Account,
new_ctrt: pv.PayChanCtrt,
) -> Tuple[pv.PayChanCtrt, str]:
"""
new_ctrt_with_chan is the fixture that registers a new Payment Channel
contract and creates a channel.
Args:
acnt0 (pv.Account): The account of nonce 0.
acnt1 (pv.Account): The account of nonce 1.
new_ctrt (pv.PayChanCtrt): The fixture that registers a new Payment Channel contract.
Returns:
Tuple[pv.PayChanCtrt, str]: The PayChanCtrt instance & channel id
"""
pc = new_ctrt
load_amount = self.INIT_LOAD
later = int(time.time()) + 60 * 10
resp = await pc.create_and_load(
by=acnt0,
recipient=acnt1.addr.data,
amount=load_amount,
expire_at=later,
)
await cft.wait_for_block()
chan_id = resp["id"]
return pc, chan_id
async def test_register(
self,
acnt0: pv.Account,
new_tok_ctrt: pv.TokCtrtWithoutSplit,
new_ctrt: pv.PayChanCtrt,
) -> pv.PayChanCtrt:
"""
test_register tests the method register.
Args:
acnt0 (pv.Account): The account of nonce 0.
new_tok_ctrt (pv.TokCtrtWithoutSplit): The fixture that registers a new Token contract.
new_ctrt (pv.PayChanCtrt): The fixture that registers a new Payment Channel contract.
Returns:
pv.PayChanCtrt: The PayChanCtrt instance.
"""
tc = new_tok_ctrt
pc = new_ctrt
assert (await pc.maker) == acnt0.addr
assert (await pc.tok_id) == tc.tok_id
ctrt_bal = await pc.get_ctrt_bal(acnt0.addr.data)
assert ctrt_bal.amount == self.TOK_MAX
return pc
async def test_create_and_load(
self,
acnt0: pv.Account,
acnt1: pv.Account,
new_ctrt: pv.PayChanCtrt,
) -> str:
"""
test_create_and_load tests the method create_and_load.
Args:
acnt0 (pv.Account): The account of nonce 0.
acnt1 (pv.Account): The account of nonce 1.
new_ctrt (pv.PayChanCtrt): The fixture that registers a new Payment Channel contract.
Returns:
str: The channel ID.
"""
pc = new_ctrt
api = acnt0.api
load_amount = self.INIT_LOAD
later = int(time.time()) + 60 * 10
resp = await pc.create_and_load(
by=acnt0,
recipient=acnt1.addr.data,
amount=load_amount,
expire_at=later,
)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
chan_id = resp["id"]
chan_creator = await pc.get_chan_creator(chan_id)
assert chan_creator == acnt0.addr
chan_creator_pub_key = await pc.get_chan_creator_pub_key(chan_id)
assert chan_creator_pub_key == acnt0.key_pair.pub
chan_accum_load = await pc.get_chan_accum_load(chan_id)
assert chan_accum_load.amount == load_amount
chan_accum_pay = await pc.get_chan_accum_pay(chan_id)
assert chan_accum_pay.amount == 0
chan_exp_time = await pc.get_chan_exp_time(chan_id)
assert chan_exp_time.unix_ts == later
chan_status = await pc.get_chan_status(chan_id)
assert chan_status is True
return chan_id
async def test_extend_exp_time(
self,
acnt0: pv.Account,
new_ctrt_with_chan: Tuple[pv.PayChanCtrt, str],
) -> None:
"""
test_extend_exp_time tests the method extend_exp_time.
Args:
acnt0 (pv.Account): The account of nonce 0.
new_ctrt_with_chan (Tuple[pv.PayChanCtrt, str]): The fixture that registers a new Payment Channel contract
and creates a new channel.
"""
pc, chan_id = new_ctrt_with_chan
api = acnt0.api
chan_exp_time_old = await pc.get_chan_exp_time(chan_id)
new_later = chan_exp_time_old.unix_ts + 300
resp = await pc.extend_exp_time(
by=acnt0,
chan_id=chan_id,
expire_at=new_later,
)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
chan_exp_time = await pc.get_chan_exp_time(chan_id)
assert chan_exp_time.unix_ts == new_later
async def test_load(
self,
acnt0: pv.Account,
new_ctrt_with_chan: Tuple[pv.PayChanCtrt, str],
) -> None:
"""
test_load tests the method load.
Args:
acnt0 (pv.Account): The account of nonce 0.
new_ctrt_with_chan (Tuple[pv.PayChanCtrt, str]): The fixture that registers a new Payment Channel contract
and creates a new channel.
"""
pc, chan_id = new_ctrt_with_chan
api = acnt0.api
chan_load_old = await pc.get_chan_accum_load(chan_id)
assert chan_load_old.amount == self.INIT_LOAD
more_load = self.INIT_LOAD // 2
resp = await pc.load(acnt0, chan_id, more_load)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
chan_load = await pc.get_chan_accum_load(chan_id)
assert chan_load.amount == self.INIT_LOAD + more_load
async def test_abort(
self,
acnt0: pv.Account,
new_ctrt_with_chan: Tuple[pv.PayChanCtrt, str],
) -> None:
"""
test_abort tests the method abort.
Args:
acnt0 (pv.Account): The account of nonce 0.
new_ctrt_with_chan (Tuple[pv.PayChanCtrt, str]): The fixture that registers a new Payment Channel contract
and creates a new channel.
"""
pc, chan_id = new_ctrt_with_chan
api = acnt0.api
chan_status = await pc.get_chan_status(chan_id)
assert chan_status is True
resp = await pc.abort(acnt0, chan_id)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
chan_status = await pc.get_chan_status(chan_id)
assert chan_status is False
async def test_unload(
self,
acnt0: pv.Account,
acnt1: pv.Account,
new_ctrt: pv.PayChanCtrt,
) -> None:
"""
test_unload tests the method unload.
Args:
acnt0 (pv.Account): The account of nonce 0.
acnt1 (pv.Account): The account of nonce 1.
new_ctrt (pv.PayChanCtrt): The fixture that registers a new Payment Channel contract.
"""
pc = new_ctrt
api = acnt0.api
load_amount = self.TOK_MAX // 10
later = int(time.time()) + cft.AVG_BLOCK_DELAY * 2
# create a channel
resp = await pc.create_and_load(
by=acnt0,
recipient=acnt1.addr.data,
amount=load_amount,
expire_at=later,
)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
chan_id = resp["id"]
bal_old = await pc.get_ctrt_bal(acnt0.addr.data)
# wait until the channel expires
await asyncio.sleep(cft.AVG_BLOCK_DELAY * 2)
resp = await pc.unload(acnt0, chan_id)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
bal = await pc.get_ctrt_bal(acnt0.addr.data)
assert bal.amount == bal_old.amount + load_amount
async def test_offchain_pay_and_collect_payment(
self,
acnt0: pv.Account,
acnt1: pv.Account,
new_ctrt_with_chan: Tuple[pv.PayChanCtrt, str],
) -> None:
"""
test_offchain_pay_and_collect_payment tests the method
- offchain_pay
- collect_payment.
Args:
acnt0 (pv.Account): The account of nonce 0.
acnt1 (pv.Account): The account of nonce 1.
new_ctrt_with_chan (Tuple[pv.PayChanCtrt, str]): The fixture that registers a new Payment Channel contract
and creates a new channel.
"""
pc, chan_id = new_ctrt_with_chan
api = acnt0.api
sig = await pc.offchain_pay(
key_pair=acnt0.key_pair,
chan_id=chan_id,
amount=self.INIT_LOAD,
)
resp = await pc.collect_payment(
by=acnt1,
chan_id=chan_id,
amount=self.INIT_LOAD,
signature=sig,
)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
accum_pay = await pc.get_chan_accum_pay(chan_id)
assert accum_pay.amount == self.INIT_LOAD
acnt1_bal = await pc.get_ctrt_bal(acnt1.addr.data)
assert acnt1_bal.amount == self.INIT_LOAD
@pytest.mark.whole
async def test_as_whole(
self,
acnt0: pv.Account,
acnt1: pv.Account,
new_tok_ctrt: pv.TokCtrtWithoutSplit,
new_ctrt: pv.PayChanCtrt,
) -> None:
"""
test_as_whole tests methods of PayChanCtrt as a whole so as to reduce resource consumption.
Args:
acnt0 (pv.Account): The account of nonce 0.
acnt1 (pv.Account): The account of nonce 1.
new_tok_ctrt (pv.TokCtrtWithoutSplit): The token contract instance.
new_ctrt (pv.PayChanCtrt): The fixture that registers a new Payment Channel contract.
"""
tc = new_tok_ctrt
pc = new_ctrt
await self.test_register(acnt0, tc, pc)
chan_id = await self.test_create_and_load(acnt0, acnt1, pc)
pc_with_chan = (pc, chan_id)
await self.test_extend_exp_time(acnt0, pc_with_chan)
await self.test_load(acnt0, pc_with_chan)
await self.test_offchain_pay_and_collect_payment(acnt0, acnt1, pc_with_chan)
await self.test_abort(acnt0, pc_with_chan)
await self.test_unload(acnt0, acnt1, pc)
```
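The core flow exercised by `test_offchain_pay_and_collect_payment` is worth isolating: the payer signs an accumulated amount off-chain, and only the recipient's collection hits the chain. The sketch below strips that exchange out of the test harness; it assumes `pc` is an already-registered `PayChanCtrt`, `chan_id` an open channel, and `payer` / `recipient` funded `pv.Account` objects set up elsewhere, and it reuses only the two method signatures shown in the test above.

```python
# Hypothetical excerpt: off-chain payment followed by on-chain collection.
async def pay_offchain_then_collect(pc, chan_id, payer, recipient, amount):
    # The payer signs the accumulated amount owed; nothing is broadcast yet
    # (compare get_chan_accum_pay in the test above).
    sig = await pc.offchain_pay(key_pair=payer.key_pair, chan_id=chan_id, amount=amount)
    # The recipient redeems the signed amount on-chain.
    return await pc.collect_payment(by=recipient, chan_id=chan_id, amount=amount, signature=sig)
```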
#### File: func_test/test_ctrt/test_tok_ctrt.py
```python
import pytest
import py_vsys as pv
from test.func_test import conftest as cft
class TestTokCtrtWithoutSplit:
"""
TestTokCtrtWithoutSplit is the collection of functional tests of Token contract without split.
"""
@pytest.fixture
async def new_ctrt(self, acnt0: pv.Account) -> pv.TokCtrtWithoutSplit:
"""
new_ctrt is the fixture that registers a new token contract.
Args:
acnt0 (pv.Account): the account of nonce 0.
Returns:
pv.TokCtrtWithoutSplit: the TokCtrtWithoutSplit instance.
"""
tc = await pv.TokCtrtWithoutSplit.register(acnt0, 50, 1)
await cft.wait_for_block()
return tc
@pytest.fixture
async def new_ctrt_with_tok(
self, new_ctrt: pv.TokCtrtWithoutSplit, acnt0: pv.Account
) -> pv.TokCtrtWithoutSplit:
"""
new_ctrt_with_tok is the fixture that registers a new TokenWithoutSplit contract and issues tokens right after it.
Args:
new_ctrt (pv.TokCtrtWithoutSplit): The fixture that registers a new TokenWithoutSplit contract.
acnt0 (pv.Account): The account of nonce 0.
Returns:
pv.TokCtrtWithoutSplit: The TokCtrtWithoutSplit instance.
"""
tc = new_ctrt
await tc.issue(acnt0, 50)
await cft.wait_for_block()
return tc
@pytest.fixture
async def new_atomic_swap_ctrt(
self,
new_ctrt_with_tok: pv.TokCtrtWithoutSplit,
acnt0: pv.Account,
) -> pv.AtomicSwapCtrt:
"""
new_atomic_swap_ctrt is the fixture that registers a new atomic swap contract.
Args:
new_ctrt_with_tok (pv.TokCtrtWithoutSplit): The fixture that registers a new token contract and issues tokens right after it.
acnt0 (pv.Account): The account of nonce 0.
Returns:
pv.AtomicSwapCtrt: The AtomicSwapCtrt instance.
"""
tc = new_ctrt_with_tok
ac = await pv.AtomicSwapCtrt.register(acnt0, tc.tok_id.data)
await cft.wait_for_block()
assert (await ac.maker) == acnt0.addr
assert (await ac.tok_id) == tc.tok_id
return ac
async def test_register(self, acnt0: pv.Account) -> pv.TokCtrtWithoutSplit:
"""
test_register tests the method register.
Args:
acnt0 (pv.Account): The account of nonce 0.
Returns:
pv.TokCtrtWithoutSplit: The TokCtrtWithoutSplit instance.
"""
tc = await pv.TokCtrtWithoutSplit.register(acnt0, 50, 1)
await cft.wait_for_block()
assert (await tc.issuer) == acnt0.addr
assert (await tc.maker) == acnt0.addr
return tc
async def test_issue(self, new_ctrt: pv.TokCtrtWithoutSplit, acnt0: pv.Account):
"""
test_issue tests the method issue.
Args:
new_ctrt (pv.TokCtrtWithoutSplit): The fixture that registers a new token contract without split.
acnt0 (pv.Account): The account of nonce 0.
"""
tc = new_ctrt
api = tc.chain.api
resp = await tc.issue(acnt0, 50)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
tok_bal = await cft.get_tok_bal(api, acnt0.addr.data, tc.tok_id.data)
assert tok_bal == 50
async def test_send(
self,
new_ctrt_with_tok: pv.TokCtrtWithoutSplit,
acnt0: pv.Account,
acnt1: pv.Account,
):
"""
test_send tests the method send
Args:
new_ctrt_with_tok (pv.TokCtrtWithoutSplit): The fixture that registers a new token contract and issues tokens right after it.
acnt0 (pv.Account): The account of nonce 0.
acnt1 (pv.Account): The account of nonce 1.
"""
tc = new_ctrt_with_tok
api = tc.chain.api
tok_bal_acnt0 = await cft.get_tok_bal(api, acnt0.addr.data, tc.tok_id.data)
assert tok_bal_acnt0 == 50
tok_bal_acnt1 = await cft.get_tok_bal(api, acnt1.addr.data, tc.tok_id.data)
assert tok_bal_acnt1 == 0
resp = await tc.send(acnt0, acnt1.addr.data, 50)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
tok_bal_acnt0 = await cft.get_tok_bal(api, acnt0.addr.data, tc.tok_id.data)
assert tok_bal_acnt0 == 0
tok_bal_acnt1 = await cft.get_tok_bal(api, acnt1.addr.data, tc.tok_id.data)
assert tok_bal_acnt1 == 50
async def test_transfer(
self,
new_ctrt_with_tok: pv.TokCtrtWithoutSplit,
acnt0: pv.Account,
acnt1: pv.Account,
):
"""
test_transfer tests the method transfer.
Args:
new_ctrt_with_tok (pv.TokCtrtWithoutSplit): The fixture that registers a new token contract and issues tokens right after it.
acnt0 (pv.Account): The account of nonce 0.
acnt1 (pv.Account): The account of nonce 1.
"""
tc = new_ctrt_with_tok
api = tc.chain.api
tok_bal_acnt0 = await cft.get_tok_bal(api, acnt0.addr.data, tc.tok_id.data)
assert tok_bal_acnt0 == 50
tok_bal_acnt1 = await cft.get_tok_bal(api, acnt1.addr.data, tc.tok_id.data)
assert tok_bal_acnt1 == 0
resp = await tc.transfer(acnt0, acnt0.addr.data, acnt1.addr.data, 50)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
tok_bal_acnt0 = await cft.get_tok_bal(api, acnt0.addr.data, tc.tok_id.data)
assert tok_bal_acnt0 == 0
tok_bal_acnt1 = await cft.get_tok_bal(api, acnt1.addr.data, tc.tok_id.data)
assert tok_bal_acnt1 == 50
async def test_deposit_and_withdraw(
self,
new_ctrt_with_tok: pv.TokCtrtWithoutSplit,
new_atomic_swap_ctrt: pv.AtomicSwapCtrt,
acnt0: pv.Account,
):
"""
test_deposit_and_withdraw tests the method deposit & withdraw.
Args:
new_ctrt_with_tok (pv.TokCtrtWithoutSplit): The fixture that registers a new token contract and issues tokens right after it.
new_atomic_swap_ctrt (pv.AtomicSwapCtrt): The fixture that registers a new atomic swap contract.
acnt0 (pv.Account): The account of nonce 0.
"""
tc = new_ctrt_with_tok
api = tc.chain.api
ac = new_atomic_swap_ctrt
await cft.wait_for_block()
assert (await ac.maker) == acnt0.addr
assert (await ac.tok_id) == tc.tok_id
tok_bal = await cft.get_tok_bal(api, acnt0.addr.data, tc.tok_id.data)
assert tok_bal == 50
resp = await tc.deposit(acnt0, ac.ctrt_id.data, 10)
await cft.wait_for_block()
tx_info = await api.tx.get_info(resp["id"])
assert tx_info["status"] == "Success"
tok_bal = await cft.get_tok_bal(api, acnt0.addr.data, tc.tok_id.data)
assert tok_bal == 40
deposited_tok_bal = await ac.get_ctrt_bal(acnt0.addr.data)
assert deposited_tok_bal.amount == 10
# withdraw
await tc.withdraw(acnt0, ac.ctrt_id.data, 10)
await cft.wait_for_block()
tok_bal = await cft.get_tok_bal(api, acnt0.addr.data, tc.tok_id.data)
assert tok_bal == 50
deposited_tok_bal = await ac.get_ctrt_bal(acnt0.addr.data)
assert deposited_tok_bal.amount == 0
async def test_destroy(
self, new_ctrt_with_tok: pv.TokCtrtWithoutSplit, acnt0: pv.Account
):
"""
test_destroy tests the method destroy.
Args:
            new_ctrt_with_tok (pv.TokCtrtWithoutSplit): The fixture that registers a new token contract and issues tokens right after it.
            acnt0 (pv.Account): The account of nonce 0.
"""
tc = new_ctrt_with_tok
api = tc.chain.api
tok_bal = await cft.get_tok_bal(api, acnt0.addr.data, tc.tok_id.data)
assert tok_bal == 50
resp = await tc.destroy(acnt0, 10)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
tok_bal_acnt0 = await cft.get_tok_bal(api, acnt0.addr.data, tc.tok_id.data)
assert tok_bal_acnt0 == 40
async def test_supersede(
self, new_ctrt: pv.TokCtrtWithoutSplit, acnt0: pv.Account, acnt1: pv.Account
):
"""
test_supersede tests the method supersede.
Args:
new_ctrt (pv.TokCtrtWithoutSplit): The fixture that registers a new token contract.
acnt0 (pv.Account): The account of nonce 0.
acnt1 (pv.Account): The account of nonce 1.
"""
tc = new_ctrt
api = tc.chain.api
assert (await tc.issuer) == acnt0.addr
resp = await tc.supersede(acnt0, acnt1.addr.data)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
assert (await tc.issuer) == acnt1.addr
@pytest.mark.whole
async def test_as_whole(
self,
new_ctrt_with_tok: pv.TokCtrtWithoutSplit,
new_atomic_swap_ctrt: pv.AtomicSwapCtrt,
acnt0: pv.Account,
acnt1: pv.Account,
):
"""
        test_as_whole tests methods of TokCtrtWithoutSplit as a whole so as to reduce resource consumption.
Args:
new_ctrt_with_tok (pv.TokCtrtWithoutSplit): The fixture that registers a new token contract and issues tokens right after it.
new_atomic_swap_ctrt (pv.AtomicSwapCtrt): The fixture that registers a new atomic swap contract.
acnt0 (pv.Account): The account of nonce 0.
acnt1 (pv.Account): The account of nonce 1.
"""
tc = await self.test_register(acnt0)
await self.test_issue(tc, acnt0)
tc = new_ctrt_with_tok
ac = new_atomic_swap_ctrt
await self.test_send(tc, acnt0, acnt1)
await self.test_transfer(tc, acnt1, acnt0)
await self.test_deposit_and_withdraw(tc, ac, acnt0)
await self.test_destroy(tc, acnt0)
await self.test_supersede(tc, acnt0, acnt1)
class TestTokCtrtWithSplit(TestTokCtrtWithoutSplit):
"""
TestTokCtrtWithSplit is the collection of functional tests of Token contract with split.
"""
@pytest.fixture
async def new_ctrt(self, acnt0: pv.Account) -> pv.TokCtrtWithSplit:
"""
new_ctrt is the fixture that registers a new token contract with split.
Args:
acnt0 (pv.Account): the account of nonce 0.
Returns:
            pv.TokCtrtWithSplit: the TokCtrtWithSplit instance.
"""
tc = await pv.TokCtrtWithSplit.register(acnt0, 50, 1)
await cft.wait_for_block()
return tc
async def test_split(self, new_ctrt: pv.TokCtrtWithSplit, acnt0: pv.Account):
"""
test_split tests the method split.
Args:
new_ctrt (pv.TokCtrtWithSplit): The fixture that registers a new token contract.
acnt0 (pv.Account): The account of nonce 0.
"""
tc = new_ctrt
api = tc.chain.api
resp = await tc.split(acnt0, 12)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
new_unit = await api.ctrt.get_tok_info(tc.tok_id.data)
assert 12 == new_unit["unity"]
class TestTokCtrtWithoutSplitV2Whitelist(TestTokCtrtWithoutSplit):
"""
    TestTokCtrtWithoutSplitV2Whitelist is the collection of functional tests of Token contract with white list.
"""
@pytest.fixture
async def new_ctrt(
self, acnt0: pv.Account, acnt1: pv.Account
) -> pv.TokCtrtWithoutSplitV2Whitelist:
"""
new_ctrt is the fixture that registers a new token contract with white list.
Args:
            acnt0 (pv.Account): the account of nonce 0.
            acnt1 (pv.Account): the account of nonce 1.
Returns:
pv.TokCtrtWithoutSplitV2Whitelist: the TokCtrtWithoutSplitV2Whitelist instance.
"""
tc = await pv.TokCtrtWithoutSplitV2Whitelist.register(acnt0, 50, 1)
await cft.wait_for_block()
await tc.update_list_user(acnt0, acnt0.addr.data, True)
await tc.update_list_user(acnt0, acnt1.addr.data, True)
return tc
@pytest.fixture
def arbitrary_ctrt_id(self) -> str:
"""
arbitrary_ctrt_id is the fixture that returns an arbitrary contract ID
Returns:
str: The contract ID.
"""
return "CEzFs69VesVBHTefZVVCAddcbMzMQAjchCX"
@pytest.fixture
async def new_atomic_swap_ctrt(
self,
new_ctrt_with_tok: pv.TokCtrtWithoutSplitV2Whitelist,
acnt0: pv.Account,
) -> pv.AtomicSwapCtrt:
"""
new_atomic_swap_ctrt is the fixture that registers a new atomic swap contract.
Args:
new_ctrt_with_tok (pv.TokCtrtWithoutSplitV2Whitelist): The fixture that registers a new token contract and issues a token right after it.
acnt0 (pv.Account): The account of nonce 0.
Returns:
pv.AtomicSwapCtrt: The AtomicSwapCtrt instance.
"""
tc = new_ctrt_with_tok
api = tc.chain.api
ac = await pv.AtomicSwapCtrt.register(acnt0, tc.tok_id.data)
await cft.wait_for_block()
assert (await ac.maker) == acnt0.addr
assert (await ac.tok_id) == tc.tok_id
resp = await tc.update_list_ctrt(acnt0, ac.ctrt_id.data, True)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
return ac
async def test_supersede(
self,
new_ctrt: pv.TokCtrtWithoutSplitV2Whitelist,
acnt0: pv.Account,
acnt1: pv.Account,
):
"""
test_supersede tests the method supersede.
Args:
new_ctrt (pv.TokCtrtWithoutSplitV2Whitelist): The fixture that registers a new token contract V2 with whitelist.
acnt0 (pv.Account): The account of nonce 0.
acnt1 (pv.Account): The account of nonce 1.
"""
tc = new_ctrt
api = tc.chain.api
assert (await tc.issuer) == acnt0.addr
assert (await tc.regulator) == acnt0.addr
resp = await tc.supersede(acnt0, acnt1.addr.data, acnt1.addr.data)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
assert (await tc.issuer) == acnt1.addr
assert (await tc.regulator) == acnt1.addr
async def test_update_list_user(
self,
new_ctrt: pv.TokCtrtWithoutSplitV2Whitelist,
acnt0: pv.Account,
acnt1: pv.Account,
):
"""
test_update_list_user tests the method update_list_user.
Args:
new_ctrt (pv.TokCtrtWithoutSplitV2Whitelist): The fixture that registers a new token contract V2 with whitelist.
acnt0 (pv.Account): The account of nonce 0.
acnt1 (pv.Account): The account of nonce 1.
"""
tc = new_ctrt
api = tc.chain.api
in_list = await tc.is_user_in_list(acnt1.addr.data)
assert in_list == False
resp = await tc.update_list_user(
by=acnt0,
addr=acnt1.addr.data,
val=True,
)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
in_list = await tc.is_user_in_list(acnt1.addr.data)
assert in_list == True
resp = await tc.update_list_user(
by=acnt0,
addr=acnt1.addr.data,
val=False,
)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
in_list = await tc.is_user_in_list(acnt1.addr.data)
assert in_list == False
async def test_update_list_ctrt(
self,
new_ctrt: pv.TokCtrtWithoutSplitV2Whitelist,
acnt0: pv.Account,
arbitrary_ctrt_id: str,
):
"""
test_update_list_ctrt tests the method update_list_ctrt.
Args:
new_ctrt (pv.TokCtrtWithoutSplitV2Whitelist): The fixture that registers a new token contract V2 with whitelist.
acnt0 (pv.Account): The account of nonce 0.
arbitrary_ctrt_id (str): An arbitrary contract ID
"""
tc = new_ctrt
api = tc.chain.api
target_ctrt_id = arbitrary_ctrt_id
in_list = await tc.is_ctrt_in_list(target_ctrt_id)
assert in_list == False
resp = await tc.update_list_ctrt(
by=acnt0,
addr=target_ctrt_id,
val=True,
)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
in_list = await tc.is_ctrt_in_list(target_ctrt_id)
assert in_list == True
resp = await tc.update_list_ctrt(
by=acnt0,
addr=target_ctrt_id,
val=False,
)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
in_list = await tc.is_ctrt_in_list(target_ctrt_id)
assert in_list == False
async def test_register(
self, acnt0: pv.Account
) -> pv.TokCtrtWithoutSplitV2Whitelist:
"""
test_register tests the method register.
Args:
acnt0 (pv.Account): The account of nonce 0.
Returns:
pv.TokCtrtWithoutSplitV2Whitelist: The registered TokCtrtWithoutSplitV2Whitelist
"""
tc = await pv.TokCtrtWithoutSplitV2Whitelist.register(acnt0, 50, 1)
await cft.wait_for_block()
assert (await tc.issuer) == acnt0.addr
assert (await tc.maker) == acnt0.addr
assert (await tc.regulator) == acnt0.addr
return tc
class TestTokCtrtWithoutSplitV2Blacklist(TestTokCtrtWithoutSplitV2Whitelist):
"""
    TestTokCtrtWithoutSplitV2Blacklist is the collection of functional tests of token contract V2 with blacklist.
"""
@pytest.fixture
async def new_ctrt(self, acnt0: pv.Account) -> pv.TokCtrtWithoutSplitV2Blacklist:
"""
new_ctrt is the fixture that registers a new token contract V2 with blacklist.
Args:
acnt0 (pv.Account): The account of nonce 0.
Returns:
pv.TokCtrtWithoutSplitV2Blacklist: The TokCtrtWithoutSplitV2Blacklist instance.
"""
tc = await pv.TokCtrtWithoutSplitV2Blacklist.register(acnt0, 50, 1)
await cft.wait_for_block()
return tc
@pytest.fixture
async def new_atomic_swap_ctrt(
self,
new_ctrt_with_tok: pv.TokCtrtWithoutSplitV2Blacklist,
acnt0: pv.Account,
) -> pv.AtomicSwapCtrt:
"""
new_atomic_swap_ctrt is the fixture that registers a new atomic swap contract.
Args:
new_ctrt_with_tok (pv.TokCtrtWithoutSplitV2Blacklist): The fixture that registers a new token contract and issues tokens right after it.
acnt0 (pv.Account): The account of nonce 0.
Returns:
pv.AtomicSwapCtrt: The AtomicSwapCtrt instance.
"""
tc = new_ctrt_with_tok
ac = await pv.AtomicSwapCtrt.register(acnt0, tc.tok_id.data)
await cft.wait_for_block()
assert (await ac.maker) == acnt0.addr
assert (await ac.tok_id) == tc.tok_id
return ac
```
#### File: func_test/test_ctrt/test_v_option_ctrt.py
```python
import pytest
import time
import asyncio
import py_vsys as pv
from test.func_test import conftest as cft
class TestVOptionCtrt:
"""
TestVOptionCtrt is the collection of functional tests of V Option contract.
"""
MAX_ISSUE_AMOUNT = 1000
MINT_AMOUNT = 200
UNLOCK_AMOUNT = 100
EXEC_TIME_DELTA = 50
EXEC_DDL_DELTA = 95
@pytest.fixture
async def new_base_tok_ctrt(self, acnt0: pv.Account) -> pv.TokCtrtWithoutSplit:
"""
new_base_tok_ctrt is the fixture that registers a new base token contract.
Args:
acnt0 (pv.Account): the account of nonce 0.
Returns:
pv.TokCtrtWithoutSplit: the TokCtrtWithoutSplit instance.
"""
tc = await pv.TokCtrtWithoutSplit.register(acnt0, 1000, 1)
await cft.wait_for_block()
return tc
@pytest.fixture
async def new_target_tok_ctrt(self, acnt0: pv.Account) -> pv.TokCtrtWithoutSplit:
"""
new_target_tok_ctrt is the fixture that registers a new target token contract.
Args:
acnt0 (pv.Account): the account of nonce 0.
Returns:
pv.TokCtrtWithoutSplit: the TokCtrtWithoutSplit instance.
"""
tc = await pv.TokCtrtWithoutSplit.register(acnt0, 1000, 1)
await cft.wait_for_block()
return tc
@pytest.fixture
async def new_option_tok_ctrt(self, acnt0: pv.Account) -> pv.TokCtrtWithoutSplit:
"""
new_option_tok_ctrt is the fixture that registers a new option token contract.
Args:
acnt0 (pv.Account): the account of nonce 0.
Returns:
pv.TokCtrtWithoutSplit: the TokCtrtWithoutSplit instance.
"""
tc = await pv.TokCtrtWithoutSplit.register(acnt0, 1000, 1)
await cft.wait_for_block()
return tc
@pytest.fixture
async def new_proof_tok_ctrt(self, acnt0: pv.Account) -> pv.TokCtrtWithoutSplit:
"""
new_proof_tok_ctrt is the fixture that registers a new proof token contract.
Args:
acnt0 (pv.Account): the account of nonce 0.
Returns:
pv.TokCtrtWithoutSplit: the TokCtrtWithoutSplit instance.
"""
tc = await pv.TokCtrtWithoutSplit.register(acnt0, 1000, 1)
await cft.wait_for_block()
return tc
@pytest.fixture
async def new_base_tok_ctrt_with_tok(
self, new_base_tok_ctrt: pv.TokCtrtWithoutSplit, acnt0: pv.Account
) -> pv.TokCtrtWithoutSplit:
"""
new_base_tok_ctrt_with_tok is the fixture that registers a new TokenWithoutSplit contract and issues base tokens right after it.
Args:
new_base_tok_ctrt (pv.TokCtrtWithoutSplit): The fixture that registers a new TokenWithoutSplit contract.
acnt0 (pv.Account): The account of nonce 0.
Returns:
pv.TokCtrtWithoutSplit: The TokCtrtWithoutSplit instance.
"""
tc = new_base_tok_ctrt
await tc.issue(acnt0, 1000)
await cft.wait_for_block()
return tc
@pytest.fixture
async def new_target_tok_ctrt_with_tok(
self, new_target_tok_ctrt: pv.TokCtrtWithoutSplit, acnt0: pv.Account
) -> pv.TokCtrtWithoutSplit:
"""
new_target_tok_ctrt_with_tok is the fixture that registers a new TokenWithoutSplit contract and issues target tokens right after it.
Args:
new_target_tok_ctrt (pv.TokCtrtWithoutSplit): The fixture that registers a new TokenWithoutSplit contract.
acnt0 (pv.Account): The account of nonce 0.
Returns:
pv.TokCtrtWithoutSplit: The TokCtrtWithoutSplit instance.
"""
tc = new_target_tok_ctrt
await tc.issue(acnt0, 1000)
await cft.wait_for_block()
return tc
@pytest.fixture
async def new_option_tok_ctrt_with_tok(
self, new_option_tok_ctrt: pv.TokCtrtWithoutSplit, acnt0: pv.Account
) -> pv.TokCtrtWithoutSplit:
"""
new_option_tok_ctrt_with_tok is the fixture that registers a new TokenWithoutSplit contract and issues option tokens right after it.
Args:
new_option_tok_ctrt (pv.TokCtrtWithoutSplit): The fixture that registers a new TokenWithoutSplit contract.
acnt0 (pv.Account): The account of nonce 0.
Returns:
pv.TokCtrtWithoutSplit: The TokCtrtWithoutSplit instance.
"""
tc = new_option_tok_ctrt
await tc.issue(acnt0, 1000)
await cft.wait_for_block()
return tc
@pytest.fixture
async def new_proof_tok_ctrt_with_tok(
self, new_proof_tok_ctrt: pv.TokCtrtWithoutSplit, acnt0: pv.Account
) -> pv.TokCtrtWithoutSplit:
"""
new_proof_tok_ctrt_with_tok is the fixture that registers a new TokenWithoutSplit contract and issues proof tokens right after it.
Args:
new_proof_tok_ctrt (pv.TokCtrtWithoutSplit): The fixture that registers a new TokenWithoutSplit contract.
acnt0 (pv.Account): The account of nonce 0.
Returns:
pv.TokCtrtWithoutSplit: The TokCtrtWithoutSplit instance.
"""
tc = new_proof_tok_ctrt
await tc.issue(acnt0, 1000)
await cft.wait_for_block()
return tc
@pytest.fixture
async def new_v_option_ctrt(
self,
acnt0: pv.Account,
new_base_tok_ctrt_with_tok: pv.TokCtrtWithoutSplit,
new_target_tok_ctrt_with_tok: pv.TokCtrtWithoutSplit,
new_option_tok_ctrt_with_tok: pv.TokCtrtWithoutSplit,
new_proof_tok_ctrt_with_tok: pv.TokCtrtWithoutSplit,
    ) -> pv.VOptionCtrt:
"""
new_v_option_ctrt is the fixture that registers a new V Option contract.
Args:
acnt0 (pv.Account): The account of nonce 0.
new_base_tok_ctrt_with_tok (pv.TokCtrtWithoutSplit): The fixture that registers a new token contract without split and issues base tokens right after it.
new_target_tok_ctrt_with_tok (pv.TokCtrtWithoutSplit): The fixture that registers a new token contract without split and issues target tokens right after it.
new_option_tok_ctrt_with_tok (pv.TokCtrtWithoutSplit): The fixture that registers a new token contract without split and issues option tokens right after it.
new_proof_tok_ctrt_with_tok (pv.TokCtrtWithoutSplit): The fixture that registers a new token contract without split and issues proof tokens right after it.
Returns:
            pv.VOptionCtrt: The VOptionCtrt instance.
"""
base_tc = new_base_tok_ctrt_with_tok
target_tc = new_target_tok_ctrt_with_tok
option_tc = new_option_tok_ctrt_with_tok
proof_tc = new_proof_tok_ctrt_with_tok
base_tok_id = pv.Ctrt.get_tok_id(base_tc.ctrt_id, pv.TokenIdx(0))
target_tok_id = pv.Ctrt.get_tok_id(target_tc.ctrt_id, pv.TokenIdx(0))
option_tok_id = pv.Ctrt.get_tok_id(option_tc.ctrt_id, pv.TokenIdx(0))
proof_tok_id = pv.Ctrt.get_tok_id(proof_tc.ctrt_id, pv.TokenIdx(0))
oc = await pv.VOptionCtrt.register(
acnt0,
base_tok_id.data,
target_tok_id.data,
option_tok_id.data,
proof_tok_id.data,
int(time.time() + self.EXEC_TIME_DELTA),
int(time.time() + self.EXEC_DDL_DELTA),
)
await cft.wait_for_block()
await asyncio.gather(
base_tc.deposit(acnt0, oc.ctrt_id.data, 1000),
target_tc.deposit(acnt0, oc.ctrt_id.data, 1000),
option_tc.deposit(acnt0, oc.ctrt_id.data, 1000),
proof_tc.deposit(acnt0, oc.ctrt_id.data, 1000),
)
await cft.wait_for_block()
return oc
@pytest.fixture
async def new_v_option_ctrt_activated(
self,
acnt0: pv.Account,
new_v_option_ctrt: pv.VOptionCtrt,
) -> pv.VOptionCtrt:
"""
        new_v_option_ctrt_activated is the fixture that registers a new V Option contract and activates it.
Args:
acnt0 (pv.Account): The account of nonce 0.
new_v_option_ctrt (pv.VOptionCtrt): The fixture that registers a new V Option contract.
Returns:
pv.VOptionCtrt: The VOptionCtrt instance.
"""
oc = new_v_option_ctrt
api = acnt0.api
resp = await oc.activate(
by=acnt0,
max_issue_num=self.MAX_ISSUE_AMOUNT,
price=10,
price_unit=1,
)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
return oc
@pytest.fixture
async def new_v_option_ctrt_activated_and_minted(
self,
acnt0: pv.Account,
new_v_option_ctrt_activated: pv.VOptionCtrt,
) -> pv.VOptionCtrt:
"""
new_v_option_ctrt_activated_and_minted is the fixture that
- registers a new V Option contract.
        - activates it.
        - mints option tokens.
Args:
acnt0 (pv.Account): The account of nonce 0.
new_v_option_ctrt_activated (pv.VOptionCtrt): The fixture that registers a new V Option contract & activates it.
Returns:
pv.VOptionCtrt: The VOptionCtrt instance.
"""
oc = new_v_option_ctrt_activated
api = acnt0.api
resp = await oc.mint(
by=acnt0,
amount=self.MINT_AMOUNT,
)
await cft.wait_for_block()
await cft.assert_tx_success(api, resp["id"])
return oc
async def test_register(
self,
acnt0: pv.Account,
new_v_option_ctrt: pv.VOptionCtrt,
) -> pv.VOptionCtrt:
"""
test_register tests the method register.
Args:
acnt0 (pv.Account): The account of nonce 0.
new_v_option_ctrt (pv.VOptionCtrt): The fixture that registers a new V Option contract.
Returns:
pv.VOptionCtrt: The VOptionCtrt instance.
"""
oc = new_v_option_ctrt
assert (await oc.maker) == acnt0.addr
return oc
async def test_activate(self, new_v_option_ctrt_activated: pv.VOptionCtrt) -> None:
"""
test_activate tests the method activate.
Args:
new_v_option_ctrt_activated (pv.VOptionCtrt): The fixture that registers a new V Option contract and activates it.
"""
oc = new_v_option_ctrt_activated
assert (await oc.max_issue_num).data == self.MAX_ISSUE_AMOUNT
async def test_mint(
self, acnt0: pv.Account, new_v_option_ctrt_activated_and_minted: pv.VOptionCtrt
) -> None:
"""
test_mint tests the method mint.
Args:
acnt0 (pv.Account): The account of nonce 0.
new_v_option_ctrt_activated_and_minted (pv.VOptionCtrt): The fixture that registers a new V Option contract, activates it, and mints option tokens.
"""
oc = new_v_option_ctrt_activated_and_minted
assert (await oc.get_target_tok_bal(acnt0.addr.data)).data == (
self.MAX_ISSUE_AMOUNT - self.MINT_AMOUNT
)
async def test_unlock(
self, acnt0: pv.Account, new_v_option_ctrt_activated_and_minted: pv.VOptionCtrt
) -> None:
"""
test_unlock tests the method unlock.
Args:
acnt0 (pv.Account): The account of nonce 0.
            new_v_option_ctrt_activated_and_minted (pv.VOptionCtrt): The fixture that registers a new V Option contract, activates it, and mints option tokens.
"""
oc = new_v_option_ctrt_activated_and_minted
api = acnt0.api
resp = await oc.unlock(by=acnt0, amount=self.UNLOCK_AMOUNT)
await cft.wait_for_block()
unlock_tx_id = resp["id"]
await cft.assert_tx_success(api, unlock_tx_id)
assert (
await oc.get_target_tok_bal(acnt0.addr.data)
).data == self.MAX_ISSUE_AMOUNT - self.MINT_AMOUNT + self.UNLOCK_AMOUNT
async def test_execute_and_collect(
self, acnt0: pv.Account, new_v_option_ctrt_activated_and_minted: pv.VOptionCtrt
) -> None:
"""
        test_execute_and_collect tests the methods execute and collect.
        Args:
            acnt0 (pv.Account): The account of nonce 0.
            new_v_option_ctrt_activated_and_minted (pv.VOptionCtrt): The fixture that registers a new V Option contract, activates it, and mints option tokens.
"""
oc = new_v_option_ctrt_activated_and_minted
api = acnt0.api
exec_amount = 10
target_tok_bal_init = await oc.get_target_tok_bal(acnt0.addr.data)
await asyncio.sleep(cft.AVG_BLOCK_DELAY * 6)
exe_tx = await oc.execute(acnt0, exec_amount)
await cft.wait_for_block()
exe_tx_id = exe_tx["id"]
await cft.assert_tx_success(api, exe_tx_id)
target_tok_bal_exec = await oc.get_target_tok_bal(acnt0.addr.data)
assert (target_tok_bal_exec.data - target_tok_bal_init.data) == exec_amount
await asyncio.sleep(cft.AVG_BLOCK_DELAY * 5)
col_tx = await oc.collect(acnt0, 10)
await cft.wait_for_block()
col_tx_id = col_tx["id"]
await cft.assert_tx_success(api, col_tx_id)
target_tok_bal_col = await oc.get_target_tok_bal(acnt0.addr.data)
assert (target_tok_bal_col.data - target_tok_bal_exec.data) == 9
@pytest.mark.whole
async def test_as_whole(
self,
acnt0: pv.Account,
new_v_option_ctrt_activated_and_minted: pv.VOptionCtrt,
) -> None:
"""
        test_as_whole tests methods of VOptionCtrt as a whole so as to reduce resource consumption.
Args:
acnt0 (pv.Account): The account of nonce 0.
            new_v_option_ctrt_activated_and_minted (pv.VOptionCtrt): The fixture that registers a new V Option contract, activates it, and mints option tokens.
"""
oc = new_v_option_ctrt_activated_and_minted
await self.test_register(acnt0, oc)
await self.test_activate(oc)
await self.test_mint(acnt0, oc)
await self.test_unlock(acnt0, oc)
await self.test_execute_and_collect(acnt0, oc)
``` |
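The sleeps in `test_execute_and_collect` mirror the timing set up in `new_v_option_ctrt`: the contract is registered with an execute time `EXEC_TIME_DELTA` seconds in the future and an execute deadline `EXEC_DDL_DELTA` seconds in the future, so the test has to wait into the first window before executing and past the deadline before collecting. The sketch below only restates that window arithmetic with the constants defined on the test class; the exact semantics of execute/collect come from the V Option contract itself.

```python
# Illustration only: the execute/collect windows implied by the fixture above.
import time

register_ts = int(time.time())
execute_time = register_ts + 50       # EXEC_TIME_DELTA
execute_deadline = register_ts + 95   # EXEC_DDL_DELTA

# execute() is expected to succeed only inside [execute_time, execute_deadline);
# collect() is expected to succeed only after execute_deadline.
```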
{
"source": "josepic99/mesas",
"score": 3
} |
#### File: Flockers/flockers/boid.py
```python
import numpy as np
from mesa import Agent
class Boid(Agent):
'''
A Boid-style flocker agent.
The agent follows three behaviors to flock:
- Cohesion: steering towards neighboring agents.
- Separation: avoiding getting too close to any other agent.
        - Alignment: trying to fly in the same direction as the neighbors.
Boids have a vision that defines the radius in which they look for their
neighbors to flock with. Their speed (a scalar) and heading (a unit vector)
define their movement. Separation is their desired minimum distance from
any other Boid.
'''
def __init__(self, unique_id, model, pos, speed=5, heading=None,
vision=5, separation=1):
'''
Create a new Boid flocker agent.
Args:
            unique_id: Unique agent identifier.
            model: The model instance the Boid belongs to.
            pos: Starting position.
speed: Distance to move per step.
heading: numpy vector for the Boid's direction of movement.
vision: Radius to look around for nearby Boids.
separation: Minimum distance to maintain from other Boids.
'''
super().__init__(unique_id, model)
self.pos = pos
self.speed = speed
if heading is not None:
self.heading = heading
else:
self.heading = np.random.random(2)
self.heading /= np.linalg.norm(self.heading)
self.vision = vision
self.separation = separation
def cohere(self, neighbors):
'''
Return the vector toward the center of mass of the local neighbors.
'''
        center = np.array([0.0, 0.0])
        for neighbor in neighbors:
            center += np.array(neighbor.pos)
        # Return a vector from this Boid toward the neighbors' center of mass.
        return center / len(neighbors) - np.array(self.pos)
def separate(self, neighbors):
'''
Return a vector away from any neighbors closer than separation dist.
'''
        my_pos = np.array(self.pos)
        # Use a float accumulator; an integer array would truncate sub-unit offsets.
        sep_vector = np.array([0.0, 0.0])
        for neighbor in neighbors:
            their_pos = np.array(neighbor.pos)
            dist = np.linalg.norm(my_pos - their_pos)
            if dist < self.separation:
                sep_vector -= their_pos - my_pos
        return sep_vector
def match_heading(self, neighbors):
'''
Return a vector of the neighbors' average heading.
'''
        # Use a float accumulator; casting unit headings to int would zero them out.
        mean_heading = np.array([0.0, 0.0])
        for neighbor in neighbors:
            mean_heading += neighbor.heading
        return mean_heading / len(neighbors)
def step(self):
'''
Get the Boid's neighbors, compute the new vector, and move accordingly.
'''
neighbors = self.model.space.get_neighbors(self.pos, self.vision, False)
if len(neighbors) > 0:
cohere_vector = self.cohere(neighbors)
separate_vector = self.separate(neighbors)
match_heading_vector = self.match_heading(neighbors)
self.heading += (cohere_vector +
separate_vector +
match_heading_vector)
self.heading /= np.linalg.norm(self.heading)
new_pos = np.array(self.pos) + self.heading * self.speed
new_x, new_y = new_pos
self.model.space.move_agent(self, (new_x, new_y))
``` |
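The agent above expects a model that exposes a continuous `space` and steps a schedule; the repository's own model module is not part of this excerpt. The following is only a rough driver sketch assuming the pre-3.0 Mesa API that the agent itself uses (`ContinuousSpace`, `RandomActivation`), with arbitrary population and dimensions; the import path is taken from the file header above and may need adjusting.

```python
from mesa import Model
from mesa.space import ContinuousSpace
from mesa.time import RandomActivation

from flockers.boid import Boid  # the agent defined above


class BoidFlockers(Model):
    """Hypothetical driver model for the Boid agent."""

    def __init__(self, population=50, width=100, height=100):
        super().__init__()
        self.schedule = RandomActivation(self)
        self.space = ContinuousSpace(width, height, torus=True)
        for i in range(population):
            pos = (self.random.random() * width, self.random.random() * height)
            boid = Boid(i, self, pos, speed=5, vision=10, separation=2)
            self.space.place_agent(boid, pos)
            self.schedule.add(boid)

    def step(self):
        # Each Boid reads its neighbors from self.space and moves itself.
        self.schedule.step()
```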
{
"source": "josepilco7501/TECSUP-DAE-2021-2",
"score": 2
} |
#### File: lab02/encuesta/views.py
```python
from django.shortcuts import render
# Create your views here.
def index(request):
context = {
'titulo':"Formulario",
}
return render(request, 'encuesta/formulario.html',context)
def enviar(request):
context = {
'titulo' : "Respuesta",
'nombre' : request.POST['nombre'],
'clave' : request.POST['password'],
'educacion' : request.POST['educacion'],
'nacionalidad' : request.POST['nacionalidad'],
'idiomas' : request.POST.getlist('idiomas'),
'correo' : request.POST['email'],
'website' : request.POST['sitioweb'],
}
return render(request, 'encuesta/respuesta.html',context)
```
#### File: lab02/operaciones/views.py
```python
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def calculadora(request):
context = {
'titulo' : "Ingrese los numeros",
}
return render(request,'operaciones/formulario.html',context)
def resultado(request):
a=request.POST['numeroa']
b=request.POST['numerob']
if request.POST['operacion'] == 'suma':
resultado= int(a)+int(b)
if request.POST['operacion'] == 'resta':
resultado= int(a)-int(b)
if request.POST['operacion'] == 'multiplicacion':
resultado= int(a)*int(b)
context = {
'operacion' : request.POST['operacion'],
'numeroa' : request.POST['numeroa'],
'numerob' : request.POST['numerob'],
'titulo' : "Resultado de la operación",
'resultado' :resultado
}
return render(request,'operaciones/resultados.html',context)
def datosCilindro(request):
context = {
'titulo' : "CÁLCULO DEL VOLUMEN DE UN CILINDRO "
}
return render(request,'operaciones/formCilindro.html',context)
def resultVolumen(request):
diametro=request.POST['diametro']
altura=request.POST['altura']
radio=float(diametro)/2
volumen=(3.1416*(radio)**2)*float(altura)
context = {
'titulo' : 'VOLUMEN DEL CILINDRO',
'volumen' : volumen
}
return render(request,'operaciones/resultVolumen.html',context)
``` |
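These views read named POST fields and render templates, but the URL wiring and the form templates are not included in this excerpt. The snippet below is only an assumed `urls.py` for the `operaciones` app, mapping each view shown above to a route; the route strings and names are guesses, and the templates' form `action` attributes would have to post to the `resultado` and `resultVolumen` routes.

```python
# operaciones/urls.py (hypothetical wiring for the views above)
from django.urls import path

from . import views

urlpatterns = [
    path('calculadora/', views.calculadora, name='calculadora'),
    path('resultado/', views.resultado, name='resultado'),
    path('cilindro/', views.datosCilindro, name='datosCilindro'),
    path('cilindro/volumen/', views.resultVolumen, name='resultVolumen'),
]
```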
{
"source": "JosepLeder/CLKRobovat",
"score": 2
} |
#### File: policies/cnn_grasp_policy/CornellDataset.py
```python
import os
import glob
import torch
import numpy as np
from torch.utils.data import Dataset
from torchvision.transforms import ToTensor
from robovat.utils.grasp_rect import GraspRectangle, rectangles2image
from imageio import imread
def normalize(img):
minimg = np.min(img)
maximg = np.max(img)
return (img - minimg) * 2 / (maximg - minimg) - 1
class CornellDataset(Dataset):
def __init__(self, data_path='/home/josep/e/orange/cornell/') -> None:
graspf = glob.glob(os.path.join(data_path, '*', 'pcd*cpos.txt'))
depthf = [f.replace('cpos.txt', 'd.tiff') for f in graspf]
self.grs = []
self.depth = []
self.output_shape = [424, 512]
for g, d in zip(graspf, depthf):
grasp_rects = GraspRectangle.load_from_cornell_file(g, self.output_shape)
self.grs.append(grasp_rects)
depth = np.array(imread(d))
depth = depth[:self.output_shape[0], :self.output_shape[1]]
new_shape = (1, depth.shape[0], depth.shape[1])
self.depth.append(depth.reshape(new_shape))
def __len__(self):
return len(self.depth)
def __getitem__(self, idx):
return torch.tensor(normalize(self.depth[idx]), dtype=torch.float32), \
rectangles2image(self.grs[idx], self.depth[idx].shape)
```
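A minimal usage sketch for the dataset above: it assumes the Cornell data lives at the path passed in, that `robovat.utils.grasp_rect` is importable, and that `CornellDataset` can be imported from the module above (the import path here is a placeholder). Batching goes through the standard `torch.utils.data.DataLoader`.

```python
# Hypothetical usage of CornellDataset; path, import path and batch size are placeholders.
from torch.utils.data import DataLoader

from CornellDataset import CornellDataset  # adjust to the actual project layout

dataset = CornellDataset(data_path='/path/to/cornell/')
loader = DataLoader(dataset, batch_size=8, shuffle=True)

for depth_batch, grasp_batch in loader:
    # depth_batch: (8, 1, 424, 512) float32 depth images normalized to [-1, 1];
    # grasp_batch comes from rectangles2image, so its format depends on that helper.
    print(depth_batch.shape)
    break
```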
#### File: robovat/policies/grasp_policy.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import numpy as np
from robovat.policies import image_grasp_sampler
from robovat.policies import policy
from robovat.utils.yaml_config import YamlConfig
class AntipodalGrasp4DofPolicy(policy.Policy):
"""Antipodal grasp 4-DoF policy."""
def __init__(self,
env,
config=None):
"""Initialize.
Args:
env: Environment.
config: Policy configuration.
"""
super(AntipodalGrasp4DofPolicy, self).__init__(env, config)
config = self.config
self._sampler = image_grasp_sampler.AntipodalDepthImageGraspSampler(
friction_coef=config.SAMPLER.FRICTION_COEF,
depth_grad_thresh=config.SAMPLER.DEPTH_GRAD_THRESH,
depth_grad_gaussian_sigma=config.SAMPLER.DEPTH_GRAD_GAUSSIAN_SIGMA,
downsample_rate=config.SAMPLER.DOWNSAMPLE_RATE,
max_rejection_samples=config.SAMPLER.MAX_REJECTION_SAMPLES,
crop=config.SAMPLER.CROP,
min_dist_from_boundary=config.SAMPLER.MIN_DIST_FROM_BOUNDARY,
min_grasp_dist=config.SAMPLER.MIN_GRASP_DIST,
angle_dist_weight=config.SAMPLER.ANGLE_DIST_WEIGHT,
depth_samples_per_grasp=config.SAMPLER.DEPTH_SAMPLES_PER_GRASP,
min_depth_offset=config.SAMPLER.MIN_DEPTH_OFFSET,
max_depth_offset=config.SAMPLER.MAX_DEPTH_OFFSET,
depth_sample_window_height=(
config.SAMPLER.DEPTH_SAMPLE_WINDOW_HEIGHT),
depth_sample_window_width=(
config.SAMPLER.DEPTH_SAMPLE_WINDOW_WIDTH),
gripper_width=config.GRIPPER_WIDTH)
@property
def default_config(self):
"""Load the default configuration file."""
config_path = os.path.join('configs', 'policies',
'antipodal_grasp_4dof_policy.yaml')
assert os.path.exists(config_path), (
'Default configuration file %s does not exist' % (config_path)
)
return YamlConfig(config_path).as_easydict()
def _action(self, observation):
"""Implementation of action.
Args:
observation: The observation of the current step.
Returns:
action: The action of the current step.
"""
depth = observation['depth']
intrinsics = observation['intrinsics']
grasps = self._sampler.sample(depth, intrinsics, 1)
return np.squeeze(grasps, axis=0)
# TODO: initial grasp policy
class GraspSegPolicy(policy.Policy):
"""grasp 4-DoF policy from segmentation mask."""
def __init__(self,
env,
config=None):
"""Initialize.
Args:
env: Environment.
config: Policy configuration.
"""
super(GraspSegPolicy, self).__init__(env, config)
self.config = config
self.env = env
self.sampler = image_grasp_sampler.SegmentationGraspSampler(gripper_width=0.04)
@property
def default_config(self):
"""Load the default configuration file."""
config_path = os.path.join('configs', 'policies',
'antipodal_grasp_4dof_policy.yaml')
assert os.path.exists(config_path), (
'Default configuration file %s does not exist' % (config_path)
)
return YamlConfig(config_path).as_easydict()
def _action(self, observation):
"""Implementation of action.
Args:
observation: The observation of the current step.
Returns:
action: The action of the current step.
"""
rgb = observation['rgb']
depth = observation['depth']
camera = self.env.camera()
grasps = self.sampler.sample(rgb, depth, camera, 1)
print("="*50)
print(grasps)
print("="*50)
return np.squeeze(grasps, axis=0)
```
#### File: robovat/reward_fns/repeat_graspable_reward.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from robovat.reward_fns import reward_fn
from robovat.utils.logging import logger
class RepeatGraspReward(reward_fn.RewardFn):
"""Reward function of the environments."""
def __init__(self,
name,
end_effector_name,
graspable_names,
terminate_after_grasp=True,
streaming_length=1000):
"""Initialize.
Args:
name: Name of the reward.
end_effector_name: Name of the end effector.
graspable_names: Names of the graspable objects.
terminate_after_grasp: The episode will be terminated after a grasp
                attempt if True.
streaming_length: The streaming length for keeping the history.
"""
self.name = name
self.end_effector_name = end_effector_name
self.graspable_names = graspable_names
self.terminate_after_grasp = terminate_after_grasp
self.streaming_length = streaming_length
self.env = None
self.end_effector = None
self.graspables = None
self.history = []
def on_episode_start(self):
"""Called at the start of each episode."""
self.end_effector = self.env.simulator.bodies[self.end_effector_name]
self.graspables = [self.env.simulator.bodies[name] for name in self.graspable_names]
def get_reward(self):
"""Returns the reward value of the current step.
Returns:
success: The success signal.
terminate_after_grasp: The termination signal.
"""
termination = (self.env._num_steps >= self.env.config.RESET.MAX_ACTIONS_PER_EPS) or \
(len(self.env.graspables) <= self.env.config.SIM.GRASPABLE.NUM // 2)
success = self.env.success
self._update_history(success)
success_rate = np.mean(self.history or [-1])
logger.debug('Grasp Success: %r, Success Rate %.3f',
success, success_rate)
return success, termination
def _update_history(self, success):
"""Update the reward history.
Args:
The success signal.
"""
self.history.append(success)
if len(self.history) > self.streaming_length:
self.history = self.history[-self.streaming_length:]
```
#### File: CLKRobovat/tools/run_env.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import ast
import os
import random
import socket
import uuid
from builtins import input
import numpy as np
import h5py
import _init_paths # NOQA
from robovat import envs
from robovat import policies
from robovat.io import hdf5_utils
from robovat.io.episode_generation import generate_episodes
from robovat.simulation.simulator import Simulator
from robovat.utils import time_utils
from robovat.utils.logging import logger
from robovat.utils.yaml_config import YamlConfig
def parse_args():
"""
Parse arguments.
Returns:
args: The parsed arguments.
"""
parser = argparse.ArgumentParser()
parser.add_argument('--env', dest='env', type=str, help='The environment.', required=True)
parser.add_argument('--policy', dest='policy', type=str, help='The policy.', default=None)
parser.add_argument('--env_config', dest='env_config', type=str, help='The configuration file for the environment.', default=None)
parser.add_argument('--policy_config', dest='policy_config', type=str, help='The configuration file for the policy.', default=None)
parser.add_argument('--config_bindings', dest='config_bindings', type=str, help='The configuration bindings.', default=None)
    parser.add_argument('--use_simulator', dest='use_simulator', type=int, help='Run experiments in the simulation if it is True.', default=1)
parser.add_argument(
'--assets',
dest='assets_dir',
type=str,
help='The assets directory.',
default='./assets')
parser.add_argument(
'--output',
dest='output_dir',
type=str,
help='The output directory to save the episode history.',
default=None)
parser.add_argument(
'--num_steps',
dest='num_steps',
type=int,
help='Maximum number of time steps for each episode.',
default=None)
parser.add_argument(
'--num_episodes',
dest='num_episodes',
type=int,
help='Maximum number of episodes.',
default=None)
parser.add_argument(
'--num_episodes_per_file',
dest='num_episodes_per_file',
type=int,
help='The maximum number of episodes saved in each file.',
default=1000)
parser.add_argument(
'--debug',
dest='debug',
type=int,
help='True for debugging, False otherwise.',
default=0)
parser.add_argument(
'--worker_id',
dest='worker_id',
type=int,
help='The worker ID for running multiple simulations in parallel.',
default=0)
parser.add_argument(
'--seed',
dest='seed',
type=int,
help='None for random; any fixed integers for deterministic.',
default=None)
parser.add_argument(
'--pause',
dest='pause',
type=bool,
help='Whether to pause between episodes.',
default=False)
parser.add_argument(
'--timeout',
dest='timeout',
type=float,
help='Seconds of timeout for an episode.',
default=120)
args = parser.parse_args()
return args
def parse_config_files_and_bindings(args):
if args.env_config is None:
env_config = None
else:
env_config = YamlConfig(args.env_config).as_easydict()
if args.policy_config is None:
policy_config = None
else:
policy_config = YamlConfig(args.policy_config).as_easydict()
if args.config_bindings is not None:
parsed_bindings = ast.literal_eval(args.config_bindings)
logger.info('Config Bindings: %r', parsed_bindings)
env_config.update(parsed_bindings)
policy_config.update(parsed_bindings)
return env_config, policy_config
def main():
args = parse_args()
# Configuration.
env_config, policy_config = parse_config_files_and_bindings(args)
# Set the random seed.
if args.seed is not None:
random.seed(args.seed)
np.random.seed(args.seed)
# Simulator.
if args.use_simulator:
simulator = Simulator(worker_id=args.worker_id,
use_visualizer=bool(args.debug),
assets_dir=args.assets_dir)
else:
simulator = None
# Environment.
env_class = getattr(envs, args.env)
env = env_class(simulator=simulator,
config=env_config,
debug=args.debug)
# Policy.
policy_class = getattr(policies, args.policy)
policy = policy_class(env=env, config=policy_config)
# Output directory.
if args.output_dir is not None:
hostname = socket.gethostname()
hostname = hostname.split('.')[0]
output_dir = os.path.abspath(args.output_dir)
        # Namespace the output directory by worker ID (the parser defines no `key` argument).
        output_dir = os.path.join(output_dir, hostname, '%02d' % (args.worker_id))
if not os.path.isdir(output_dir):
logger.info('Making output directory %s...', output_dir)
os.makedirs(output_dir)
# Generate and write episodes.
logger.info('Start running...')
# env.reset()
num_episodes_this_file = 0
for episode_index, episode in generate_episodes(
env,
policy,
num_steps=args.num_steps,
num_episodes=args.num_episodes,
timeout=args.timeout,
debug=args.debug):
if args.output_dir:
# Create a file for saving the episode data.
if num_episodes_this_file == 0:
timestamp = time_utils.get_timestamp_as_string()
filename = 'episodes_%s.hdf5' % (timestamp)
output_path = os.path.join(output_dir, filename)
logger.info('Created a new file %s...', output_path)
# Append the episode to the file.
logger.info('Saving episode %d to file %s (%d / %d)...',
episode_index,
output_path,
num_episodes_this_file,
args.num_episodes_per_file)
with h5py.File(output_path, 'a') as fout:
name = str(uuid.uuid4())
group = fout.create_group(name)
hdf5_utils.write_data_to_hdf5(group, episode)
num_episodes_this_file += 1
num_episodes_this_file %= args.num_episodes_per_file
if args.pause:
input('Press [Enter] to start a new episode.')
if __name__ == '__main__':
main()
``` |
{
"source": "JosepLeder/Lekai-Reinforcement-Learning-Notes",
"score": 3
} |
#### File: code/agent/DDQN.py
```python
import gym
import math
import random
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from collections import namedtuple, deque
from itertools import count
from gym.wrappers.monitoring.video_recorder import VideoRecorder
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
# set up matplotlib
is_ipython = 'inline' in matplotlib.get_backend()
if is_ipython:
from IPython import display
plt.ion()
# set up device
device = torch.device("cpu")
# define transition
Transition = namedtuple('Transition',
('state', 'action', 'next_state', 'reward'))
steps_cnt = 0
episode_durations = []
evaluate_performance = []
# Create a queue to store transitions and use it for experience replay
class ReplayMemory(object):
def __init__(self, capacity):
self.capacity = capacity
self.memory = deque(maxlen=capacity)
self.position = 0
def push(self, *args):
self.memory.append(Transition(*args))
def sample(self, batch_size):
return random.sample(self.memory, batch_size)
def __len__(self):
return len(self.memory)
class DQN(nn.Module):
# use simple MLP network as Q-Function
def __init__(self, inputs, outputs):
super(DQN, self).__init__()
self.l1 = nn.Linear(inputs, inputs * 24)
self.l2 = nn.Linear(inputs * 24, inputs * 24)
self.head = nn.Linear(inputs * 24, outputs)
def forward(self, x):
x = F.relu(self.l1(x))
x = F.relu(self.l2(x))
return self.head(x.view(x.size(0), -1))
class Agent(object):
def __init__(self, hp, env_name='CartPole-v1'):
# set up environment
self.env = gym.make(env_name)
self.env_name = env_name
# Get number of actions from gym action and state space
random_state = self.env.reset()
self.n_states = random_state.size
self.n_actions = self.env.action_space.n
# Create network to represent Q-Function
self.current_net = DQN(self.n_states, self.n_actions).to(device)
self.target_net = DQN(self.n_states, self.n_actions).to(device)
self.target_net.load_state_dict(self.current_net.state_dict())
self.target_net.eval()
self.optimizer = optim.Adam(self.current_net.parameters())
self.memory = ReplayMemory(hp.MEM_REPLAY_SIZE)
self.steps_cnt = 0
class HyperParameters(object):
def __init__(self, params):
self.MEM_REPLAY_SIZE = params['MEM_REPLAY_SIZE']
self.BATCH_SIZE = params['BATCH_SIZE']
self.GAMMA = params['GAMMA']
self.EPS_START = params['EPS_START']
self.EPS_END = params['EPS_END']
self.EPS_DECAY = params['EPS_DECAY']
self.EVALUATE_FREQUENCY = params['EVALUATE_FREQUENCY']
self.ALTER_TARGET_UPDATE_RATE = params['ALTER_TARGET_UPDATE_RATE']
self.MAX_EPISODES = params['MAX_EPISODES']
def state2tensor(state):
return torch.tensor(state, dtype=torch.float).unsqueeze(0).to(device)
# Using epsilon greedy policy to select an action
def select_action(state, agent, hp):
global steps_cnt
sample = random.random()
eps_threshold = hp.EPS_END + (hp.EPS_START - hp.EPS_END) * \
math.exp(-1. * steps_cnt / hp.EPS_DECAY)
steps_cnt += 1
if sample > eps_threshold:
with torch.no_grad():
# t.max(1) will return largest column value of each row.
# second column on max result is index of where max element was
# found, so we pick action with the larger expected reward.
return agent.current_net(state).max(1)[1].view(1, 1)
else:
return torch.tensor([[random.randrange(agent.n_actions)]], device=device, dtype=torch.long)
# Select the action with the highest expected reward
def select_best_action(state, agent):
with torch.no_grad():
return agent.current_net(state).max(1)[1].view(1, 1)
def plot_durations(scores_to_win, hp):
plt.figure(2)
plt.clf()
durations_t = torch.tensor(episode_durations, dtype=torch.float)
scores_to_win = torch.tensor([scores_to_win] * len(episode_durations))
plt.title('Training...')
plt.xlabel('Episode')
plt.ylabel('Duration')
plt.plot(durations_t.numpy(), label="Explore Reward")
plt.plot([x * hp.EVALUATE_FREQUENCY for x in range(0, len(evaluate_performance))],
evaluate_performance, label="Optimal Score")
plt.plot(scores_to_win.numpy(), label="Target Score")
# Compute and plot current 10 episodes averages
if len(durations_t) >= 10:
means = durations_t.unfold(0, 10, 1).mean(1).view(-1)
means = torch.cat((durations_t[0:9], means))
plt.plot(means.numpy(), label="Average Explore Reward")
plt.legend()
plt.pause(0.001) # pause a bit so that plots are updated
if is_ipython:
display.clear_output(wait=True)
display.display(plt.gcf())
def optimize_model(agent, hp):
if len(agent.memory) < hp.BATCH_SIZE:
return
transitions = agent.memory.sample(hp.BATCH_SIZE)
# Transpose the batch (see https://stackoverflow.com/a/19343/3343043 for
# detailed explanation). This converts batch-array of Transitions
# to Transition of batch-arrays.
batch = Transition(*zip(*transitions))
# Compute a mask of non-final states and concatenate the batch elements
# (a final state would've been the one after which simulation ended)
non_final_mask = torch.tensor(tuple(map(lambda s: s is not None,
batch.next_state)), device=device, dtype=torch.bool)
non_final_next_states = torch.cat([s for s in batch.next_state
if s is not None])
state_batch = torch.cat(batch.state)
action_batch = torch.cat(batch.action)
reward_batch = torch.cat(batch.reward)
# Compute Q(s_t, a) - the model computes Q(s_t), then we select the
# columns of actions taken. These are the actions which would've been taken
# for each batch state according to current_net
state_action_values = agent.current_net(state_batch).gather(1, action_batch)
    # Compute max_{a_{t+1}} Q(s_{t+1}, a_{t+1}) for all next states.
# Expected values of actions for non_final_next_states are computed based
# on the "older" target_net; selecting their best reward with max(1)[0].
next_state_values = torch.zeros(hp.BATCH_SIZE, device=device)
next_state_values[non_final_mask] = agent.target_net(non_final_next_states).max(1)[0].detach()
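    # A hedged sketch of a Double DQN target (the file name suggests DDQN, while the
    # line above is the vanilla DQN target); action selection would use current_net
    # and evaluation would use target_net:
    # next_actions = agent.current_net(non_final_next_states).max(1)[1].unsqueeze(1)
    # next_state_values[non_final_mask] = agent.target_net(
    #     non_final_next_states).gather(1, next_actions).squeeze(1).detach()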
# Compute the expected Q values
expected_state_action_values = (next_state_values * hp.GAMMA) + reward_batch
# Compute MSE loss
loss = F.mse_loss(state_action_values, expected_state_action_values.unsqueeze(1))
# Optimize the model
agent.optimizer.zero_grad()
loss.backward()
    # clip the gradient to avoid gradient explosion
for param in agent.current_net.parameters():
param.grad.data.clamp_(-1, 1)
agent.optimizer.step()
def evaluate_model(agent, scores_to_win):
durations = []
for i_episode in range(1):
# Initialize the environment and state
state = agent.env.reset()
state = state2tensor(state)
total_reward = 0
for t in count():
            # Select the action with the highest expected reward
action = select_best_action(state, agent=agent)
next_state, reward, done, _ = agent.env.step(action.item())
total_reward += reward
if done:
next_state = None
else:
next_state = state2tensor(next_state)
state = next_state
if done or t + 1 >= 2000:
durations.append(total_reward)
break
mean = np.mean(durations)
evaluate_performance.append(mean)
if mean > scores_to_win:
print("Solved! Mean scores: {}".format(mean))
return True
else:
print("Unsolved! Mean scores: {}".format(mean))
return False
def get_scores_to_win(agent):
try:
scores_to_win = agent.env.unwrapped.reward_threshold
except AttributeError:
try:
scores_to_win = agent.env.spec.reward_threshold
except AttributeError:
scores_to_win = agent.env.unwrapped.spec.reward_threshold
return scores_to_win if scores_to_win is not None else -10
def get_reward(agent, state, next_state, reward, done, total_reward):
if agent.env_name in ['CartPole-v0', 'CartPole-v1', 'Acrobot-v1']:
return reward
elif agent.env_name == 'MountainCar-v0':
if done:
return 210 + total_reward
else:
return abs(next_state[0] - state[0])[0]
else:
return 0
def save_video(agent, video_path):
num_episodes = 0
# video_recorder = None
video_recorder = VideoRecorder(
agent.env, video_path, enabled=video_path is not None)
state = agent.env.reset()
state = state2tensor(state)
for t in count():
agent.env.unwrapped.render()
video_recorder.capture_frame()
action = select_best_action(state=state, agent=agent)
next_state, rew, done, info = agent.env.step(action.item())
next_state = state2tensor(next_state)
state = next_state
if done:
# save video of first episode
print("Saved video.")
video_recorder.close()
video_recorder.enabled = False
break
def train_model(params, env='CartPole-v1'):
hp = HyperParameters(params)
agent = Agent(hp, env_name=env)
keys = agent.current_net.state_dict().keys()
scores_to_win = get_scores_to_win(agent=agent)
for i_episode in range(hp.MAX_EPISODES):
# Initialize the environment and state
state = agent.env.reset()
state = state2tensor(state)
total_reward = 0
for t in count():
# Select and perform an action
action = select_action(state, agent, hp)
next_state, reward, done, _ = agent.env.step(action.item())
reward = get_reward(agent, state, next_state, reward, done, -t)
total_reward += reward
reward = torch.tensor([reward], device=device)
if done:
next_state = None
else:
next_state = state2tensor(next_state)
# Store the transition in memory
agent.memory.push(state, action, next_state, reward)
# Move to the next state
state = next_state
# Perform one step of the optimization (on the target network)
optimize_model(agent=agent, hp=hp)
# Update the target network using alternative target network method
# phi = tau * phi + (1 - tau) * phi_updated
target_state = agent.target_net.state_dict()
policy_state = agent.current_net.state_dict()
for key in keys:
target_state[key] = hp.ALTER_TARGET_UPDATE_RATE * target_state[key] + \
(1 - hp.ALTER_TARGET_UPDATE_RATE) * policy_state[key]
agent.target_net.load_state_dict(target_state)
if done:
episode_durations.append(total_reward)
plot_durations(scores_to_win, hp=hp)
break
if i_episode % hp.EVALUATE_FREQUENCY == 0 and evaluate_model(agent=agent, scores_to_win=scores_to_win):
print("Train finished after {} episodes!".format(i_episode + 1))
break
plot_durations(scores_to_win, hp=hp)
save_video(agent=agent, video_path="video/" + "DDQN_" + env + ".mp4")
agent.env.render()
agent.env.close()
plt.ioff()
plt.show()
if __name__ == '__main__':
train_model({'MEM_REPLAY_SIZE': 150000,
'BATCH_SIZE': 128,
'GAMMA': 0.999,
'EPS_START': 0.9,
'EPS_END': 0.08,
'EPS_DECAY': 200,
'EVALUATE_FREQUENCY': 20,
'ALTER_TARGET_UPDATE_RATE': 0.995,
'MAX_EPISODES': 1000})
```
#### File: Lekai-Reinforcement-Learning-Notes/code/distributions.py
```python
import torch
import torch.nn as nn
from utils import init
class DiagGaussianDistribution(nn.Module):
def __init__(self, num_inputs, num_outputs):
super(DiagGaussianDistribution, self).__init__()
init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
constant_(x, 0))
self.fc_mean = init_(nn.Linear(num_inputs, num_outputs))
self._bias = nn.Parameter(torch.zeros(num_outputs).unsqueeze(1))
def forward(self, x):
action_mean = self.fc_mean(x)
# An ugly hack for my KFAC implementation.
zeros = torch.zeros(action_mean.size())
if zeros.dim() == 2:
bias = self._bias.t().view(1, -1)
else:
bias = self._bias.t().view(1, -1, 1, 1)
action_logstd = zeros + bias
return FixedNormal(action_mean, action_logstd.exp())
# Normal
class FixedNormal(torch.distributions.Normal):
def log_probs(self, actions):
return super().log_prob(actions).sum(-1, keepdim=True)
    def entropy(self):
        return super().entropy().sum(-1)
def mode(self):
return self.mean
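

# A minimal usage sketch; the feature and action dimensions below are assumptions,
# not values taken from this repository:
# dist_head = DiagGaussianDistribution(num_inputs=64, num_outputs=4)
# features = torch.randn(8, 64)          # a batch of actor features
# dist = dist_head(features)             # FixedNormal over 4 action dimensions
# actions = dist.sample()
# log_probs = dist.log_probs(actions)    # shape (8, 1)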
``` |
{
"source": "JosepLeder/RL-Graph-Matching",
"score": 3
} |
#### File: agent/nets/GraphConvNet.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_scatter import scatter_mean
from torch.nn import Linear, ReLU
from torch_geometric.nn import GraphConv
class GraphConvNet(nn.Module):
def __init__(self, n_feat, n_hid, n_out):
        super(GraphConvNet, self).__init__()
self.conv1 = GraphConv(n_feat, n_hid)
self.conv2 = GraphConv(n_hid, n_hid * 2)
self.conv3 = GraphConv(n_hid * 2, n_out)
def forward(self, data):
data.x = F.elu(self.conv1(data.x, data.edge_index))
data.x = F.elu(self.conv2(data.x, data.edge_index))
data.x = F.elu(self.conv3(data.x, data.edge_index))
x_1 = scatter_mean(data.x, data.batch, dim=0)
x = x_1
return x
class DoubleGraphConvNet(nn.Module):
def __init__(self, graph, subgraph, point):
        super(DoubleGraphConvNet, self).__init__()
self.graph_conv = GraphConvNet(graph.n_feat, graph.n_feat * 2, graph.n_feat * 3)
self.subgraph_conv = GraphConvNet(subgraph.n_feat, subgraph.n_feat * 2, subgraph.n_feat * 3)
self.l1 = Linear(graph.n_feat * 3 + subgraph.n_feat * 3 + point, 600)
self.l2 = Linear(600, 256)
self.l3 = Linear(256, graph.n_feat)
def forward(self, graph, subgraph, point):
x1 = self.graph_conv(graph)
x2 = self.subgraph_conv(subgraph)
x = torch.cat([x1, x2, point])
        x = F.relu(self.l1(x))
        x = F.relu(self.l2(x))
x = self.l3(x)
return x
``` |
{
"source": "josepmartorell/File-Duplicate-Detector",
"score": 3
} |
#### File: josepmartorell/File-Duplicate-Detector/FileManager.py
```python
import os
import hashlib
import utils
from os import strerror
class Main:
def __init__(self, extension):
self.extension = extension
self.count = 0
self.my_dict = {}
self.the_file = ()
self.hash = ''
if self.extension != "":
self.hash = ' sha256'
else:
self.hash = ' md5'
def detect(self):
try:
            # walk through the directories searching for files with the given extension
for (dirname, dirs, files) in os.walk('../../Downloads/'):
print('hashing files in', ''.join(dirname).strip('./'), 'dir ...\n')
utils.progress()
print("\t")
for filename in files:
the_file = os.path.join(dirname, filename)
if self.extension == '':
h = hashlib.md5()
else:
h = hashlib.sha256()
with open(the_file, 'rb') as afile:
buf = afile.read()
h.update(buf)
thehash = h.hexdigest()
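                        # A hedged alternative (not what this script does): for very large
                        # files, hashing in fixed-size chunks keeps memory usage low:
                        # with open(the_file, 'rb') as afile:
                        #     for chunk in iter(lambda: afile.read(8192), b''):
                        #         h.update(chunk)
                        # thehash = h.hexdigest()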
                    # if the hash already exists (a duplicate file), append the path to the
                    # tuple stored in the dictionary; otherwise create a new entry
if filename.endswith(self.extension):
if thehash in self.my_dict:
self.my_dict[thehash] += (the_file,)
else:
self.my_dict[thehash] = (the_file,)
check = utils.is_empty(self.my_dict)
if check is not True:
# print hashes and values
for key in self.my_dict.keys():
snap = ' '.join(self.my_dict[key]).lstrip('./Downloads')
print('File: ', snap)
print('Hash: ', key, self.hash)
else:
print('there are no files with the ' + utils.ext + ' extension')
except IOError as e:
print("I/O error occurred: ", strerror(e.errno))
if __name__ == '__main__':
obj = Main(utils.ext)
obj.detect()
utils.unzip()
utils.remove()
``` |
{
"source": "josepmartorell/ScratchPad",
"score": 3
} |
#### File: ScratchPad/flaskproject/views.py
```python
from datetime import datetime
from flaskproject import app
from flask import (request, render_template, url_for, redirect)
from flaskproject.models import Note
from flaskproject.forms import NoteForm
from flaskproject import db
@app.route('/')
@app.route('/home')
def entry_point():
"""Renders the home page."""
notes = Note.query.filter_by(is_deleted=False).all()
return render_template(
'index.html',
title='Note List',
year=datetime.now().year,
notes=notes,
)
@app.route('/new-note', methods=['GET', 'POST'])
def create_note():
if request.method == 'POST':
form = NoteForm(request.form)
if form.validate():
note = Note(form.subject.data, form.detail.data)
db.session.add(note)
db.session.commit()
return redirect(url_for('entry_point'))
else:
form = NoteForm()
return render_template(
'create_note.html',
title='Create New Note',
year=datetime.now().year,
form=form
)
@app.route('/note/<int:id>/detail/')
def note_detail(id):
note = Note.query.get(id)
return render_template(
'note_detail.html',
title='Note Detail',
year=datetime.now().year,
        note=note
)
@app.route('/note/<int:id>/edit/', methods=['GET', 'POST'])
def edit_note(id):
if request.method == 'POST':
note = Note.query.get(id)
form = NoteForm(request.form)
if form.validate():
note.edit(form.subject.data, form.detail.data)
db.session.commit()
return redirect(url_for('entry_point'))
else:
note = Note.query.get(id)
form = NoteForm()
form.subject.data = note.subject
form.detail.data = note.detail
return render_template(
'edit_note.html',
title='Edit Note',
year=datetime.now().year,
form=form,
note_id=note.id,
)
@app.route('/note/<int:id>/delete/')
def delete_note(id):
note = Note.query.get(id)
note.delete()
db.session.commit()
return redirect(url_for('entry_point'))
``` |
{
"source": "josepmartorell/Selenium_Beautifulsoup4",
"score": 2
} |
#### File: Selenium_Beautifulsoup4/solole/main.py
```python
import json
import time
import operator
from time import sleep
from email import encoders
from bs4 import BeautifulSoup
from selenium import webdriver
# todo: -> Check driver version periodically
# check = input("\nPress enter to check Selenium driver version...")
# os.system('python -c "import selenium; print(selenium.__version__)"')
# As you are using Selenium 3.8.0, GeckoDriver is mandatory. But since you are using Firefox v46.0 you
# have to set the capability "marionette" to False through DesiredCapabilities(), as follows. REF:
# https://stackoverflow.com/questions/47782650/selenium-common-exceptions-sessionnotcreatedexception-message-unable-to-find-a/47785513
# from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
# cap = DesiredCapabilities().FIREFOX
# cap["marionette"] = False
from selenium.common.exceptions import TimeoutException, NoSuchElementException
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.by import By
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from openpyxl.styles import PatternFill, Border, Side, Font
from openpyxl.styles import Alignment, Protection
from openpyxl import load_workbook
from openpyxl import Workbook
from os import remove
import target as t
import data as d
import datetime
import requests
import smtplib
import shutil
import ssl
import os
class App:
def __init__(self, keys='../../../Documents/keys.json', target_city='new york', depart_m='2', depart_w='3',
depart_d='1', return_m='2', return_w='3', return_d='7', cell_city='New York', cell_cc='US',
path='../../../Booking'): # Change this to your Target details and desired booking path
self.keys = keys
self.target_city = target_city
self.depart_m = depart_m
self.depart_w = depart_w
self.depart_d = depart_d
self.return_m = return_m
self.return_w = return_w
self.return_d = return_d
self.cell_city = cell_city
self.cell_cc = cell_cc
self.path = path
# self.driver = webdriver.Firefox(capabilities=cap, executable_path='/usr/local/bin/geckodriver') # Change
        # this to your FirefoxDriver path. todo: the expression "executable_path=" was missing in the original version,
        # which was incapable of locating the driver!!!!
self.driver = webdriver.Firefox(
executable_path='/usr/local/bin/geckodriver') # Change this to your FirefoxDriver path.
self.error = False
self.timeout = 30
self.main_url = 'https://b2b.solole.es'
self.all_positions = []
self.all_hotels = []
self.all_addresses = []
self.all_prices = []
self.euro_symbol = '€'
self.display = []
self.cheap = []
self.index = ""
self.data = {}
self.shift = 1
self.fork = 0
self.switch = 1
self.position = 0
self.driver.get(self.main_url)
self.log_in()
if self.error is False:
self.search_target_profile()
if self.error is False:
self.scroll_down()
if self.error is False:
if not os.path.exists(path):
os.mkdir(path)
self.file_manager()
if self.switch != 1:
sleep(10)
self.driver.close()
def log_in(self, ):
try:
with open(self.keys, 'r') as a:
keys_dict = json.loads(a.read())
self.driver.implicitly_wait(10)
print('\nLogging in with username and password ...')
user_name_input = self.driver.find_element_by_xpath('//input[@placeholder="<NAME>"]')
user_name_input.send_keys(keys_dict['username'][2])
sleep(1)
password_input = self.driver.find_element_by_xpath('//input[@placeholder="<PASSWORD>aseña"]')
password_input.send_keys(keys_dict['password'][2])
sleep(1)
password_input.submit()
sleep(1)
a.close()
# self.close_settings_window_if_there()
except Exception as e:
print('Some exception occurred while trying to find username or password field\n', e)
self.error = True
def search_target_profile(self):
try:
print("Manipulating search engine ...")
search_bar = self.driver.find_element_by_xpath('//*[@id="hotelzonePred"]')
search_bar.send_keys(self.target_city)
# target_profile_url = self.main_url + '/' + self.target_city + '/'
# self.driver.get(target_profile_url)
sleep(1)
# todo: accessing a drop-down menu item directly with xpath
# element = self.driver.find_element_by_xpath('//iboosy-hotelzone/div[2]/div/button[2]/div')
# element.click()
# todo: accessing a drop-down menu item by position within the list
# https://selenium-python.readthedocs.io/navigating.html#interacting-with-the-page
all_options = self.driver.find_elements_by_class_name('dropdown-item')
all_options[0].click()
sleep(1)
self.driver.find_element_by_css_selector(
'div.w-50:nth-child(1) > div:nth-child(2) > div:nth-child(1)').click()
            # todo: Within <div class="ngb-dp-week ..."> the seven days of that week are each stored in a <div
            # class="ngb-dp-day ...">. Right-click in the inspector on the day you are interested in, copy the
            # css selector, and use it to click that day in the calendar picker
self.driver.find_element_by_css_selector('div.ngb-dp-month:nth-child( ' + self.depart_m + ' ) > '
'ngb-datepicker-month-view:nth-child(1) > div:nth-child( ' + self.depart_w + ' ) > '
'div:nth-child( ' + self.depart_d + ' )').click()
self.driver.find_element_by_css_selector('div.ngb-dp-month:nth-child( ' + self.return_m + ' ) > '
'ngb-datepicker-month-view:nth-child(1) > div:nth-child( ' + self.return_w + ' ) > '
'div:nth-child( ' + self.return_d + ' )').click()
user_name_input = self.driver.find_element_by_xpath('//*[@id="nationalityPred"]')
user_name_input.clear()
user_name_input.send_keys('España')
sleep(1)
# todo: accessing a drop-down menu item directly with xpath
element = self.driver.find_element_by_xpath(
'//div[3]/iboosy-nationalities/div/div/ngb-typeahead-window/button/div/span[2]')
element.click()
login_button = self.driver.find_element_by_xpath('//*[@id="searchbtn"]')
# instead of submit it works with click
print('Loading page ...')
login_button.click()
sleep(1)
except Exception as e:
self.error = True
print('Could not find search bar\n', e)
def scroll_down(self):
self.driver.implicitly_wait(20)
try:
# self.driver.execute_script('window.scrollTo(0, document.body.scrollHeight);') # fixme: scroll
# todo REF: https://stackoverflow.com/questions/48006078/how-to-scroll-down-in-python-selenium-step-by-step
# FIXME 1: two ways to scroll down,
# 1) go down to the bottom of the page at once.
# self.driver.execute_script('window.scrollTo(0, document.body.scrollHeight);') # fixme: scroll
# FIXME 2:
# 2) Descend from item to item to the bottom of the page.
            # in this example an item is the text of the button "See options":
read_mores = self.driver.find_elements_by_xpath('//div[text()="Precio desde"]')
screen = 0
for read_more in read_mores:
if screen == 4:
print('Scrolling page ...')
self.driver.execute_script("arguments[0].scrollIntoView();", read_more)
screen += 1
# read_more.click()
try:
soup = BeautifulSoup(self.driver.page_source, 'lxml') # todo: bs4
hotel_list = soup.find_all('div', {'class': 'row result-option'})
# fixme: name mechanism:
for i, hotel in enumerate(hotel_list):
self.all_positions.append(i + 1)
hotel_name = hotel.find('span', {'_ngcontent-c18': ""}).getText()
hotel_name = ' '.join(hotel_name.split())
# print("%d - %s" % (i + 1, hotel_name))
self.all_hotels.append(hotel_name)
except IOError as e:
print("I/O error occurred: ", os.strerror(e.errno))
print("Error loading the hotels ")
pass
try:
soup = BeautifulSoup(self.driver.page_source, 'lxml') # todo: bs4
address_list = soup.find_all('div', {'class': 'address'})
# fixme: address mechanism:
for i, address in enumerate(address_list):
hotel_address = address.find('span', {'_ngcontent-c18': ""}).getText()
hotel_address = ' '.join(hotel_address.split())
self.all_addresses.append(hotel_address)
print("Scraping page ...")
except IOError as e:
print("I/O error occurred: ", os.strerror(e.errno))
print("Error loading addresses ")
pass
try:
soup = BeautifulSoup(self.driver.page_source, 'lxml') # todo: bs4
price_list = soup.find_all('div', {'class': 'text-main-light prices'})
# fixme: price mechanism:
for i, price in enumerate(price_list):
hotel_price = price.find('span', {'_ngcontent-c18': ""}).getText().replace('€', '')
hotel_price = ' '.join(hotel_price.split())
if len(hotel_price) == 5:
hotel_price = " " + hotel_price
if len(hotel_price) == 6:
hotel_price = " " + hotel_price
if len(hotel_price) == 7:
hotel_price = " " + hotel_price
if len(hotel_price) == 8:
hotel_price = "" + hotel_price
self.all_prices.append(hotel_price)
except IOError as e:
print("I/O error occurred: ", os.strerror(e.errno))
print("Error loading prices ")
pass
print("\n\tSnapshoot:\n")
# display list
list = zip(self.all_prices, self.all_hotels, self.all_addresses)
for i, (j, k, v) in enumerate(list):
if len(j) == 5:
j = " " + j
if len(j) == 6:
j = " " + j
if len(j) == 7:
j = " " + j
if len(j) == 8:
j = "" + j
if i < 9:
print(" %d - %s %s %s %s %s" % (i + 1, j, self.euro_symbol, k, " - ", v))
else:
print("%d - %s %s %s %s %s" % (i + 1, j, self.euro_symbol, k, " - ", v))
print("\n\tRanking:\n")
# float cast
new_prices = []
for element in self.all_prices:
rank = float(element)
new_prices.append(rank)
# final list
display_list = zip(self.all_positions, self.all_hotels, new_prices, self.all_addresses)
ranking = sorted(display_list, key=operator.itemgetter(2))
for j, k, v, w in ranking:
if v < 100.00:
print(" ", "{0:.2f}".format(v), k)
if 99.00 < v < 1000.00:
print(" ", "{0:.2f}".format(v), k)
if 999.00 < v < 10000.00:
print(" ", "{0:.2f}".format(v), k)
if v > 9999.00:
print("", "{0:.2f}".format(v), k)
self.display = display_list
self.data = ranking
self.cheap = ranking[0]
print('\nCheapest reservations: ', self.cheap[1], self.cheap[2], self.euro_symbol)
for i, collation in enumerate(display_list):
if collation[1] == self.cheap[1]:
self.position = i
print('Pointing to the target button ', self.position + 1, ' ...')
            # FIXME WARNING!!!! the next line does not work with position - 1 or just position! Find out why...
            # Coincidentally, the first one is the cheapest, that is, index 1. The variable self.index starts at 0,
            # therefore we must add 1. If we subtracted 1 or added nothing, it went out of range
            # and the spider did not find the address.
self.index = str(self.position + 1)
if self.error is False:
self.target_button(self.index)
except NoSuchElementException:
print('Some error occurred while trying to scroll down')
self.error = True
def target_button(self, index):
if index != '1':
target_button = self.driver.find_element_by_xpath(
'//div[ ' + index + ' ]/div/div[1]/div[2]/div[2]/div[1]/div[2]/span')
self.driver.execute_script("arguments[0].scrollIntoView();", target_button)
# target_button.click()
else:
target_button = self.driver.find_element_by_xpath(
'//ng-component/div/div[1]/iboosy-navigation-bar/nav/div[2]/div[1]/a/div')
self.driver.execute_script("arguments[0].scrollIntoView();", target_button)
# target_button.click()
def file_manager(self, ):
f = open("trip_code.txt", "w+")
f.write("LM30")
f.close()
bookings_folder_path = os.path.join(self.path, 'bookings')
if not os.path.exists(bookings_folder_path):
os.mkdir(bookings_folder_path)
if self.error is False:
self.write_bookings_to_excel_file(bookings_folder_path, self.shift)
# if self.error is False:
# self.read_bookings_from_excel_file(self.path + '/bookings/bookings.xlsx')
return self.shift
def read_code(self):
global trip_code
f = open("trip_code.txt", "r")
if f.mode == 'r':
trip_code = f.read()
return trip_code
def write_code(self, input_code):
f = open("trip_code.txt", "w")
f.write(input_code)
f.close()
pass
def set_stylesheet(self, sheet, shift):
# snap style sheet:
if shift == 0:
# set title header:
header = ('Price', 'Retail', 'Profit', 'No', 'Hotel', 'Address')
sheet.cell(row=1, column=1).value = header[0]
sheet.cell(row=1, column=2).value = header[1]
sheet.cell(row=1, column=3).value = header[2]
sheet.cell(row=1, column=4).value = header[3]
sheet.cell(row=1, column=5).value = header[4]
sheet.cell(row=1, column=6).value = header[5]
# set column width:
sheet.column_dimensions['A'].width = 9
sheet.column_dimensions['B'].width = 9
sheet.column_dimensions['C'].width = 9
sheet.column_dimensions['D'].width = 4
sheet.column_dimensions['E'].width = 60
sheet.column_dimensions['F'].width = 50
            # fixme: set number format: (only works on blank sheets)
sheet.column_dimensions['A'].number_format = '#,##0.00'
sheet.column_dimensions['B'].number_format = '#,##0.00'
sheet.column_dimensions['C'].number_format = '#,##0.00'
# set bar title style:
for col_range in range(1, 7):
cell_title = sheet.cell(1, col_range)
cell_title.fill = PatternFill(
start_color="00c0c0c0", end_color="00c0c0c0", fill_type="solid")
cell_title = sheet.cell(1, col_range)
cell_title.font = Font(bold=True, size=11)
bd = Side(style='thick', color="000000")
cell_title.border = Border(left=bd, top=bd, right=bd, bottom=bd)
# unwrap curtain
for raw_range in range(len(self.all_positions)):
for col_range in range(1, 7):
# fixme REF: https://stackoverrun.com/es/q/3321778
# run number format with styles!
# _cell = ws.cell('A1')
# _cell.number_format = '0.00E+00'
_cell = sheet.cell(raw_range + 2, 1)
_cell.number_format = '#,##0.00'
_cell = sheet.cell(raw_range + 2, 2)
_cell.number_format = '#,##0.00'
_cell = sheet.cell(raw_range + 2, 3)
_cell.number_format = '#,##0.00'
cell_title = sheet.cell(raw_range + 2, col_range)
cell_title.fill = PatternFill(
start_color="00eaeaea", end_color="00eaeaea", fill_type="solid")
cell_title = sheet.cell(raw_range + 2, col_range)
cell_title.font = Font(bold=True, size=11)
bd = Side(style='thin', color="000000")
cell_title.border = Border(left=bd, top=bd, right=bd, bottom=bd)
# turbo style sheet:
else:
# time frame:
sheet.merge_cells('A1:I1')
time_frame = sheet['A1']
time_frame.fill = PatternFill(
start_color="00FF0000", end_color="00FF0000", fill_type="solid")
time_frame.font = Font(bold=True, size=11)
bd = Side(style='thick', color="000000")
time_frame.border = Border(left=bd, top=bd, right=bd, bottom=bd)
# timestamp
time_label = 'Snapshoot: %s Time Frame: %s%s/2020 - %s%s/2020' \
% (time.ctime(), t.dep + "/", t.start_month, t.ret + "/", t.end_month)
sheet.cell(row=1, column=1).value = time_label
# set title header:
header = ('Code', 'Price', 'Retail', 'Profit', 'CC', 'City', 'No', 'Hotel', 'Address')
sheet.cell(row=2, column=1).value = header[0]
sheet.cell(row=2, column=2).value = header[1]
sheet.cell(row=2, column=3).value = header[2]
sheet.cell(row=2, column=4).value = header[3]
sheet.cell(row=2, column=5).value = header[4]
sheet.cell(row=2, column=6).value = header[5]
sheet.cell(row=2, column=7).value = header[6]
sheet.cell(row=2, column=8).value = header[7]
sheet.cell(row=2, column=9).value = header[8]
# set number format:
            # fixme: set number format: (only works on blank sheets)
sheet.column_dimensions['B'].number_format = '#,##0.00'
sheet.column_dimensions['C'].number_format = '#,##0.00'
sheet.column_dimensions['D'].number_format = '#,##0.00'
# set column width:
sheet.column_dimensions['A'].width = 6
sheet.column_dimensions['B'].width = 9
sheet.column_dimensions['C'].width = 9
sheet.column_dimensions['D'].width = 9
sheet.column_dimensions['E'].width = 4
sheet.column_dimensions['F'].width = 16
sheet.column_dimensions['G'].width = 4
sheet.column_dimensions['H'].width = 60
sheet.column_dimensions['I'].width = 50
# set bar title style:
for col_range in range(1, 10):
cell_title = sheet.cell(2, col_range)
cell_title.fill = PatternFill(
start_color="00c0c0c0", end_color="00c0c0c0", fill_type="solid")
cell_title = sheet.cell(2, col_range)
cell_title.font = Font(bold=True, size=11)
bd = Side(style='thick', color="000000")
cell_title.border = Border(left=bd, top=bd, right=bd, bottom=bd)
# unwrap curtain
for raw_range in range(len(self.all_positions)):
for col_range in range(1, 10):
_cell = sheet.cell(raw_range + 3, 2)
_cell.number_format = '#,##0.00'
_cell = sheet.cell(raw_range + 3, 3)
_cell.number_format = '#,##0.00'
_cell = sheet.cell(raw_range + 3, 4)
_cell.number_format = '#,##0.00'
cell_title = sheet.cell(raw_range + 3, col_range)
cell_title.fill = PatternFill(
start_color="00eaeaea", end_color="00eaeaea", fill_type="solid")
cell_title = sheet.cell(raw_range + 3, col_range)
cell_title.font = Font(bold=True, size=11)
bd = Side(style='thin', color="000000")
cell_title.border = Border(left=bd, top=bd, right=bd, bottom=bd)
def write_bookings_to_excel_file(self, booking_path, shift):
filepath = os.path.join(booking_path, 'bookings.xlsx')
print('Writing to excel ...')
# if os.path.exists(filepath) and self.fork != 1:
# remove(filepath)
# self.fork = 1
if not os.path.exists(filepath):
workbook = Workbook()
workbook.save(filepath)
workbook.create_sheet("Spapshoot", 0)
workbook.create_sheet("Display", 1)
else:
workbook = load_workbook(filepath)
# fixme: delete the default sheet:
if "Sheet" in workbook.sheetnames:
std = workbook["Sheet"]
workbook.remove(std)
sheet = workbook.active
self.set_stylesheet(sheet, self.shift)
if shift != 1:
# write snap sheet
c = '1.374'
i = 2
for row in self.data:
cell_reference = sheet.cell(row=i, column=1)
cell_reference.value = row[2]
                sheet['B{}'.format(i)] = '=PRODUCT(A{},{})'.format(i, c)
                sheet['C{}'.format(i)] = '=SUM(B{},-A{})'.format(i, i)
cell_reference = sheet.cell(row=i, column=4)
cell_reference.value = row[0]
cell_reference = sheet.cell(row=i, column=5)
cell_reference.value = row[1]
cell_reference = sheet.cell(row=i, column=6)
cell_reference.value = row[3]
i += 1
else:
# write turbo sheet
c = '1.374'
i = 3
for row in self.data:
cell_reference = sheet.cell(row=i, column=1)
update_code = t.code_builder(self.read_code())
self.write_code(update_code)
cell_reference.value = update_code
cell_reference = sheet.cell(row=i, column=2)
cell_reference.value = row[2]
# REF:
# https://stackoverflow.com/questions/51044736/openpyxl-iterate-through-rows-and-apply-formula
# fixme CODE:
# for row_num in range(2, max_row_num):
# sheet['E{}'.format(row_num)] = '=CLEAN(D{})'.format(row_num)
                sheet['C{}'.format(i)] = '=PRODUCT(B{},{})'.format(i, c)
                sheet['D{}'.format(i)] = '=SUM(C{},-B{})'.format(i, i)
cell_reference = sheet.cell(row=i, column=5)
cell_reference.value = self.cell_cc
cell_reference = sheet.cell(row=i, column=6)
cell_reference.value = self.cell_city
cell_reference = sheet.cell(row=i, column=7)
cell_reference.value = row[0]
cell_reference = sheet.cell(row=i, column=8)
cell_reference.value = row[1]
cell_reference = sheet.cell(row=i, column=9)
cell_reference.value = row[3]
i += 1
workbook.active = 1
display_sheet = workbook.active
# select target row
# target = 1
# while sheet.cell(row=target, column=1).value is not None:
# target += 1
c = '1.374'
target = 3
while display_sheet.cell(row=target, column=6).value is not None:
target += 1
booking = self.data[0]
cell_reference = display_sheet.cell(row=target, column=1)
update_code = t.code_builder(self.read_code())
self.write_code(update_code)
cell_reference.value = update_code
cell_reference = display_sheet.cell(row=target, column=2)
cell_reference.value = booking[2]
        display_sheet['C{}'.format(target)] = '=PRODUCT(B{},{})'.format(target, c)
        display_sheet['D{}'.format(target)] = '=SUM(C{},-B{})'.format(target, target)
cell_reference = display_sheet.cell(row=target, column=5)
cell_reference.value = self.cell_cc
cell_reference = display_sheet.cell(row=target, column=6)
cell_reference.value = self.cell_city
cell_reference = display_sheet.cell(row=target, column=7)
cell_reference.value = booking[0]
cell_reference = display_sheet.cell(row=target, column=8)
cell_reference.value = booking[1]
cell_reference = display_sheet.cell(row=target, column=9)
cell_reference.value = booking[3]
# switch sheet
workbook.active = 0
sheet = workbook.active
self.set_stylesheet(sheet, 1)
self.set_stylesheet(display_sheet, 1)
workbook.save(filepath) # save file
if switch != 1:
spreadsheet = '//home/jmartorell/Booking/bookings/bookings.xlsx'
self.send_attachment(spreadsheet)
def send_attachment(self, file):
with open(self.keys, 'r') as a:
keys_dict = json.loads(a.read())
subject = "An email with attachment from Python"
body = "This is an email with attachment sent from Python"
sender_email = keys_dict['mailAddress'][0]
receiver_email = keys_dict['mailAddress'][1]
# password = input("Type your password and press enter:")
password = keys_dict['mailPassword'][0]
a.close()
# Create a multipart message and set headers
message = MIMEMultipart()
message["From"] = sender_email
message["To"] = receiver_email
message["Subject"] = subject
message["Bcc"] = receiver_email # Recommended for mass emails
# Add body to email
message.attach(MIMEText(body, "plain"))
filename = file # In same directory as script
# Open PDF file in binary mode
with open(filename, "rb") as attachment:
# Add file as application/octet-stream
# Email client can usually download this automatically as attachment
part = MIMEBase("application", "octet-stream")
part.set_payload(attachment.read())
# Encode file in ASCII characters to send by email
encoders.encode_base64(part)
# Add header as key/value pair to attachment part
part.add_header(
"Content-Disposition",
f"attachment; filename= {filename}",
)
# Add attachment to message and convert message to string
message.attach(part)
text = message.as_string()
# Log in to server using secure context and send email
context = ssl.create_default_context()
with smtplib.SMTP_SSL("smtp.gmail.com", 465, context=context) as server:
server.login(sender_email, password)
server.sendmail(sender_email, receiver_email, text)
print('Sending email ...')
if __name__ == '__main__':
switch = t.switch
if switch != 0:
x = 0
while x < 25:
app = App(depart_m=t.depart_month,
depart_w=t.depart_week,
depart_d=t.depart_day,
return_m=t.return_month,
return_w=t.return_week,
return_d=t.return_day,
target_city=d.tour_en[x][0],
cell_city=d.tour_en[x][0],
cell_cc=d.tour_en[x][1]
)
x += 1
else:
app = App(depart_m=t.depart_month,
depart_w=t.depart_week,
depart_d=t.depart_day,
return_m=t.return_month,
return_w=t.return_week,
return_d=t.return_day,
)
``` |
{
"source": "josepmartorell/Tkinter_GUI-Templates",
"score": 4
} |
#### File: Tkinter_GUI-Templates/templates/template3.py
```python
import tkinter
def center(tk):
w = 300
h = 100
sw = tk.winfo_screenwidth()
sh = tk.winfo_screenheight()
x = (sw - w) / 2
y = (sh - h) / 2
tk.geometry('%dx%d+%d+%d' % (w, h, x, y))
root = tkinter.Tk()
root.resizable(width=False, height=False)
name = tkinter.StringVar()
tkinter.Entry(root, textvariable=name, width=30).grid(row=0, column=1)
tkinter.Label(root, text='Name').grid(row=0, column=0)
surname = tkinter.StringVar()
tkinter.Entry(root, textvariable=surname, width=30).grid(row=1, column=1)
tkinter.Label(root, text='Surname').grid(row=1, column=0)
cell_phone = tkinter.StringVar()
tkinter.Entry(root, textvariable=cell_phone, width=30).grid(row=2, column=1)
tkinter.Label(root, text='Cell phone').grid(row=2, column=0)
contacts = {}
def save():
nam = name.get()
sur = surname.get()
cell = cell_phone.get()
contacts[cell] = [nam, sur]
return contacts
button1 = tkinter.Button(root, text='Save', command=save).grid(row=4,
column=0,
columnspan=2,
sticky='ew')
button1_lambda = tkinter.Button\
(root, text='Save', command=lambda: print(save())).grid(row=4, column=0, columnspan=2, sticky='ew')
center(root)
root.mainloop()
```
#### File: Tkinter_GUI-Templates/templates/template8.py
```python
import tkinter as tk
from tkinter import ttk
class Application(ttk.Frame):
def __init__(self, main_window):
super().__init__(main_window)
if __name__ == '__main__':
main_window.title("Position elements in Tcl/Tk")
# create notebook
self.notebook = ttk.Notebook(self)
# create content notebook
self.tab1_label = ttk.Label(self.notebook,
text="Write description for the tab 1")
# self.tab1_image = tk.PhotoImage(file="images.png")
self.tab2_label = ttk.Label(self.notebook,
text="Write description for the tab 2")
self.tab3_label = ttk.Label(self.notebook,
text="Write description for the tab 3")
self.tab4_label = ttk.Label(self.notebook,
text="Write description for the tab 4")
# add them to the notebook
self.notebook.add(self.tab1_label, text="Tab_one", padding=20)
# self.notebook.add(self.tab1_label, text="Tab_one", image=self.tab1_image, compound=tk.LEFT, padding=20)
self.notebook.add(self.tab2_label, text="Tab_two ", padding=20)
self.notebook.add(self.tab3_label, text="Tab_three ", padding=20)
self.notebook.add(self.tab4_label, text="Tab_four", padding=20)
self.notebook.pack(padx=10, pady=10)
self.pack()
root = tk.Tk()
root.resizable(width=False, height=False)
app = Application(root)
root.mainloop()
``` |
{
"source": "josepmartorell/Troubleshoot-webScraping-Miniblog",
"score": 3
} |
#### File: app/auth/models.py
```python
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from app import db
class User(db.Model, UserMixin):
__tablename__ = 'blog_user'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80), nullable=False)
email = db.Column(db.String(256), unique=True, nullable=False)
password = db.Column(db.String(128), nullable=False)
is_admin = db.Column(db.Boolean, default=False)
def __init__(self, name, email):
self.name = name
self.email = email
def __repr__(self):
return f'<User {self.email}>'
def set_password(self, password):
self.password = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.password, password)
def save(self):
if not self.id:
db.session.add(self)
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
@staticmethod
def get_by_id(id):
return User.query.get(id)
@staticmethod
def get_by_email(email):
return User.query.filter_by(email=email).first()
@staticmethod
def get_all():
return User.query.all()
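

# A usage sketch; it assumes an application context and an initialized db (neither
# is shown in this file), and the credentials are made up:
# user = User(name='Alice', email='alice@example.com')
# user.set_password('a secret')
# user.save()
# User.get_by_email('alice@example.com').check_password('a secret')  # -> True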
```
#### File: app/public/routes.py
```python
import logging
from flask import abort, render_template, redirect, url_for, request, current_app, send_file, flash
from flask_login import current_user
from app.models import Post, Comment
from . import public_bp
from .forms import CommentForm, ContactForm
from flask_mail import Mail, Message
from .. import mail
logger = logging.getLogger(__name__)
@public_bp.route("/")
def index():
logger.info('Displaying blog posts')
page = int(request.args.get('page', 1))
per_page = current_app.config['ITEMS_PER_PAGE']
post_pagination = Post.all_paginated(page, per_page)
return render_template("public/index.html", post_pagination=post_pagination)
@public_bp.route("/spiderweb")
def spiderweb():
return render_template("spiderweb.html")
@public_bp.route("/patch")
def patch():
logger.info('Displaying blog posts')
posts = Post.get_all()
return render_template("public/deployment.html", posts=posts)
@public_bp.route("/documentation")
def documentation():
return render_template("documentation.html")
@public_bp.route('/about', methods=['GET', 'POST'])
def about():
form = ContactForm()
if request.method == 'POST':
return 'Form posted.'
elif request.method == 'GET':
return render_template('public/contact.html', form=form)
@public_bp.route("/p/<string:slug>/", methods=['GET', 'POST'])
def show_post(slug):
logger.info('Showing a post')
logger.debug(f'Slug: {slug}')
post = Post.get_by_slug(slug)
if not post:
logger.info(f'Post {slug} does not exist')
abort(404)
form = CommentForm()
if current_user.is_authenticated and form.validate_on_submit():
content = form.content.data
comment = Comment(content=content, user_id=current_user.id,
user_name=current_user.name, post_id=post.id)
comment.save()
return redirect(url_for('public.show_post', slug=post.title_slug))
return render_template("public/post_view.html", post=post, form=form)
@public_bp.route("/patch/p/<string:slug>/", methods=['GET', 'POST'])
def show_patch(slug):
logger.info('Showing a post')
logger.debug(f'Slug: {slug}')
post = Post.get_by_slug(slug)
if not post:
logger.info(f'Post {slug} does not exist')
abort(404)
form = CommentForm()
if current_user.is_authenticated and form.validate_on_submit():
content = form.content.data
comment = Comment(content=content, user_id=current_user.id,
user_name=current_user.name, post_id=post.id)
comment.save()
return redirect(url_for('public.show_patch', slug=post.title_slug))
return render_template("public/patch_view.html", post=post, form=form)
@public_bp.route("/error")
def show_error():
res = 1 / 0
posts = Post.get_all()
return render_template("public/index.html", posts=posts)
@public_bp.route('/robots.txt')
def send_robots_txt():
return send_file(current_app.config['BASE_DIR'] + '/robots.txt')
@public_bp.route('/sitemap.xml')
def send_sitemap_xml():
return send_file(current_app.config['BASE_DIR'] + '/sitemap.xml')
@public_bp.route('/feed')
def send_feed_rss():
return send_file(current_app.config['BASE_DIR'] + '/feed.rss')
@public_bp.route('/contact', methods=['POST', 'GET'])
def contact():
form = ContactForm()
if form.validate_on_submit():
print('-------------------------')
print(request.form['name'])
print(request.form['email'])
print(request.form['subject'])
print(request.form['message'])
print('-------------------------')
send_message(request.form)
return redirect('/success')
return render_template('public/contact.html', form=form)
@public_bp.route('/success')
def success():
return render_template('about.html')
def send_message(message):
print(message.get('name'))
msg = Message(message.get('subject'), sender=message.get('email'),
recipients=['<EMAIL>'],
body=message.get('message')
)
mail.send(msg)
```
#### File: josepmartorell/Troubleshoot-webScraping-Miniblog/entrypoint.py
```python
import os
from flask import send_from_directory
from app import create_app
settings_module = os.getenv('APP_SETTINGS_MODULE')
app = create_app(settings_module)
@app.route('/media/posts/<filename>')
def media_posts(filename):
dir_path = os.path.join(
app.config['MEDIA_DIR'],
app.config['POSTS_IMAGES_DIR'])
return send_from_directory(dir_path, filename)
``` |
{
"source": "josepmdc/TelegramMoviesBot",
"score": 3
} |
#### File: TelegramMoviesBot/src/telegram_bot.py
```python
import requests
from movie_scraper import MovieScraper
from config import TELEGRAM_SEND_MESSAGE_URL
class TelegramBot:
def __init__(self):
self.chat_id = None
self.text = None
self.first_name = None
self.username = None
self.outgoing_message_text = None
self.incoming_message_text = None
def parse_webhook_data(self, data):
message = data['message']
self.chat_id = message['chat']['id']
self.incoming_message_text = message['text'].lower()
self.first_name = message['from']['first_name']
def action(self):
success = None
if self.incoming_message_text == '/hello':
self.outgoing_message_text = "Hello {}!".format(self.first_name)
success = self.send_message()
        elif self.incoming_message_text == '/rad':
self.outgoing_message_text = '🤙'
success = self.send_message()
else:
self.outgoing_message_text = "We are looking for your movie..."
self.send_message()
try:
self.outgoing_message_text = "\n\n".join(MovieScraper.get_movie(self.incoming_message_text))
success = self.send_message()
except TypeError:
self.outgoing_message_text = "We couldn't find your movie 😕"
success = self.send_message()
return success
def send_message(self):
res = requests.get(TELEGRAM_SEND_MESSAGE_URL.format(self.chat_id, self.outgoing_message_text))
return True if res.status_code == 200 else False
@staticmethod
def init_webhook(url):
return requests.get(url)
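

# A sketch of the webhook payload shape that parse_webhook_data() expects; the
# values are made up, only the structure is inferred from the code above:
# sample_update = {
#     "message": {
#         "chat": {"id": 123456789},
#         "text": "/hello",
#         "from": {"first_name": "Alice"},
#     }
# }
# bot = TelegramBot()
# bot.parse_webhook_data(sample_update)  # fills chat_id, incoming text and first name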
``` |
{
"source": "JosepOrenga/Discover",
"score": 4
} |
#### File: discover/features/build_features.py
```python
import pandas as pd
def read_csv(path=str, sep=';'):
"""
    Read a csv file and turn it into a dataframe
    :param path: path of the csv file
    :param sep: field separator of the file
    :return: dataframe
    """
    # Read the file with the pandas read_csv() function
df = pd.read_csv(path, sep=sep)
return df
def merge_df(df1, df2, how, on=None):
"""
    Merge two dataframes
    :param df1: first dataframe
    :param df2: second dataframe
    :param how: type of merge
    :param on: column to merge on
    :return: merged dataframe
    """
    # Merge with the pandas merge() function
df = pd.merge(df1, df2, how=how, on=[on])
return df
def denormalized_df(path1=str, path2=str, path3=str, sep=';', how=str, on1=None, on2=None):
"""
    Denormalize 3 dataframes into one
    :param path1: path of the first dataframe
    :param path2: path of the second dataframe
    :param path3: path of the third dataframe
    :param sep: delimiter of the csv files
    :param how: type of merge
    :param on1: merge column for the first merge
    :param on2: merge column for the second merge
    :return: denormalized dataframe
    """
    # Read the csv files with our helper
df1 = read_csv(path1, sep=sep)
df2 = read_csv(path2, sep=sep)
df3 = read_csv(path3, sep=sep)
    # Merge the tracks and albums dataframes
tracks_albums = merge_df(df1, df2, how, on1)
    # Rename the column to merge on
tracks_albums = tracks_albums.rename(columns={'artist_id_x': 'artist_id'})
    # Merge the tracks_albums dataframe with artists
result = merge_df(tracks_albums, df3, how, on2)
    # Rename the columns to more understandable names
result = result.rename(columns={'name': 'artist_name', 'name_x': 'track_name',
'name_y': 'album_name', 'popularity': 'artist_popularity',
'popularity_x': 'track_popularity', 'popularity_y': 'album_popularity'})
return result
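

# A usage sketch; the csv paths below are the ones used in the project's tests and
# are assumed to be valid relative to the caller:
# df = denormalized_df('../data/tracks_norm.csv', '../data/albums_norm.csv',
#                      '../data/artists_norm.csv', sep=';', how='inner',
#                      on1='album_id', on2='artist_id')
# print(df[['artist_name', 'track_name', 'album_name']].head())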
```
#### File: Discover/test/test.py
```python
import sys
import os
from os.path import dirname, abspath
d = dirname(dirname(abspath(__file__)))
sys.path.append(d)
import unittest
from discover.data.make_dataset import *
from discover.features.build_features import *
from discover.preprocessing.data_prepocessing import *
from discover.visualization.visualize import *
from discover.optimization.optimization import *
from discover.analysis.data_analysis import *
from discover.API.request import *
if __name__ == "__main__":
    # Tests for the data_analysis functions
class TestDataAnalysis(unittest.TestCase):
@classmethod
def setUpClass(cls):
            # Load our data
cls._df = fill_null_by_mean(
capital_letter(denormalized_df('../data/tracks_norm.csv', '../data/albums_norm.csv',
'../data/artists_norm.csv', ';', 'inner', 'album_id', 'artist_id'),
'artist_name'), 'track_popularity')
def test_nulls_column(self):
            # Check that all null values are replaced
self.assertEqual(nulls_of_column(self._df, 'track_popularity'), 0)
def test_max_mean_min(self):
            # Check that the values are computed correctly
maximum, mean, minimum = max_mean_min(self._df, "artist_name", "Adele", "liveness")
self.assertEqual(maximum, 0.978)
self.assertEqual(mean, 0.2326340909090909)
self.assertEqual(minimum, 0.0473)
def test_mean_feature_album(self):
            # Check that the value is computed correctly
self.assertEqual(mean_feature_album(self._df, "artist_name", "Extremoduro", "album_name", "energy")["Agila"],
0.9075714285714286)
def test_number_of_columns(self):
            # Check that the value is computed correctly
self.assertEqual(number_of_columns(self._df), 33)
def test_number_of_rows(self):
            # Check that the value is computed correctly
self.assertEqual(number_of_rows(self._df), 35574)
def test_tracks_of_epoch(self):
            # Check that the value is computed correctly
self.assertEqual(tracks_of_epoch(self._df, "release_year", 1990, 1999), 4638)
def test_tracks_max_popularity_of_epoch(self):
            # Check that the value is computed correctly
self.assertEqual(
tracks_max_popularity_of_epoch(self._df, "track_name", "release_year", "track_popularity", 2000, 2021),
"Beggin'")
self.assertNotEqual(
tracks_max_popularity_of_epoch(self._df, "track_name", "release_year", "track_popularity", 2000, 2021),
"Airbag - Remastered")
def test_tracks_of_artist(self):
            # Check that the value is computed correctly
self.assertEqual(tracks_of_artist(self._df, "artist_name", "The Killers"), 206)
self.assertNotEqual(tracks_of_artist(self._df, "artist_name", "The Killers"), 123)
self.assertNotEqual(tracks_of_artist(self._df, "artist_name", "The Killers"), 'e')
def test_word_in_track(self):
            # Check that the value is changed correctly
self.assertEqual(word_in_track(self._df, 'track_name', 'The'), 5193)
def test_every_decade(self):
            # Check that the value is computed correctly
self.assertIn('<NAME>', every_decade(self._df, "artist_name", "release_year"))
self.assertIn('<NAME>', every_decade(self._df, "artist_name", "release_year"))
self.assertNotIn('AC/DC', every_decade(self._df, "artist_name", "release_year"))
class TestDataVisualization(unittest.TestCase):
@classmethod
def setUpClass(cls):
            # Load our data
cls._df = fill_null_by_mean(
capital_letter(denormalized_df('../data/tracks_norm.csv', '../data/albums_norm.csv',
'../data/artists_norm.csv', ';', 'inner', 'album_id', 'artist_id'),
'artist_name'), 'track_popularity')
def test_density_histogram(self):
            # We know that the heights of all the bars must sum to one
self.assertEqual(round(density_histogram(self._df, 'artist_name', '<NAME>', 'acousticness', 90), 2), 1)
def test_density_hist_two_singers(self):
            # Since we have two singers, the heights of their bars must sum to 2
self.assertEqual(round(density_hist_two_singers(self._df, 'artist_name', 'Adele', 'Extremoduro', 'energy', 90), 2), 2)
def test_cosinus_similarity(self):
cs = plot_cosinus_similarity(self._df,
['artist_name', 'danceability', 'energy', 'key',
'loudness', 'mode', 'speechiness', 'acousticness',
'instrumentalness', 'liveness', 'valence', 'tempo',
'time_signature'],
['Metallica', 'Extremoduro', 'AC/DC', '<NAME>'],
'artist_name')
self.assertLessEqual(cs.all(), 1)
            # Check that it is a symmetric matrix
self.assertTrue(cs.all(), cs.T.all())
            # Check that it is a 4x4 matrix matching the number of artists provided
self.assertEqual(cs.shape, (4, 4))
            # Check that the main diagonal is all ones
self.assertTrue(np.around(np.diag(cs), 2).all(), 1)
def test_euclidean_similarity(self):
es = plot_euclidean_similarity(self._df,
['artist_name', 'danceability', 'energy', 'key',
'loudness', 'mode', 'speechiness', 'acousticness',
'instrumentalness', 'liveness', 'valence', 'tempo',
'time_signature'],
['Metallica', 'Extremoduro', 'AC/DC', '<NAME>'],
'artist_name')
            # All the elements of the matrix must be at most one
self.assertLessEqual(es.all(), 1)
            # Check that it is a symmetric matrix
self.assertTrue(es.all(), es.T.all())
            # Check that it is a 4x4 matrix matching the number of artists provided
self.assertEqual(es.shape, (4, 4))
            # Check that the main diagonal is all ones
self.assertTrue(np.around(np.diag(es), 2).all(), 1)
def test_barplot_albums(self):
self.assertIn('A Head Full of Dreams', barplot_albums(self._df, 'artist_name', 'Coldplay', 'album_name', 'danceability', 90))
self.assertTrue(15, len(barplot_albums(self._df, 'artist_name', 'Coldplay', 'album_name', 'danceability', 90)))
class TestOptimization(unittest.TestCase):
def test_pandas_method(self):
            # Check that all the artists are selected
self.assertEqual(len(get_column_pandas('../data/artists_norm.csv', 'name')), 68)
            # Check that it contains the artists' names
self.assertIn('Radiohead', get_column_pandas('../data/artists_norm.csv', 'name'))
def test_readcsv_method(self):
            # Check that all the artists are selected
self.assertEqual(len(get_column_read('../data/artists_norm.csv', 'name')), 68)
            # Check that it contains the artists' names
self.assertIn('Radiohead', get_column_read('../data/artists_norm.csv', 'name'))
def test_compare_methods(self):
pandas_time, read_time = compare_methods('../data/artists_norm.csv', 'artist_id')
            # Check that our method is faster
self.assertLess(read_time, pandas_time)
class TestMakeDataset(unittest.TestCase):
def test_unzip_file(self):
            # Check that the files are unzipped.
            # To do so, check that the directory they are extracted to
            # contains more than one file (the initial zip).
self.assertGreater(unzip_file('../data/data.zip', '../data'), 1)
class TestAPI(unittest.TestCase):
def setUp(self):
            # Load the data.
            # In this case we do not use setUpClass since, having only one function,
            # we do not repeat code.
self.df = api_audio(['Radiohead', '<NAME>', 'Måneskin'])
def test_from_df_to_csv(self):
            # Check that the file has been created
self.assertTrue(from_df_to_csv(self.df, 'artists_audiodb.csv'))
suite_tracks = unittest.TestSuite()
suite_tracks.addTest(unittest.makeSuite(TestDataAnalysis))
suite_tracks.addTest(unittest.makeSuite(TestDataVisualization))
suite_tracks.addTest(unittest.makeSuite(TestOptimization))
suite_tracks.addTest(unittest.makeSuite(TestMakeDataset))
suite_tracks.addTest(unittest.makeSuite(TestAPI))
unittest.TextTestRunner(verbosity=2).run(suite_tracks)
``` |
{
"source": "joseppi/AugmentedReality",
"score": 3
} |
#### File: ClassExercices/Numpy Exercice 2/solutions.py
```python
import cv2
import numpy as np
def ex1():
print('Ex1 - Open an image maintaining its original color format'
' and print its internal data type, its shape, its number of'
' dimensions, and show it')
img = cv2.imread('Ellie.jpg', cv2.IMREAD_ANYCOLOR)
print('The shape of the image is {}'.format(img.shape))
print('The num of dimensions of the image is {}'.format(len(img.shape)))
print('The internal type of the image is {}'.format(img.dtype))
cv2.imshow('ellie', img)
cv2.waitKey(0)
def ex2():
print('Ex2 - Open an image maintaining its original color format'
', cast it to floats, convert it to range [0, 1] and show it')
img = cv2.imread('img/sonic.jpg', cv2.IMREAD_ANYCOLOR)
img = np.float64(img) / 255.0
cv2.imshow('Image', np.uint8(img * 255.0))
cv2.waitKey(0)
def ex3():
print('Ex3 - Create a binary image (0s and 1s) from an image file and show it')
img = cv2.imread('img/sonic.jpg', cv2.IMREAD_GRAYSCALE)
threshold = 125
img = np.float64(img > threshold)
cv2.imshow('Image', np.uint8(img * 255))
cv2.waitKey(0)
def ex4():
print('Ex4 - Open an image and apply a vignetting effect on its borders')
img = cv2.imread('img/sonic.jpg', cv2.IMREAD_ANYCOLOR)
img = np.float64(img)
height, width, _ = img.shape
centeri = height / 2
centerj = width / 2
radius = np.sqrt(width*width + height*height) / 2.0
for i in range(0, height):
for j in range(0, width):
dist = np.sqrt((centeri - i)**2 + (centerj - j)**2)
vignetting_factor = 1.0 - (dist / radius)**2
img[i, j] *= vignetting_factor
cv2.imshow('Image', np.uint8(img))
cv2.waitKey(0)
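def ex4_vectorized():
    # Added sketch (not one of the original exercises): the same vignetting
    # effect as ex4, computed with NumPy broadcasting instead of nested Python
    # loops, which is much faster on large images. The path 'img/sonic.jpg'
    # is assumed to match the other exercises.
    img = cv2.imread('img/sonic.jpg', cv2.IMREAD_ANYCOLOR)
    img = np.float64(img)
    height, width, _ = img.shape
    centeri, centerj = height / 2, width / 2
    radius = np.sqrt(width * width + height * height) / 2.0
    # Grid of distances from the image centre, shape (height, width)
    i = np.arange(height).reshape(-1, 1)
    j = np.arange(width).reshape(1, -1)
    dist = np.sqrt((centeri - i) ** 2 + (centerj - j) ** 2)
    vignetting = 1.0 - (dist / radius) ** 2
    # Broadcast the per-pixel factor over the colour channels
    img *= vignetting[:, :, np.newaxis]
    cv2.imshow('Image', np.uint8(img))
    cv2.waitKey(0)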
def ex5():
print("Ex5 - Open an image and invert its colors")
img = cv2.imread('img/sonic.jpg', cv2.IMREAD_ANYCOLOR)
img[:] = 255 - img
cv2.imshow('Image', img)
cv2.waitKey(0)
def ex6():
print("Ex6 - Open an image and tint it 50% with blue")
img = cv2.imread('img/sonic.jpg', cv2.IMREAD_ANYCOLOR)
img = np.float64(img)
blue = np.array([255.0, 0.0, 0.0]).reshape((1, 1, 3))
img = 0.5 * img + 0.5 * blue
cv2.imshow('Image', np.uint8(img))
cv2.waitKey(0)
pass
def ex7():
print("Ex7 - Open an image and increase the contrast by applying the"
"following formula on its pixels: col = (col - min) / (max - min),"
"where min=50 and max=200. Clamp values so they are in the range [0,1]"
", and rescale them to range [0,255] before showing it.")
img = cv2.imread('img/sonic.jpg', cv2.IMREAD_ANYCOLOR)
img = np.float64(img)
min = 50
max = 200
img = (img - min) / (max - min)
img = np.clip(img, 0.0, 1.0)
cv2.imshow('Image', np.uint8(img * 255.0))
cv2.waitKey(0)
pass
def execute():
ex1()
# ex2()
# ex3()
# ex4()
# ex5()
# ex6()
# ex7()
pass
``` |
{
"source": "joseppinilla/dimod",
"score": 2
} |
#### File: reference/composites/scalecomposite.py
```python
try:
import collections.abc as abc
except ImportError:
import collections as abc
from numbers import Number
import numpy as np
from dimod.binary_quadratic_model import BinaryQuadraticModel
from dimod.core.composite import ComposedSampler
__all__ = 'ScaleComposite',
class ScaleComposite(ComposedSampler):
"""Composite to scale variables of a problem
Scales the variables of a bqm and modifies linear and quadratic terms
accordingly.
Args:
sampler (:obj:`dimod.Sampler`):
A dimod sampler
Examples:
This example uses :class:`.ScaleComposite` to instantiate a
composed sampler that submits a simple Ising problem to a sampler.
The composed sampler scales linear, quadratic biases and offset as
indicated by options.
>>> h = {'a': -4.0, 'b': -4.0}
>>> J = {('a', 'b'): 3.2}
>>> sampler = dimod.ScaleComposite(dimod.ExactSolver())
>>> response = sampler.sample_ising(h, J, scalar=0.5,
... ignored_interactions=[('a','b')])
"""
def __init__(self, child_sampler):
self._children = [child_sampler]
@property
def children(self):
return self._children
@property
def parameters(self):
param = self.child.parameters.copy()
param.update({'scalar': [],
'bias_range': [],
'quadratic_range': [],
'ignored_variables': [],
'ignored_interactions': [],
'ignore_offset': []})
return param
@property
def properties(self):
return {'child_properties': self.child.properties.copy()}
def sample(self, bqm, scalar=None, bias_range=1, quadratic_range=None,
ignored_variables=None, ignored_interactions=None,
ignore_offset=False, **parameters):
""" Scale and sample from the provided binary quadratic model.
If scalar is not given, the problem is scaled based on the bias and quadratic
ranges. See :meth:`.BinaryQuadraticModel.scale` and
:meth:`.BinaryQuadraticModel.normalize`
Args:
bqm (:obj:`dimod.BinaryQuadraticModel`):
Binary quadratic model to be sampled from.
scalar (number):
Value by which to scale the energy range of the binary quadratic model.
bias_range (number/pair):
Value/range by which to normalize all the biases, or if
`quadratic_range` is provided, just the linear biases.
quadratic_range (number/pair):
Value/range by which to normalize the quadratic biases.
ignored_variables (iterable, optional):
Biases associated with these variables are not scaled.
ignored_interactions (iterable[tuple], optional):
As an iterable of 2-tuples. Biases associated with these interactions are not scaled.
ignore_offset (bool, default=False):
If True, the offset is not scaled.
**parameters:
Parameters for the sampling method, specified by the child sampler.
Returns:
:obj:`dimod.SampleSet`
"""
ignored_variables, ignored_interactions = _check_params(
ignored_variables, ignored_interactions)
child = self.child
bqm_copy = _scaled_bqm(bqm, scalar, bias_range, quadratic_range,
ignored_variables, ignored_interactions,
ignore_offset)
response = child.sample(bqm_copy, **parameters)
return _scale_back_response(bqm, response, bqm_copy.info['scalar'],
ignored_variables, ignored_interactions,
ignore_offset)
def sample_ising(self, h, J, offset=0, scalar=None,
bias_range=1, quadratic_range=None,
ignored_variables=None, ignored_interactions=None,
ignore_offset=False, **parameters):
""" Scale and sample from the problem provided by h, J, offset
If scalar is not given, the problem is scaled based on the bias and quadratic
ranges.
Args:
h (dict): linear biases
J (dict): quadratic or higher order biases
offset (float, optional): constant energy offset
scalar (number):
Value by which to scale the energy range of the binary quadratic model.
bias_range (number/pair):
Value/range by which to normalize all the biases, or if
`quadratic_range` is provided, just the linear biases.
quadratic_range (number/pair):
Value/range by which to normalize the quadratic biases.
ignored_variables (iterable, optional):
Biases associated with these variables are not scaled.
ignored_interactions (iterable[tuple], optional):
As an iterable of 2-tuples. Biases associated with these interactions are not scaled.
ignore_offset (bool, default=False):
If True, the offset is not scaled.
**parameters:
Parameters for the sampling method, specified by the child sampler.
Returns:
:obj:`dimod.SampleSet`
"""
if any(len(inter) > 2 for inter in J):
# handle HUBO
import warnings
msg = ("Support for higher order Ising models in ScaleComposite is "
"deprecated and will be removed in dimod 0.9.0. Please use "
"PolyScaleComposite.sample_hising instead.")
warnings.warn(msg, DeprecationWarning)
from dimod.reference.composites.higherordercomposites import PolyScaleComposite
from dimod.higherorder.polynomial import BinaryPolynomial
poly = BinaryPolynomial.from_hising(h, J, offset=offset)
ignored_terms = set()
if ignored_variables is not None:
ignored_terms.update(frozenset(v) for v in ignored_variables)
if ignored_interactions is not None:
ignored_terms.update(frozenset(inter) for inter in ignored_interactions)
if ignore_offset:
ignored_terms.add(frozenset())
return PolyScaleComposite(self.child).sample_poly(poly, scalar=scalar,
bias_range=bias_range,
poly_range=quadratic_range,
ignored_terms=ignored_terms,
**parameters)
bqm = BinaryQuadraticModel.from_ising(h, J, offset=offset)
return self.sample(bqm, scalar=scalar,
bias_range=bias_range,
quadratic_range=quadratic_range,
ignored_variables=ignored_variables,
ignored_interactions=ignored_interactions,
ignore_offset=ignore_offset, **parameters)
def _scale_back_response(bqm, response, scalar, ignored_interactions,
ignored_variables, ignore_offset):
"""Helper function to scale back the response of sample method"""
if len(ignored_interactions) + len(
ignored_variables) + ignore_offset == 0:
response.record.energy = np.divide(response.record.energy, scalar)
else:
response.record.energy = bqm.energies((response.record.sample,
response.variables))
return response
def _check_params(ignored_variables, ignored_interactions):
"""Helper for sample methods"""
if ignored_variables is None:
ignored_variables = set()
elif not isinstance(ignored_variables, abc.Container):
ignored_variables = set(ignored_variables)
if ignored_interactions is None:
ignored_interactions = set()
elif not isinstance(ignored_interactions, abc.Container):
ignored_interactions = set(ignored_interactions)
return ignored_variables, ignored_interactions
def _calc_norm_coeff(h, J, bias_range, quadratic_range, ignored_variables,
ignored_interactions):
"""Helper function to calculate normalization coefficient"""
if ignored_variables is None or ignored_interactions is None:
raise ValueError('ignored interactions or variables cannot be None')
def parse_range(r):
if isinstance(r, Number):
return -abs(r), abs(r)
return r
def min_and_max(iterable):
if not iterable:
return 0, 0
return min(iterable), max(iterable)
if quadratic_range is None:
linear_range, quadratic_range = bias_range, bias_range
else:
linear_range = bias_range
lin_range, quad_range = map(parse_range, (linear_range,
quadratic_range))
lin_min, lin_max = min_and_max([v for k, v in h.items()
if k not in ignored_variables])
quad_min, quad_max = min_and_max([v for k, v in J.items()
if not check_isin(k,
ignored_interactions)])
inv_scalar = max(lin_min / lin_range[0], lin_max / lin_range[1],
quad_min / quad_range[0], quad_max / quad_range[1])
if inv_scalar != 0:
return 1. / inv_scalar
else:
return 1.
def _scaled_bqm(bqm, scalar, bias_range, quadratic_range,
ignored_variables, ignored_interactions,
ignore_offset):
"""Helper function of sample for scaling"""
bqm_copy = bqm.copy()
if scalar is None:
scalar = _calc_norm_coeff(bqm_copy.linear, bqm_copy.quadratic,
bias_range, quadratic_range,
ignored_variables, ignored_interactions)
bqm_copy.scale(scalar, ignored_variables=ignored_variables,
ignored_interactions=ignored_interactions,
ignore_offset=ignore_offset)
bqm_copy.info.update({'scalar': scalar})
return bqm_copy
def check_isin(key, key_list):
return sum(set(key) == set(key_tmp) for key_tmp in key_list)
```
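The scaling logic above is easiest to see on a tiny example. The following sketch is not part of the dimod source (the helper name `norm_coeff_sketch` is made up for illustration); it mirrors what `_calc_norm_coeff` does when nothing is ignored, finding the scalar that brings the largest-magnitude bias inside the requested range.
```python
from numbers import Number

def norm_coeff_sketch(h, J, bias_range=1, quadratic_range=None):
    """Illustrative re-implementation of ScaleComposite's scaling coefficient,
    assuming no ignored variables or interactions."""
    def parse_range(r):
        return (-abs(r), abs(r)) if isinstance(r, Number) else r
    lin_range = parse_range(bias_range)
    quad_range = parse_range(bias_range if quadratic_range is None else quadratic_range)
    lin_vals = list(h.values()) or [0]
    quad_vals = list(J.values()) or [0]
    inv_scalar = max(min(lin_vals) / lin_range[0], max(lin_vals) / lin_range[1],
                     min(quad_vals) / quad_range[0], max(quad_vals) / quad_range[1])
    return 1.0 / inv_scalar if inv_scalar else 1.0

# With the docstring's example problem the largest magnitude is 4.0, so the
# coefficient is 0.25 and every scaled bias lands inside [-1, 1].
print(norm_coeff_sketch({'a': -4.0, 'b': -4.0}, {('a', 'b'): 3.2}))  # 0.25
```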
#### File: reference/samplers/random_sampler.py
```python
from random import choice
from dimod.core.sampler import Sampler
from dimod.sampleset import SampleSet
__all__ = ['RandomSampler']
class RandomSampler(Sampler):
"""A sampler that gives random samples for testing."""
properties = None
parameters = None
"""dict: Keyword arguments accepted by the sampling methods.
Contents are exactly `{'num_reads': []}`
"""
def __init__(self):
self.parameters = {'num_reads': []}
self.properties = {}
def sample(self, bqm, num_reads=10):
"""Give random samples for a binary quadratic model.
Variable assignments are chosen by coin flip.
Args:
bqm (:obj:`.BinaryQuadraticModel`):
Binary quadratic model to be sampled from.
num_reads (int, optional, default=10):
Number of reads.
Returns:
:obj:`.SampleSet`
"""
values = tuple(bqm.vartype.value)
def _itersample():
for __ in range(num_reads):
sample = {v: choice(values) for v in bqm.linear}
energy = bqm.energy(sample)
yield sample, energy
samples, energies = zip(*_itersample())
return SampleSet.from_samples(samples, bqm.vartype, energies)
```
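A minimal, hedged usage sketch for the sampler above: it draws a handful of coin-flip samples for a small Ising problem (made up for illustration) and prints the best of them.
```python
import dimod

bqm = dimod.BinaryQuadraticModel.from_ising({'a': -1.0}, {('a', 'b'): 0.5})

sampler = RandomSampler()
sampleset = sampler.sample(bqm, num_reads=5)
# RandomSampler makes no attempt to minimize energy; .first is simply the
# lowest-energy sample among the five random draws.
print(sampleset.first)
```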
#### File: dimod/tests/test_fixedvariablecomposite.py
```python
import unittest
import dimod.testing as dtest
from dimod.vartypes import Vartype
import dimod
from dimod import BinaryQuadraticModel
from dimod import FixedVariableComposite, ExactSolver, RoofDualityComposite
from dimod import SampleSet
try:
from dimod import fix_variables
except ImportError:
cpp = False
else:
cpp = True
class TestFixedVariableComposite(unittest.TestCase):
def test_instantiation_smoketest(self):
sampler = FixedVariableComposite(ExactSolver())
dtest.assert_sampler_api(sampler)
def test_sample(self):
bqm = BinaryQuadraticModel(linear={1: -1.3, 4: -0.5},
quadratic={(1, 4): -0.6},
offset=0,
vartype=Vartype.SPIN)
fixed_variables = {1: -1}
sampler = FixedVariableComposite(ExactSolver())
response = sampler.sample(bqm, fixed_variables=fixed_variables)
self.assertEqual(response.first.sample, {4: -1, 1: -1})
self.assertAlmostEqual(response.first.energy, 1.2)
def test_empty_bqm(self):
bqm = BinaryQuadraticModel(linear={1: -1.3, 4: -0.5},
quadratic={(1, 4): -0.6},
offset=0,
vartype=Vartype.SPIN)
fixed_variables = {1: -1, 4: -1}
sampler = FixedVariableComposite(ExactSolver())
response = sampler.sample(bqm, fixed_variables=fixed_variables)
self.assertIsInstance(response, SampleSet)
def test_empty_fix(self):
linear = {1: -1.3, 4: -0.5}
quadratic = {(1, 4): -0.6}
sampler = FixedVariableComposite(ExactSolver())
response = sampler.sample_ising(linear, quadratic)
self.assertIsInstance(response, SampleSet)
self.assertEqual(response.first.sample, {4: 1, 1: 1})
self.assertAlmostEqual(response.first.energy, -2.4)
class TestRoofDualityComposite(unittest.TestCase):
@unittest.skipIf(cpp, "cpp extensions built")
def test_nocpp_error(self):
with self.assertRaises(ImportError):
RoofDualityComposite(dimod.ExactSolver()).sample_ising({}, {})
@unittest.skipUnless(cpp, "no cpp extensions built")
def test_construction(self):
sampler = RoofDualityComposite(dimod.ExactSolver())
dtest.assert_sampler_api(sampler)
@unittest.skipUnless(cpp, "no cpp extensions built")
def test_3path(self):
sampler = RoofDualityComposite(dimod.ExactSolver())
sampleset = sampler.sample_ising({'a': 10}, {'ab': -1, 'bc': 1})
# all should be fixed, so should just see one
self.assertEqual(len(sampleset), 1)
self.assertEqual(set(sampleset.variables), set('abc'))
@unittest.skipUnless(cpp, "no cpp extensions built")
def test_triangle(self):
sampler = RoofDualityComposite(dimod.ExactSolver())
bqm = dimod.BinaryQuadraticModel.from_ising({}, {'ab': -1, 'bc': -1, 'ac': -1})
# two equally good solutions
sampleset = sampler.sample(bqm)
self.assertEqual(set(sampleset.variables), set('abc'))
dimod.testing.assert_response_energies(sampleset, bqm)
@unittest.skipUnless(cpp, "no cpp extensions built")
def test_triangle_sampling_mode_off(self):
sampler = RoofDualityComposite(dimod.ExactSolver())
bqm = dimod.BinaryQuadraticModel.from_ising({}, {'ab': -1, 'bc': -1, 'ac': -1})
# two equally good solutions, but with sampling mode off it will pick one
sampleset = sampler.sample(bqm, sampling_mode=False)
self.assertEqual(set(sampleset.variables), set('abc'))
self.assertEqual(len(sampleset), 1) # all should be fixed
dimod.testing.assert_response_energies(sampleset, bqm)
```
#### File: dimod/tests/test_higherordercomposite.py
```python
import numpy as np
import unittest
import dimod.testing as dtest
from dimod import HigherOrderComposite, ExactSolver
class TestFixedVariableComposite(unittest.TestCase):
def test_sample(self):
linear = {0: -0.5, 1: -0.3, 2: -0.8}
quadratic = {(0, 1, 2): -1.7}
sampler = HigherOrderComposite(ExactSolver())
response = sampler.sample_ising(linear, quadratic, penalty_strength=10,
keep_penalty_variables=False,
discard_unsatisfied=False)
self.assertEqual(response.first.sample, {0: 1, 1: 1, 2: 1})
self.assertAlmostEqual(response.first.energy, -3.3)
self.assertFalse(np.prod(response.record.penalty_satisfaction))
def test_discard(self):
linear = {0: -0.5, 1: -0.3, 2: -0.8}
quadratic = {(0, 1, 2): -1.7}
sampler = HigherOrderComposite(ExactSolver())
response = sampler.sample_ising(linear, quadratic, penalty_strength=10,
discard_unsatisfied=True)
self.assertEqual(response.first.sample, {0: 1, 1: 1, 2: 1})
self.assertAlmostEqual(response.first.energy, -3.3)
self.assertTrue(np.prod(response.record.penalty_satisfaction))
def test_penalty_variables(self):
linear = {0: -0.5, 1: -0.3, 2: -0.8}
quadratic = {(0, 1, 2): -1.7}
sampler = HigherOrderComposite(ExactSolver())
response = sampler.sample_ising(linear, quadratic, penalty_strength=10,
keep_penalty_variables=True,
discard_unsatisfied=True)
self.assertEqual(len(response.first.sample), 5)
self.assertAlmostEqual(response.first.energy, -3.3)
self.assertTrue(response.first.penalty_satisfaction)
def test_already_qubo(self):
linear = {0: -0.5, 1: -0.3}
quadratic = {(0, 1): -1.7}
sampler = HigherOrderComposite(ExactSolver())
response = sampler.sample_ising(linear, quadratic,
keep_penalty_variables=True,
discard_unsatisfied=False)
self.assertEqual(response.first.sample, {0: 1, 1: 1})
self.assertAlmostEqual(response.first.energy, -2.5)
self.assertTrue(response.first.penalty_satisfaction)
def test_already_qubo_2(self):
linear = {0: -0.5, 1: -0.3}
quadratic = {(0, 1): -1.7}
sampler = HigherOrderComposite(ExactSolver())
response = sampler.sample_ising(linear, quadratic, penalty_strength=10,
keep_penalty_variables=True,
discard_unsatisfied=True)
self.assertEqual(response.first.sample, {0: 1, 1: 1})
self.assertAlmostEqual(response.first.energy, -2.5)
self.assertTrue(response.first.penalty_satisfaction)
```
#### File: dimod/tests/test_testing.py
```python
import unittest
import dimod
class Test_assert_almost_equal_bqm(unittest.TestCase):
def test_empty(self):
bqm0 = dimod.BinaryQuadraticModel.empty(dimod.SPIN)
bqm1 = dimod.BinaryQuadraticModel.empty(dimod.SPIN)
dimod.testing.assert_bqm_almost_equal(bqm0, bqm1)
def test_self_empty(self):
bqm = dimod.BinaryQuadraticModel.empty(dimod.SPIN)
dimod.testing.assert_bqm_almost_equal(bqm, bqm)
def test_unlike_variables(self):
bqm0 = dimod.BinaryQuadraticModel.from_ising({'a': -1}, {})
bqm1 = dimod.BinaryQuadraticModel.from_ising({'b': -1}, {})
with self.assertRaises(AssertionError):
dimod.testing.assert_bqm_almost_equal(bqm0, bqm1)
def test_unlike_offset(self):
bqm0 = dimod.BinaryQuadraticModel.from_ising({'a': -1}, {}, 1.1)
bqm1 = dimod.BinaryQuadraticModel.from_ising({'a': -1}, {}, 1.2)
with self.assertRaises(AssertionError):
dimod.testing.assert_bqm_almost_equal(bqm0, bqm1)
dimod.testing.assert_bqm_almost_equal(bqm0, bqm1, places=0)
def test_unlike_linear(self):
bqm0 = dimod.BinaryQuadraticModel.from_ising({'a': -1}, {})
bqm1 = dimod.BinaryQuadraticModel.from_ising({'a': -1.1}, {})
with self.assertRaises(AssertionError):
dimod.testing.assert_bqm_almost_equal(bqm0, bqm1)
dimod.testing.assert_bqm_almost_equal(bqm0, bqm1, places=0)
def test_unlike_interactions(self):
bqm0 = dimod.BinaryQuadraticModel.from_ising({}, {'ab': -1})
bqm1 = dimod.BinaryQuadraticModel.from_ising({}, {'ab': -1.1})
with self.assertRaises(AssertionError):
dimod.testing.assert_bqm_almost_equal(bqm0, bqm1)
dimod.testing.assert_bqm_almost_equal(bqm0, bqm1, places=0)
def test_ignore_zero_interactions(self):
h = {'a': 0, 'b': 0, 'c': 0, 'd': 0}
J0 = {'ab': 0, 'bc': -1}
J1 = {'cb': -1, 'cd': 0}
bqm0 = dimod.BinaryQuadraticModel.from_ising(h, J0)
bqm1 = dimod.BinaryQuadraticModel.from_ising(h, J1)
with self.assertRaises(AssertionError):
dimod.testing.assert_bqm_almost_equal(bqm0, bqm1)
dimod.testing.assert_bqm_almost_equal(bqm0, bqm1, ignore_zero_interactions=True)
with self.assertRaises(AssertionError):
dimod.testing.assert_bqm_almost_equal(bqm1, bqm0)
dimod.testing.assert_bqm_almost_equal(bqm1, bqm0, ignore_zero_interactions=True)
``` |
{
"source": "joseppinilla/dwave-system",
"score": 2
} |
#### File: dwave/embedding/transforms.py
```python
from __future__ import division
import itertools
import numpy as np
import dimod
from six import iteritems, itervalues
from dwave.embedding.chain_breaks import majority_vote, broken_chains
from dwave.embedding.exceptions import MissingEdgeError, MissingChainError, InvalidNodeError
from dwave.embedding.utils import chain_to_quadratic
__all__ = ['embed_bqm',
'embed_ising',
'embed_qubo',
'unembed_sampleset',
]
def embed_bqm(source_bqm, embedding, target_adjacency, chain_strength=1.0,
smear_vartype=None):
"""Embed a binary quadratic model onto a target graph.
Args:
source_bqm (:obj:`.BinaryQuadraticModel`):
Binary quadratic model to embed.
embedding (dict):
Mapping from source graph to target graph as a dict of form {s: {t, ...}, ...},
where s is a source-model variable and t is a target-model variable.
target_adjacency (dict/:class:`networkx.Graph`):
Adjacency of the target graph as a dict of form {t: Nt, ...},
where t is a variable in the target graph and Nt is its set of neighbours.
chain_strength (float, optional):
Magnitude of the quadratic bias (in SPIN-space) applied between variables to create chains. Note
that the energy penalty of chain breaks is 2 * `chain_strength`.
smear_vartype (:class:`.Vartype`, optional, default=None):
When a single variable is embedded, its linear bias is 'smeared' evenly over the
chain. This parameter determines whether the variable is smeared in SPIN or BINARY
space. By default the embedding is done according to the given source_bqm.
Returns:
:obj:`.BinaryQuadraticModel`: Target binary quadratic model.
Examples:
This example embeds a fully connected :math:`K_3` graph onto a square target graph.
Embedding is accomplished by an edge contraction operation on the target graph:
target-nodes 2 and 3 are chained to represent source-node c.
>>> import dimod
>>> import networkx as nx
>>> # Binary quadratic model for a triangular source graph
>>> bqm = dimod.BinaryQuadraticModel.from_ising({}, {('a', 'b'): 1, ('b', 'c'): 1, ('a', 'c'): 1})
>>> # Target graph is a square graph
>>> target = nx.cycle_graph(4)
>>> # Embedding from source to target graphs
>>> embedding = {'a': {0}, 'b': {1}, 'c': {2, 3}}
>>> # Embed the BQM
>>> target_bqm = dimod.embed_bqm(bqm, embedding, target)
>>> target_bqm.quadratic[(0, 1)] == bqm.quadratic[('a', 'b')]
True
>>> target_bqm.quadratic # doctest: +SKIP
{(0, 1): 1.0, (0, 3): 1.0, (1, 2): 1.0, (2, 3): -1.0}
This example embeds a fully connected :math:`K_3` graph onto the target graph
of a dimod reference structured sampler, `StructureComposite`, using the dimod reference
`ExactSolver` sampler with a square graph specified. Target-nodes 2 and 3
are chained to represent source-node c.
>>> import dimod
>>> # Binary quadratic model for a triangular source graph
>>> bqm = dimod.BinaryQuadraticModel.from_ising({}, {('a', 'b'): 1, ('b', 'c'): 1, ('a', 'c'): 1})
>>> # Structured dimod sampler with a structure defined by a square graph
>>> sampler = dimod.StructureComposite(dimod.ExactSolver(), [0, 1, 2, 3], [(0, 1), (1, 2), (2, 3), (0, 3)])
>>> # Embedding from source to target graph
>>> embedding = {'a': {0}, 'b': {1}, 'c': {2, 3}}
>>> # Embed the BQM
>>> target_bqm = dimod.embed_bqm(bqm, embedding, sampler.adjacency)
>>> # Sample
>>> samples = sampler.sample(target_bqm)
>>> samples.record.sample # doctest: +SKIP
array([[-1, -1, -1, -1],
[ 1, -1, -1, -1],
[ 1, 1, -1, -1],
[-1, 1, -1, -1],
[-1, 1, 1, -1],
>>> # Snipped above samples for brevity
"""
if smear_vartype is dimod.SPIN and source_bqm.vartype is dimod.BINARY:
return embed_bqm(source_bqm.spin, embedding, target_adjacency,
chain_strength=chain_strength, smear_vartype=None).binary
elif smear_vartype is dimod.BINARY and source_bqm.vartype is dimod.SPIN:
return embed_bqm(source_bqm.binary, embedding, target_adjacency,
chain_strength=chain_strength, smear_vartype=None).spin
# create a new empty binary quadratic model with the same class as source_bqm
target_bqm = source_bqm.empty(source_bqm.vartype)
# add the offset
target_bqm.add_offset(source_bqm.offset)
# start with the linear biases, spreading the source bias equally over the target variables in
# the chain
for v, bias in iteritems(source_bqm.linear):
if v in embedding:
chain = embedding[v]
else:
raise MissingChainError(v)
if any(u not in target_adjacency for u in chain):
raise InvalidNodeError(v, next(u for u in chain if u not in target_adjacency))
b = bias / len(chain)
target_bqm.add_variables_from({u: b for u in chain})
# next up the quadratic biases, spread the quadratic biases evenly over the available
# interactions
for (u, v), bias in iteritems(source_bqm.quadratic):
available_interactions = {(s, t) for s in embedding[u] for t in embedding[v] if s in target_adjacency[t]}
if not available_interactions:
raise MissingEdgeError(u, v)
b = bias / len(available_interactions)
target_bqm.add_interactions_from((u, v, b) for u, v in available_interactions)
for chain in itervalues(embedding):
# in the case where the chain has length 1, there are no chain quadratic biases, but we
# nonetheless want the chain variables to appear in the target_bqm
if len(chain) == 1:
v, = chain
target_bqm.add_variable(v, 0.0)
continue
quadratic_chain_biases = chain_to_quadratic(chain, target_adjacency, chain_strength)
target_bqm.add_interactions_from(quadratic_chain_biases, vartype=dimod.SPIN) # these are spin
# add the energy for satisfied chains to the offset
energy_diff = -sum(itervalues(quadratic_chain_biases))
target_bqm.add_offset(energy_diff)
return target_bqm
def embed_ising(source_h, source_J, embedding, target_adjacency, chain_strength=1.0):
"""Embed an Ising problem onto a target graph.
Args:
source_h (dict[variable, bias]/list[bias]):
Linear biases of the Ising problem. If a list, the list's indices are used as
variable labels.
source_J (dict[(variable, variable), bias]):
Quadratic biases of the Ising problem.
embedding (dict):
Mapping from source graph to target graph as a dict of form {s: {t, ...}, ...},
where s is a source-model variable and t is a target-model variable.
target_adjacency (dict/:class:`networkx.Graph`):
Adjacency of the target graph as a dict of form {t: Nt, ...},
where t is a target-graph variable and Nt is its set of neighbours.
chain_strength (float, optional):
Magnitude of the quadratic bias (in SPIN-space) applied between variables to form a chain. Note
that the energy penalty of chain breaks is 2 * `chain_strength`.
Returns:
tuple: A 2-tuple:
dict[variable, bias]: Linear biases of the target Ising problem.
dict[(variable, variable), bias]: Quadratic biases of the target Ising problem.
Examples:
This example embeds a fully connected :math:`K_3` graph onto a square target graph.
Embedding is accomplished by an edge contraction operation on the target graph: target-nodes
2 and 3 are chained to represent source-node c.
>>> import dimod
>>> import networkx as nx
>>> # Ising problem for a triangular source graph
>>> h = {}
>>> J = {('a', 'b'): 1, ('b', 'c'): 1, ('a', 'c'): 1}
>>> # Target graph is a square graph
>>> target = nx.cycle_graph(4)
>>> # Embedding from source to target graph
>>> embedding = {'a': {0}, 'b': {1}, 'c': {2, 3}}
>>> # Embed the Ising problem
>>> target_h, target_J = dimod.embed_ising(h, J, embedding, target)
>>> target_J[(0, 1)] == J[('a', 'b')]
True
>>> target_J # doctest: +SKIP
{(0, 1): 1.0, (0, 3): 1.0, (1, 2): 1.0, (2, 3): -1.0}
This example embeds a fully connected :math:`K_3` graph onto the target graph
of a dimod reference structured sampler, `StructureComposite`, using the dimod reference
`ExactSolver` sampler with a square graph specified. Target-nodes 2 and 3 are chained to
represent source-node c.
>>> import dimod
>>> # Ising problem for a triangular source graph
>>> h = {}
>>> J = {('a', 'b'): 1, ('b', 'c'): 1, ('a', 'c'): 1}
>>> # Structured dimod sampler with a structure defined by a square graph
>>> sampler = dimod.StructureComposite(dimod.ExactSolver(), [0, 1, 2, 3], [(0, 1), (1, 2), (2, 3), (0, 3)])
>>> # Embedding from source to target graph
>>> embedding = {'a': {0}, 'b': {1}, 'c': {2, 3}}
>>> # Embed the Ising problem
>>> target_h, target_J = dimod.embed_ising(h, J, embedding, sampler.adjacency)
>>> # Sample
>>> samples = sampler.sample_ising(target_h, target_J)
>>> for sample in samples.samples(n=3, sorted_by='energy'): # doctest: +SKIP
... print(sample)
...
{0: 1, 1: -1, 2: -1, 3: -1}
{0: 1, 1: 1, 2: -1, 3: -1}
{0: -1, 1: 1, 2: -1, 3: -1}
"""
source_bqm = dimod.BinaryQuadraticModel.from_ising(source_h, source_J)
target_bqm = embed_bqm(source_bqm, embedding, target_adjacency, chain_strength=chain_strength)
target_h, target_J, __ = target_bqm.to_ising()
return target_h, target_J
def embed_qubo(source_Q, embedding, target_adjacency, chain_strength=1.0):
"""Embed a QUBO onto a target graph.
Args:
source_Q (dict[(variable, variable), bias]):
Coefficients of a quadratic unconstrained binary optimization (QUBO) model.
embedding (dict):
Mapping from source graph to target graph as a dict of form {s: {t, ...}, ...},
where s is a source-model variable and t is a target-model variable.
target_adjacency (dict/:class:`networkx.Graph`):
Adjacency of the target graph as a dict of form {t: Nt, ...},
where t is a target-graph variable and Nt is its set of neighbours.
chain_strength (float, optional):
Magnitude of the quadratic bias (in SPIN-space) applied between variables to form a chain. Note
that the energy penalty of chain breaks is 2 * `chain_strength`.
Returns:
dict[(variable, variable), bias]: Quadratic biases of the target QUBO.
Examples:
This example embeds a square source graph onto fully connected :math:`K_5` graph.
Embedding is accomplished by an edge deletion operation on the target graph: target-node
0 is not used.
>>> import dimod
>>> import networkx as nx
>>> # QUBO problem for a square graph
>>> Q = {(1, 1): -4.0, (1, 2): 4.0, (2, 2): -4.0, (2, 3): 4.0,
... (3, 3): -4.0, (3, 4): 4.0, (4, 1): 4.0, (4, 4): -4.0}
>>> # Target graph is a fully connected k5 graph
>>> K_5 = nx.complete_graph(5)
>>> 0 in K_5
True
>>> # Embedding from source to target graph
>>> embedding = {1: {4}, 2: {3}, 3: {1}, 4: {2}}
>>> # Embed the QUBO
>>> target_Q = dimod.embed_qubo(Q, embedding, K_5)
>>> (0, 0) in target_Q
False
>>> target_Q # doctest: +SKIP
{(1, 1): -4.0,
(1, 2): 4.0,
(2, 2): -4.0,
(2, 4): 4.0,
(3, 1): 4.0,
(3, 3): -4.0,
(4, 3): 4.0,
(4, 4): -4.0}
This example embeds a square graph onto the target graph of a dimod reference structured
sampler, `StructureComposite`, using the dimod reference `ExactSolver` sampler with a
fully connected :math:`K_5` graph specified.
>>> import dimod
>>> import networkx as nx
>>> # QUBO problem for a square graph
>>> Q = {(1, 1): -4.0, (1, 2): 4.0, (2, 2): -4.0, (2, 3): 4.0,
... (3, 3): -4.0, (3, 4): 4.0, (4, 1): 4.0, (4, 4): -4.0}
>>> # Structured dimod sampler with a structure defined by a K5 graph
>>> sampler = dimod.StructureComposite(dimod.ExactSolver(), list(K_5.nodes), list(K_5.edges))
>>> sampler.adjacency # doctest: +SKIP
{0: {1, 2, 3, 4},
1: {0, 2, 3, 4},
2: {0, 1, 3, 4},
3: {0, 1, 2, 4},
4: {0, 1, 2, 3}}
>>> # Embedding from source to target graph
>>> embedding = {0: [4], 1: [3], 2: [1], 3: [2], 4: [0]}
>>> # Embed the QUBO
>>> target_Q = dimod.embed_qubo(Q, embedding, sampler.adjacency)
>>> # Sample
>>> samples = sampler.sample_qubo(target_Q)
>>> for datum in samples.data(): # doctest: +SKIP
... print(datum)
...
Sample(sample={1: 0, 2: 1, 3: 1, 4: 0}, energy=-8.0)
Sample(sample={1: 1, 2: 0, 3: 0, 4: 1}, energy=-8.0)
Sample(sample={1: 1, 2: 0, 3: 0, 4: 0}, energy=-4.0)
Sample(sample={1: 1, 2: 1, 3: 0, 4: 0}, energy=-4.0)
Sample(sample={1: 0, 2: 1, 3: 0, 4: 0}, energy=-4.0)
Sample(sample={1: 1, 2: 1, 3: 1, 4: 0}, energy=-4.0)
>>> # Snipped above samples for brevity
"""
source_bqm = dimod.BinaryQuadraticModel.from_qubo(source_Q)
target_bqm = embed_bqm(source_bqm, embedding, target_adjacency, chain_strength=chain_strength)
target_Q, __ = target_bqm.to_qubo()
return target_Q
def unembed_sampleset(target_sampleset, embedding, source_bqm,
chain_break_method=None, chain_break_fraction=False):
"""Unembed the samples set.
Construct a sample set for the source binary quadratic model (BQM) by
unembedding the given samples from the target BQM.
Args:
target_sampleset (:obj:`dimod.SampleSet`):
SampleSet from the target BQM.
embedding (dict):
Mapping from source graph to target graph as a dict of form
{s: {t, ...}, ...}, where s is a source variable and t is a target
variable.
source_bqm (:obj:`dimod.BinaryQuadraticModel`):
Source binary quadratic model.
chain_break_method (function, optional):
Method used to resolve chain breaks.
See :mod:`dwave.embedding.chain_breaks`.
chain_break_fraction (bool, optional, default=False):
If True, a 'chain_break_fraction' field is added to the unembedded
samples which report what fraction of the chains were broken before
unembedding.
Returns:
:obj:`.SampleSet`:
Examples:
>>> import dimod
...
>>> # say we have a bqm on a triangle and an embedding
>>> J = {('a', 'b'): -1, ('b', 'c'): -1, ('a', 'c'): -1}
>>> bqm = dimod.BinaryQuadraticModel.from_ising({}, J)
>>> embedding = {'a': [0, 1], 'b': [2], 'c': [3]}
...
>>> # and some samples from the embedding
>>> samples = [{0: -1, 1: -1, 2: -1, 3: -1}, # [0, 1] is unbroken
...            {0: -1, 1: +1, 2: +1, 3: +1}] # [0, 1] is broken
>>> energies = [-3, 1]
>>> embedded = dimod.SampleSet.from_samples(samples, dimod.SPIN, energies)
...
>>> # unembed
>>> samples = dwave.embedding.unembed_sampleset(embedded, embedding, bqm)
>>> samples.record.sample # doctest: +SKIP
array([[-1, -1, -1],
[ 1, 1, 1]], dtype=int8)
"""
if chain_break_method is None:
chain_break_method = majority_vote
variables = list(source_bqm)
try:
chains = [embedding[v] for v in variables]
except KeyError:
raise ValueError("given bqm does not match the embedding")
chain_idxs = [[target_sampleset.variables.index(v) for v in chain] for chain in chains]
record = target_sampleset.record
unembedded, idxs = chain_break_method(record.sample, chain_idxs)
# dev note: this is a bug in dimod that empty unembedded is not handled,
# in the future this try-except can be removed
try:
energies = source_bqm.energies((unembedded, variables))
except ValueError:
datatypes = [('sample', np.dtype(np.int8), (len(variables),)), ('energy', np.float64)]
datatypes.extend((name, record[name].dtype, record[name].shape[1:])
for name in record.dtype.names
if name not in {'sample', 'energy'})
if chain_break_fraction:
datatypes.append(('chain_break_fraction', np.float64))
# there are no samples so everything is empty
data = np.rec.array(np.empty(0, dtype=datatypes))
return dimod.SampleSet(data, variables, target_sampleset.info.copy(), target_sampleset.vartype)
reserved = {'sample', 'energy'}
vectors = {name: record[name][idxs]
for name in record.dtype.names if name not in reserved}
if chain_break_fraction:
vectors['chain_break_fraction'] = broken_chains(record.sample, chain_idxs).mean(axis=1)[idxs]
return dimod.SampleSet.from_samples((unembedded, variables),
target_sampleset.vartype,
energy=energies,
info=target_sampleset.info.copy(),
**vectors)
```
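To tie the helpers above together, here is a hedged end-to-end sketch: embed a frustrated triangle onto a square target graph, sample the embedded model with dimod's ExactSolver, and unembed the results back to the source variables. The chain strength of 2.0 is an arbitrary illustrative choice.
```python
import dimod
import networkx as nx

# Source problem: antiferromagnetic triangle
bqm = dimod.BinaryQuadraticModel.from_ising({}, {('a', 'b'): 1, ('b', 'c'): 1, ('a', 'c'): 1})

# Square target graph; source node 'c' is represented by the chain {2, 3}
target = nx.cycle_graph(4)
embedding = {'a': {0}, 'b': {1}, 'c': {2, 3}}

target_bqm = embed_bqm(bqm, embedding, target, chain_strength=2.0)
target_sampleset = dimod.ExactSolver().sample(target_bqm)

source_sampleset = unembed_sampleset(target_sampleset, embedding, bqm,
                                     chain_break_fraction=True)
print(source_sampleset.first.energy)  # -1.0, the triangle's ground-state energy
```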
#### File: dwave-system/tests/test_embedding_chain_breaks.py
```python
import unittest
import numpy as np
import dimod
import dwave.embedding
class TestBrokenChains(unittest.TestCase):
def test_broken_chains_typical(self):
S = np.array([[-1, 1, -1, 1],
[1, 1, -1, -1],
[-1, 1, -1, -1]])
chains = [[0, 1], [2, 3]]
broken = dwave.embedding.broken_chains(S, chains)
np.testing.assert_array_equal([[1, 1], [0, 0], [1, 0]], broken)
def test_broken_chains_chains_length_0(self):
S = np.array([[-1, 1, -1, 1],
[1, 1, -1, -1],
[-1, 1, -1, -1]])
chains = [[0, 1], [], [2, 3]]
broken = dwave.embedding.broken_chains(S, chains)
np.testing.assert_array_equal([[1, 0, 1], [0, 0, 0], [1, 0, 0]], broken)
def test_broken_chains_single_sample(self):
S = [-1, 1, 1, 1]
chains = [[0, 1], [2, 3]]
with self.assertRaises(ValueError):
dwave.embedding.broken_chains(S, chains)
def test_matrix(self):
samples_matrix = np.array([[-1, +1, -1, +1],
[+1, +1, +1, +1],
[-1, -1, +1, -1],
[-1, -1, +1, +1]], dtype='int8')
chain_list = [(0, 1), (2, 3)]
broken = dwave.embedding.broken_chains(samples_matrix, chain_list)
class TestDiscard(unittest.TestCase):
def test_discard_no_breaks_all_ones_identity_embedding(self):
samples_matrix = np.array(np.ones((100, 50)), dtype='int8')
chain_list = [[idx] for idx in range(50)]
new_matrix, idxs = dwave.embedding.discard(samples_matrix, chain_list)
np.testing.assert_equal(new_matrix, samples_matrix)
def test_discard_no_breaks_all_ones_one_var_embedding(self):
samples_matrix = np.array(np.ones((100, 50)), dtype='int8')
chain_list = [[idx for idx in range(50)]]
new_matrix, idxs = dwave.embedding.discard(samples_matrix, chain_list)
self.assertEqual(new_matrix.shape, (100, 1))
def test_discard_typical(self):
samples_matrix = np.array([[-1, +1, -1, +1],
[+1, +1, +1, +1],
[-1, -1, +1, -1],
[-1, -1, +1, +1]], dtype='int8')
chain_list = [(0, 1), (2, 3)]
new_matrix, idxs = dwave.embedding.discard(samples_matrix, chain_list)
np.testing.assert_equal(new_matrix, [[+1, +1],
[-1, +1]])
def test_mixed_chain_types(self):
chains = [(0, 1), [2, 3], {4, 5}]
samples = [[1, 1, 1, 1, 1, 1], [0, 1, 0, 1, 0, 1]]
unembedded, idx = dwave.embedding.discard(samples, chains)
np.testing.assert_array_equal(unembedded, [[1, 1, 1]])
np.testing.assert_array_equal(idx, [0])
class TestMajorityVote(unittest.TestCase):
def test_typical_spin(self):
S = np.array([[-1, +1, -1, +1],
[+1, +1, -1, +1],
[-1, +1, -1, -1]])
chains = [[0, 1, 2], [3]]
samples, idx = dwave.embedding.majority_vote(S, chains)
np.testing.assert_equal(samples, [[-1, +1],
[+1, +1],
[-1, -1]])
def test_typical_binary(self):
S = np.array([[0, 1, 0, 1],
[1, 1, 0, 1],
[0, 1, 0, 0]])
chains = [[0, 1, 2], [3]]
samples, idx = dwave.embedding.majority_vote(S, chains)
np.testing.assert_equal(samples, [[0, 1],
[1, 1],
[0, 0]])
def test_four_chains(self):
S = [[-1, -1, -1, -1],
[+1, -1, -1, -1],
[+1, +1, -1, -1],
[-1, +1, -1, -1],
[-1, +1, +1, -1],
[+1, +1, +1, -1],
[+1, -1, +1, -1],
[-1, -1, +1, -1],
[-1, -1, +1, +1],
[+1, -1, +1, +1],
[+1, +1, +1, +1],
[-1, +1, +1, +1],
[-1, +1, -1, +1],
[+1, +1, -1, +1],
[+1, -1, -1, +1],
[-1, -1, -1, +1]]
chains = [[0], [1], [2, 3]]
samples, idx = dwave.embedding.majority_vote(S, chains)
self.assertEqual(samples.shape, (16, 3))
self.assertEqual(set().union(*samples), {-1, 1}) # should be spin-valued
class TestMinimizeEnergy(unittest.TestCase):
def test_minimize_energy(self):
embedding = {0: (0, 5), 1: (1, 6), 2: (2, 7), 3: (3, 8), 4: (4, 10)}
h = []
j = {(0, 1): -1, (0, 2): 2, (0, 3): 2, (0, 4): -1,
(2, 1): -1, (1, 3): 2, (3, 1): -1, (1, 4): -1,
(2, 3): 1, (4, 2): -1, (2, 4): -1, (3, 4): 1}
bqm = dimod.BinaryQuadraticModel.from_ising(h, j)
solutions = [
[-1, -1, -1, -1, -1, -1, +1, +1, +1, 3, +1],
[+1, +1, +1, +1, +1, -1, +1, -1, -1, 3, -1],
[+1, +1, -1, +1, -1, -1, -1, -1, -1, 3, -1]
]
expected = [
[-1, -1, +1, +1, -1],
[+1, +1, +1, -1, +1],
[-1, -1, -1, +1, -1]
]
cbm = dwave.embedding.MinimizeEnergy(bqm, embedding)
unembedded, idx = cbm(solutions, [embedding[v] for v in range(5)])
np.testing.assert_array_equal(expected, unembedded)
def test_minimize_energy_non_clique(self):
embedding = {0: (0, 5), 1: (1, 6), 2: (2, 7), 3: (3, 8), 4: (4, 10)}
h = []
j = {(0, 1): -1, (0, 2): 2, (0, 3): 2, (0, 4): -1,
(1, 3): 2, (3, 1): -1, (1, 4): -1,
(2, 3): 1, (4, 2): -1, (2, 4): -1, (3, 4): 1}
bqm = dimod.BinaryQuadraticModel.from_ising(h, j)
solutions = [
[-1, -1, -1, -1, -1, -1, +1, +1, +1, 3, +1],
[+1, +1, +1, +1, +1, -1, +1, -1, -1, 3, -1],
[+1, +1, -1, +1, -1, -1, -1, -1, -1, 3, -1]
]
expected = [
[-1, -1, +1, +1, -1],
[+1, +1, +1, -1, +1],
[-1, -1, -1, +1, -1]
]
cbm = dwave.embedding.MinimizeEnergy(bqm, embedding)
unembedded, idx = cbm(solutions, [embedding[v] for v in range(5)])
np.testing.assert_array_equal(expected, unembedded)
def test_minimize_energy_easy(self):
chains = ({0, 1}, [2], (4, 5, 6))
embedding = {v: chain for v, chain in enumerate(chains)}
h = [-1, 0, 0]
j = {}
bqm = dimod.BinaryQuadraticModel.from_ising(h, j)
solutions = [
[-1, -1, +1, 3, -1, -1, -1],
[-1, +1, -1, 3, +1, +1, +1]
]
expected = [
[-1, +1, -1],
[+1, -1, +1]
]
cbm = dwave.embedding.MinimizeEnergy(bqm, embedding)
unembedded, idx = cbm(solutions, chains)
np.testing.assert_array_equal(expected, unembedded)
def test_empty_matrix(self):
chains = []
bqm = dimod.BinaryQuadraticModel.empty(dimod.BINARY)
solutions = [[]]
embedding = {}
cbm = dwave.embedding.MinimizeEnergy(bqm, embedding)
unembedded, idx = cbm(solutions, chains)
np.testing.assert_array_equal([[]], unembedded)
np.testing.assert_array_equal(idx, [0])
def test_empty_chains(self):
embedding = {}
h = []
j = {(0, 1): -1, (0, 2): 2, (0, 3): 2, (0, 4): -1,
(2, 1): -1, (1, 3): 2, (3, 1): -1, (1, 4): -1,
(2, 3): 1, (4, 2): -1, (2, 4): -1, (3, 4): 1}
bqm = dimod.BinaryQuadraticModel.from_ising(h, j)
solutions = [
[-1, -1, -1, -1, -1, -1, +1, +1, +1, 3, +1],
[+1, +1, +1, +1, +1, -1, +1, -1, -1, 3, -1],
[+1, +1, -1, +1, -1, -1, -1, -1, -1, 3, -1]
]
expected = [
[-1, -1, +1, +1, -1],
[+1, +1, +1, -1, +1],
[-1, -1, -1, +1, -1]
]
cbm = dwave.embedding.MinimizeEnergy(bqm, embedding)
unembedded, idx = cbm(solutions, [])
np.testing.assert_array_equal(unembedded, [[], [], []])
```
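A stand-alone sketch of the chain-break resolvers exercised by the tests above, assuming dwave-system is installed. One sample is taken over two chains; the first chain disagrees, so `discard` drops the sample while `majority_vote` keeps it and resolves the tie.
```python
import numpy as np
import dwave.embedding

samples = np.array([[-1, +1, -1, -1]], dtype='int8')  # chain [0, 1] is broken
chains = [[0, 1], [2, 3]]

voted, voted_idx = dwave.embedding.majority_vote(samples, chains)
kept, kept_idx = dwave.embedding.discard(samples, chains)
print(voted.shape)  # (1, 2): the sample is kept, broken chain resolved arbitrarily
print(kept.shape)   # (0, 2): the sample is dropped because a chain is broken
```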
#### File: dwave-system/tests/test_polycutoffcomposite.py
```python
import unittest
import dimod
from dwave.system import PolyCutOffComposite
class CutoffChecker(dimod.PolySampler):
def __init__(self, child_sampler, expected_poly):
self.child = child_sampler
self.poly = expected_poly
def sample_poly(self, poly, **parameters):
assert self.poly == poly, '{} != {}'.format(self.poly, poly)
return self.child.sample_poly(poly, **parameters)
def parameters(self):
return self.child.parameters()
def properties(self):
return self.child.properties()
class TestConstruction(unittest.TestCase):
def test_instantiation_smoketest(self):
sampler = PolyCutOffComposite(dimod.HigherOrderComposite(dimod.ExactSolver()), 0)
self.assertTrue(hasattr(sampler, 'sample_poly'))
self.assertTrue(hasattr(sampler, 'sample_hising'))
self.assertTrue(hasattr(sampler, 'sample_hubo'))
def test_wrap_bqm(self):
with self.assertRaises(TypeError):
PolyCutOffComposite(dimod.ExactSolver(), -1)
class TestSampleHising(unittest.TestCase):
def setUp(self):
self.child = dimod.HigherOrderComposite(dimod.ExactSolver())
def test_empty(self):
h = {}
J = {}
cutoff = 1
expected = dimod.BinaryPolynomial({}, dimod.SPIN)
checker = CutoffChecker(self.child, expected)
samples = PolyCutOffComposite(checker, cutoff).sample_hising(h, J)
self.assertEqual(samples.record.sample.shape[1], 0) # no variables
def test_linear(self):
# they are all isolated
h = {'a': -1, 'b': .5}
J = {}
cutoff = 1
# we cannot check in this case because all variables are isolated
# this results in exactly one variable being sent to ExactSolver and
# we don't know which one it will be, so we just check the correctness
# of the output
samples = PolyCutOffComposite(self.child, cutoff).sample_hising(h, J)
poly = dimod.BinaryPolynomial.from_hising(h, J)
for sample, energy in samples.data(['sample', 'energy']):
self.assertAlmostEqual(energy, poly.energy(sample))
def test_4_path_isolated_tail(self):
h = {}
J = {'ab': -1, 'bc': -.5, 'cd': -.5, 'de': -.5}
cutoff = .75
expected = dimod.BinaryPolynomial({'ab': -1}, dimod.SPIN)
checker = CutoffChecker(self.child, expected)
samples = PolyCutOffComposite(checker, cutoff).sample_hising(h, J)
poly = dimod.BinaryPolynomial.from_hising(h, J)
for sample, energy in samples.data(['sample', 'energy']):
self.assertAlmostEqual(energy, poly.energy(sample))
def test_triangle(self):
h = {'a': -1}
J = {'abde': -1, 'bc': -.5, 'ca': -.5}
cutoff = .75
expected = dimod.BinaryPolynomial({'a': -1, 'abde': -1}, dimod.SPIN)
checker = CutoffChecker(self.child, expected)
samples = PolyCutOffComposite(checker, cutoff).sample_hising(h, J)
poly = dimod.BinaryPolynomial.from_hising(h, J)
for sample, energy in samples.data(['sample', 'energy']):
self.assertAlmostEqual(energy, poly.energy(sample))
# 'c' was isolated, should be 1 when restored with the ground state
self.assertEqual(samples.first.sample['c'], 1)
``` |
{
"source": "joseppinilla/embedding-methods",
"score": 2
} |
#### File: embera/architectures/generators.py
```python
import os
import tarfile
import requests
import dwave.system
import networkx as nx
import dwave_networkx as dnx
__all__ = ['graph_from_solver','dwave_online',
'rainier_graph', 'vesuvius_graph', 'dw2x_graph', 'dw2000q_graph',
'p6_graph', 'p16_graph',
'h20k_graph',
]
""" ========================== D-Wave Solver Solutions ===================== """
def graph_from_solver(solver, **kwargs):
""" D-Wave architecture graph from Dimod Structured Solver
"""
chip_id = solver.properties['chip_id']
sampler = dwave.system.DWaveSampler(solver=chip_id)
target_graph = sampler.to_networkx_graph()
target_graph.graph['chip_id'] = chip_id
return target_graph
def dwave_online(squeeze=True, **kwargs):
""" Architecture graphs from D-Wave devices `online`"""
import dwave.cloud
with dwave.cloud.Client.from_config(**kwargs) as client:
solvers = client.get_solvers()
graphs = [graph_from_solver(s) for s in solvers if s.properties.get('topology')]
if squeeze:
return graphs[0] if len(graphs)==1 else graphs
else:
return graphs
def dwave_collection(name=None):
""" Architecture graphs from current and legacy D-Wave devices
|name | nodes | edges |
| ------------------- |:--------:| ------:|
|Advantage_system1.1 | 5436 | 37440 |
|DW_2000Q_6 | 2041 | 5974 |
|DW_2000Q_5 | 2030 | 5909 |
|DW_2000Q_2_1 | 2038 | 5955 |
|DW_2000Q_QuAIL | 2031 | 5919 |
|DW_2X_LANL | 1141 | 3298 |
Returns list of NetworkX graphs with parameters:
>>> G.graph = {'columns': <int>,
'data': bool,
'family': <string>,
'labels': <string>,
'name': <string>,
'rows': <int>,
'tile': <int>}
"""
graph_list = []
path = "./collection.tar.gz"
url = "http://www.ece.ubc.ca/~jpinilla/resources/embera/architectures/dwave/collection.tar.gz"
# Download
if not os.path.isfile(path):
print(f"-> Downloading D-Wave architecture collection to {path}")
with open(path, 'wb') as f:
response = requests.get(url)
f.write(response.content)
# Unzip, untar, unpickle
with tarfile.open(path) as contents:
for member in contents.getmembers():
f = contents.extractfile(member)
G = nx.read_gpickle(f)
graph_list.append(G)
if name is None:
return graph_list
else:
try:
return next(g for g in graph_list if g.name==name)
except:
raise KeyError("Architecture graph name not found in collection")
""" =========================== D-Wave Architectures ======================= """
def rainier_graph(**kwargs):
""" D-Wave One 'Rainier' Quantum Annealer graph
https://en.wikipedia.org/wiki/D-Wave_Systems
"""
target_graph = dnx.generators.chimera_graph(4, 4, 4, **kwargs)
target_graph.graph['chip_id'] = 'Rainier'
return target_graph
def vesuvius_graph(**kwargs):
""" D-Wave Two 'Vesuvius' Quantum Annealer graph
https://en.wikipedia.org/wiki/D-Wave_Systems
"""
target_graph = dnx.generators.chimera_graph(8, 8, 4, **kwargs)
target_graph.graph['chip_id'] = 'Vesuvius'
return target_graph
def dw2x_graph(**kwargs):
""" D-Wave 2X Quantum Annealer graph
https://en.wikipedia.org/wiki/D-Wave_Systems
"""
target_graph = dnx.generators.chimera_graph(12, 12, 4, **kwargs)
target_graph.graph['chip_id'] = 'DW_2X'
return target_graph
def dw2000q_graph(**kwargs):
""" D-Wave 2000Q Quantum Annealer graph
https://en.wikipedia.org/wiki/D-Wave_Systems
"""
target_graph = dnx.generators.chimera_graph(16, 16, 4, **kwargs)
target_graph.graph['chip_id'] = 'DW_2000Q'
return target_graph
def p6_graph(**kwargs):
""" Pegasus 6 graph
https://www.dwavesys.com/sites/default/files/mwj_dwave_qubits2018.pdf
"""
target_graph = dnx.generators.pegasus_graph(6, **kwargs)
target_graph.graph['chip_id'] = 'P6'
return target_graph
def p16_graph(**kwargs):
""" Pegasus 16 graph
https://www.dwavesys.com/sites/default/files/mwj_dwave_qubits2018.pdf
"""
target_graph = dnx.generators.pegasus_graph(16, **kwargs)
target_graph.graph['chip_id'] = 'P16'
return target_graph
""" ============================== Miscellaneous =========================== """
def h20k_graph(data=True, coordinates=False):
""" HITACHI 20k-Spin CMOS digital annealer graph.
https://ieeexplore.ieee.org/document/7350099/
"""
n, m, t = 128, 80, 2
target_graph = nx.grid_graph(dim=[t, m, n])
target_graph.name = 'hitachi_graph(128,80,2)'
target_graph.graph['chip_id'] = 'HITACHI 20k'
construction = (("family", "hitachi"),
("rows", 5), ("columns", 4),
("data", data),
("labels", "coordinate" if coordinates else "int"))
target_graph.graph.update(construction)
if coordinates:
if data:
for t_node in target_graph:
(z_coord, y_coord, x_coord) = t_node
linear = x_coord + n*(y_coord + m*z_coord)
target_graph.nodes[t_node]['linear_index'] = linear
else:
coordinate_labels = {(x, y, z):x+n*(y+m*z) for (x, y, z) in target_graph}
if data:
for t_node in target_graph:
target_graph.nodes[t_node]['grid_index'] = t_node
target_graph = nx.relabel_nodes(target_graph, coordinate_labels)
return target_graph
```
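A short, hedged usage sketch for the offline generators above (it assumes dwave_networkx is available, as imported at the top of the module): build a few architecture graphs and report their sizes.
```python
for gen in (rainier_graph, dw2000q_graph, p6_graph, h20k_graph):
    G = gen()
    print(G.graph.get('chip_id'), G.number_of_nodes(), G.number_of_edges())
# Rainier is a 4x4 Chimera with 128 qubits; DW_2000Q is a 16x16 Chimera with
# 2048 qubits; graphs obtained online are smaller because of inoperable qubits.
```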
#### File: embera/composites/layout_aware.py
```python
import dimod
import minorminer
from embera import disperse
from embera.preprocess import diffusion_placer
from embera.architectures.generators import dw2000q_graph
from dwave.embedding.transforms import embed_bqm, unembed_sampleset
from dimod.binary_quadratic_model import BinaryQuadraticModel
class LayoutAwareEmbeddingComposite(dimod.ComposedSampler):
def __init__(self, child_sampler, layout = None,
embedding_method=minorminer,
candidates_method=diffusion_placer,
architecture_method=dw2000q_graph,
embedding_parameters={},
candidates_parameters={} ):
if not isinstance(child_sampler, dimod.Structured):
raise dimod.InvalidComposition("LayoutAwareEmbeddingComposite should only be applied to a Structured sampler")
self._children = [child_sampler]
self._layout = layout
self._embedding = None
self._embedding_method = embedding_method
self._candidates_method = candidates_method
self._architecture_method = architecture_method
self._embedding_parameters = embedding_parameters
self._candidates_parameters = candidates_parameters
@property
def children(self):
"""list: Children property inherited from :class:`dimod.Composite` class.
For an instantiated composed sampler, contains the single wrapped structured sampler.
.. _configuration: http://dwave-cloud-client.readthedocs.io/en/latest/#module-dwave.cloud.config
"""
return self._children
@property
def parameters(self):
"""dict[str, list]: Parameters in the form of a dict.
For an instantiated composed sampler, keys are the keyword parameters accepted by the child sampler.
.. _configuration: http://dwave-cloud-client.readthedocs.io/en/latest/#module-dwave.cloud.config
"""
# does not add or remove any parameters
param = self.child.parameters.copy()
param['chain_strength'] = []
param['force_embed'] = []
#TODO: Find a way to display embedding_method.find_embedding parameters
return param
@property
def properties(self):
"""dict: Properties in the form of a dict.
For an instantiated composed sampler, contains one key :code:`'child_properties'` that
has a copy of the child sampler's properties.
.. _configuration: http://dwave-cloud-client.readthedocs.io/en/latest/#module-dwave.cloud.config
"""
properties = {'child_properties': self.child.properties.copy()}
properties['embedding_method'] = self._embedding_method.__name__
return properties
def get_ising_embedding(self, h, J, **parameters):
"""Retrieve or create a minor-embedding from Ising model
"""
bqm = BinaryQuadraticModel.from_ising(h,J)
embedding = self.get_embedding(bqm, **parameters)
return embedding
def get_qubo_embedding(self, Q, **parameters):
"""Retrieve or create a minor-embedding from QUBO
"""
bqm = BinaryQuadraticModel.from_qubo(Q)
embedding = self.get_embedding(bqm, **parameters)
return embedding
def set_embedding(self, embedding):
"""Write to the embedding parameter. Useful if embedding is taken from
a file or independent method.
Args:
embedding (dict):
Dictionary that maps labels in S_edgelist to lists of labels in the
graph of the structured sampler.
"""
self._embedding = embedding
def get_embedding(self, bqm=None, target_edgelist=None, force_embed=False, embedding_parameters={}, candidates_parameters={}):
"""Retrieve or create a minor-embedding from BinaryQuadraticModel
Args:
bqm (:obj:`dimod.BinaryQuadraticModel`):
Binary quadratic model to be sampled from.
target_edgelist (list, optional, default=<Child Structure>):
An iterable of label pairs representing the edges in the target graph.
force_embed (bool, optional, default=False):
If the sampler has an embedding return it. Otherwise, embed problem.
**parameters:
Parameters for the embedding method.
Returns:
embedding (dict):
Dictionary that maps labels in S_edgelist to lists of labels in the
graph of the structured sampler.
"""
child = self.child
layout = self._layout
embedding_method = self._embedding_method
candidates_method = self._candidates_method
architecture_method = self._architecture_method
self._embedding_parameters = embedding_parameters
self._candidates_parameters = candidates_parameters
# add self-loops to edgelist to handle singleton variables
source_edgelist = list(bqm.quadratic) + [(v, v) for v in bqm.linear]
if target_edgelist is None:
_, target_edgelist, _ = child.structure
if force_embed or not self._embedding:
Tg = architecture_method(edge_list=target_edgelist)
candidates = candidates_method.find_candidates(source_edgelist, Tg,
layout=layout,
**candidates_parameters)
embedding = embedding_method.find_embedding(source_edgelist, target_edgelist,
initial_chains = candidates,
**embedding_parameters)
self._candidates = candidates
self._embedding = embedding
if bqm and not self._embedding:
raise ValueError("no embedding found")
return self._embedding
def get_child_response(self):
return self._child_response
def sample(self, bqm, chain_strength=1.0, force_embed=False, chain_break_fraction=True, **parameters):
"""Sample from the provided binary quadratic model.
Args:
bqm (:obj:`dimod.BinaryQuadraticModel`):
Binary quadratic model to be sampled from.
chain_strength (float, optional, default=1.0):
Magnitude of the quadratic bias (in SPIN-space) applied between variables to create
chains. Note that the energy penalty of chain breaks is 2 * `chain_strength`.
force_embed (bool, optional, default=False):
If the sampler has an embedding return it. Otherwise, embed problem.
chain_break_fraction (bool, optional, default=True):
If True, a ‘chain_break_fraction’ field is added to the unembedded response which report
what fraction of the chains were broken before unembedding.
**parameters:
Parameters for the sampling method, specified by the child sampler.
Returns:
:class:`dimod.Response`
"""
# use the given embedding method with the given parameters
embedding_parameters = self._embedding_parameters
candidates_parameters = self._candidates_parameters
# solve the problem on the child system
child = self.child
# apply the embedding to the given problem to map it to the child sampler
__, target_edgelist, target_adjacency = child.structure
# get the embedding
embedding = self.get_embedding(bqm, target_edgelist=target_edgelist,
force_embed=force_embed,
candidates_parameters=candidates_parameters,
embedding_parameters=embedding_parameters)
if bqm and not embedding:
raise ValueError("no embedding found")
bqm_embedded = embed_bqm(bqm, embedding, target_adjacency, chain_strength=chain_strength)
response = child.sample(bqm_embedded, **parameters)
# Store embedded response
self._child_response = response
return unembed_sampleset(response, embedding, source_bqm=bqm,
chain_break_fraction=chain_break_fraction)
```
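A hedged usage sketch of the composite above. It wraps a structured software sampler (dimod's ExactSolver restricted to one Chimera cell) and supplies a made-up 2D layout for the source variables; whether it runs as-is depends on the embera diffusion placer accepting this layout format, so treat it as an illustration of the API rather than a tested recipe.
```python
import dimod
import dwave_networkx as dnx

# Structured stand-in for a QPU: an ExactSolver restricted to a single Chimera cell.
C = dnx.chimera_graph(1)
struct_sampler = dimod.StructureComposite(dimod.ExactSolver(),
                                          list(C.nodes), list(C.edges))

# Hypothetical 2D coordinates for the three source variables.
layout = {'a': (0.0, 0.0), 'b': (1.0, 0.0), 'c': (0.5, 1.0)}

sampler = LayoutAwareEmbeddingComposite(struct_sampler, layout=layout)
bqm = dimod.BinaryQuadraticModel.from_ising({}, {('a', 'b'): -1, ('b', 'c'): -1})
sampleset = sampler.sample(bqm, chain_strength=2.0)
print(sampleset.first.sample)
```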
#### File: embedding-methods/embera/dense.py
```python
import warnings
__all__ = ["find_embedding"]
DEFAULT_CHIMERA = {'family': 'chimera', 'rows': 12, 'columns': 12, 'tile': 4}
def find_embedding(S, T, graph_dict=DEFAULT_CHIMERA, **params):
""" find_embedding(S, T, T_dict, **params)
Heuristically attempt to find a minor-embedding of a graph, representing an
Ising/QUBO, into a target graph.
Args:
S: an iterable of label pairs representing the edges in the source graph
T: an iterable of label pairs representing the edges in the Chimera graph
graph_dict: a dictionary of Chimera graph construction parameters
(default: C12 Vesuvius graph)
**params (optional): see RouterOptions_
Returns:
embedding: a dict that maps labels in S to lists of labels in T
"""
warnings.warn('Work in Progress.')
embedding = {}
return embedding
```
#### File: embera/utilities/random.py
```python
import numpy as np
from random import sample
from random import seed as _py_seed
from numpy.random import choice, shuffle
from numpy.random import multinomial, normal, uniform
from numpy.random import seed as _np_seed
__all__ = ["choice","sample","seed","shuffle",
"prob_vector","bimodal","categorical"]
""" Probability functions and distributions useful in embedding and testing
Ising models.
"""
def seed(a):
_py_seed(a)
_np_seed(a)
def prob_vector(N):
vec = [normal(0, 1) for i in range(N)]
mag = sum(x**2 for x in vec) ** .5
return [(x/mag)**2 for x in vec]
def bimodal(N, loc1=-1.0,scale1=.25,size1=None,
loc2=+1.0,scale2=.25,size2=None):
if size1 is None:
size1=N//2
if size2 is None:
size2=N-size1
samples1 = normal(loc1,scale1,size1)
samples2 = normal(loc2,scale2,size2)
samples = np.concatenate([samples1,samples2])
shuffle(samples)
return samples
def categorical(N, vals):
bins = multinomial(n=N, pvals=prob_vector(len(vals)))
samples = np.array([vals[i] for i,b in enumerate(bins) for _ in range(b)])
shuffle(samples)
return samples
```
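A quick sketch exercising the helpers above; the sizes and the value set are arbitrary.
```python
# Sketch only: exercise the random utilities with arbitrary sizes and values.
from embera.utilities.random import seed, prob_vector, bimodal, categorical

seed(42)
p = prob_vector(4)                                   # 4 non-negative entries that sum to 1
biases = bimodal(10)                                 # ~half the draws near -1, half near +1
couplings = categorical(6, [-2.0, -1.0, 1.0, 2.0])   # 6 draws from the given values
print(sum(p), biases.shape, couplings)
```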
#### File: embedding-methods/tests/test_checkerboard_transform.py
```python
import dimod
import unittest
import dimod.testing as dit
from embera import CheckerboardTransformComposite
from dwave.system import FixedEmbeddingComposite
try:
import dwave_networkx as dnx
_dnx = True
except ImportError:
_dnx = False
class TestCheckerboardTransformComposite(unittest.TestCase):
@unittest.skipUnless(_dnx, "No dwave_networkx package")
def test_instantiation(self):
C = dnx.chimera_graph(2, 2, 4)
for factory in [dimod.ExactSolver, dimod.RandomSampler, dimod.SimulatedAnnealingSampler]:
structsampler = dimod.StructureComposite(factory(),
nodelist=C.nodes(), edgelist=C.edges())
sampler = CheckerboardTransformComposite(structsampler, C)
dit.assert_sampler_api(sampler)
dit.assert_composite_api(sampler)
@unittest.skipUnless(_dnx, "No dwave_networkx package")
def test_transforms_exact(self):
C = dnx.chimera_graph(2, 2, 2)
nodelist = list(C.nodes())
edgelist = list(C.edges())
structsampler = dimod.StructureComposite(dimod.ExactSolver(),
nodelist=nodelist, edgelist=edgelist)
sampler = CheckerboardTransformComposite(structsampler, C,
aggregate=True)
h = {v:0.1 for v in nodelist}
J = {edge:-1.0 for edge in edgelist}
response = sampler.sample_ising(h,J)
# All 4 gauges must return same samples
for datum in response.data():
self.assertEqual(datum.num_occurrences, 4)
dit.assert_response_energies(response, dimod.BinaryQuadraticModel.from_ising(h,J))
@unittest.skipUnless(_dnx, "No dwave_networkx package")
def test_transform_embedded(self):
C = dnx.chimera_graph(1)
nodelist = list(C.nodes())
edgelist = list(C.edges())
structsampler = dimod.StructureComposite(dimod.ExactSolver(),
nodelist=nodelist, edgelist=edgelist)
gauges_sampler = CheckerboardTransformComposite(structsampler, C,
aggregate=True)
sampler = FixedEmbeddingComposite(gauges_sampler, {'a': [0, 4], 'b': [1, 5], 'c': [2, 6]})
h = {'a': .5, 'c': 0}
J = {('a', 'c'): -1}
response = sampler.sample_ising(h,J)
# All 4 gauges must return same samples
for datum in response.data():
self.assertEqual(datum.num_occurrences, 4)
dit.assert_response_energies(response, dimod.BinaryQuadraticModel.from_ising(h,J))
@unittest.skipUnless(_dnx, "No dwave_networkx package")
def test_chimera(self):
C = dnx.chimera_graph(4)
nodelist = list(C.nodes())
edgelist = list(C.edges())
structsampler = dimod.StructureComposite(dimod.RandomSampler(),
nodelist=nodelist, edgelist=edgelist)
Q = {(v,v):0.1 for v in nodelist}
Q.update( {edge:-1.0 for edge in edgelist} )
sampler = CheckerboardTransformComposite(structsampler, C)
response = sampler.sample_qubo(Q, num_reads=1000)
dit.assert_response_energies(response, dimod.BinaryQuadraticModel.from_qubo(Q))
```
#### File: embedding-methods/tests/test_embedding.py
```python
import unittest
import minorminer
import dimod.testing as dtest
from embera.architectures import generators
from embera.composites.embedding import EmbeddingComposite
from dimod.reference.samplers.random_sampler import RandomSampler
from dimod.reference.composites.structure import StructureComposite
class TestEmbeddingComposite(unittest.TestCase):
def test_instantiate_pegasus(self):
# Use the provided architectures
target_graph = generators.p6_graph()
        # Wrap any sampler to make it structured (e.g. Simulated Annealing, Exact), or use a natively structured sampler if available (e.g. a D-Wave machine)
structsampler = StructureComposite(RandomSampler(), target_graph.nodes, target_graph.edges)
sampler = EmbeddingComposite(structsampler, minorminer)
dtest.assert_sampler_api(sampler)
def test_instantiate_chimera(self):
# Use the provided architectures
target_graph = generators.dw2x_graph()
        # Wrap any sampler to make it structured (e.g. Simulated Annealing, Exact), or use a natively structured sampler if available (e.g. a D-Wave machine)
structsampler = StructureComposite(RandomSampler(), target_graph.nodes, target_graph.edges)
sampler = EmbeddingComposite(structsampler, minorminer)
dtest.assert_sampler_api(sampler)
``` |
{
"source": "joseppinilla/pytorchviz",
"score": 2
} |
#### File: pytorchviz/torchviz/dot.py
```python
from collections import namedtuple
from distutils.version import LooseVersion
from graphviz import Digraph
import torch
from torch.autograd import Variable
import warnings
Node = namedtuple('Node', ('name', 'inputs', 'attr', 'op'))
# Saved attrs for grad_fn (incl. saved variables) begin with `._saved_*`
SAVED_PREFIX = "_saved_"
def get_fn_name(fn, show_attrs, max_attr_chars):
name = str(type(fn).__name__)
if not show_attrs:
return name
attrs = dict()
for attr in dir(fn):
if not attr.startswith(SAVED_PREFIX):
continue
val = getattr(fn, attr)
attr = attr[len(SAVED_PREFIX):]
if torch.is_tensor(val):
attrs[attr] = "[saved tensor]"
elif isinstance(val, tuple) and any(torch.is_tensor(t) for t in val):
attrs[attr] = "[saved tensors]"
else:
attrs[attr] = str(val)
if not attrs:
return name
max_attr_chars = max(max_attr_chars, 3)
col1width = max(len(k) for k in attrs.keys())
col2width = min(max(len(str(v)) for v in attrs.values()), max_attr_chars)
sep = "-" * max(col1width + col2width + 2, len(name))
attrstr = '%-' + str(col1width) + 's: %' + str(col2width)+ 's'
truncate = lambda s: s[:col2width - 3] + "..." if len(s) > col2width else s
params = '\n'.join(attrstr % (k, truncate(str(v))) for (k, v) in attrs.items())
return name + '\n' + sep + '\n' + params
def make_dot(var, params=None, show_attrs=False, show_saved=False, max_attr_chars=50):
""" Produces Graphviz representation of PyTorch autograd graph.
If a node represents a backward function, it is gray. Otherwise, the node
represents a tensor and is either blue, orange, or green:
     - Blue: reachable leaf tensors that require grad (tensors whose `.grad`
fields will be populated during `.backward()`)
- Orange: saved tensors of custom autograd functions as well as those
saved by built-in backward nodes
     - Green: tensors passed in as outputs
- Dark green: if any output is a view, we represent its base tensor with
a dark green node.
Args:
var: output tensor
        params: dict of (name, tensor) to add names to nodes that require grad
show_attrs: whether to display non-tensor attributes of backward nodes
(Requires PyTorch version >= 1.9)
        show_saved: whether to display saved tensor nodes that are not saved by custom
autograd functions. Saved tensor nodes for custom functions, if
present, are always displayed. (Requires PyTorch version >= 1.9)
max_attr_chars: if show_attrs is `True`, sets max number of characters
to display for any given attribute.
"""
if LooseVersion(torch.__version__) < LooseVersion("1.9") and \
(show_attrs or show_saved):
warnings.warn(
"make_dot: showing grad_fn attributes and saved variables"
" requires PyTorch version >= 1.9. (This does NOT apply to"
" saved tensors saved by custom autograd functions.)")
if params is not None:
assert all(isinstance(p, Variable) for p in params.values())
param_map = {id(v): k for k, v in params.items()}
else:
param_map = {}
node_attr = dict(style='filled',
shape='box',
align='left',
fontsize='10',
ranksep='0.1',
height='0.2',
fontname='monospace')
dot = Digraph(node_attr=node_attr, graph_attr=dict(size="12,12"))
seen = set()
def size_to_str(size):
return '(' + (', ').join(['%d' % v for v in size]) + ')'
def get_var_name(var, name=None):
if not name:
name = param_map[id(var)] if id(var) in param_map else ''
return '%s\n %s' % (name, size_to_str(var.size()))
def add_nodes(fn):
assert not torch.is_tensor(fn)
if fn in seen:
return
seen.add(fn)
if show_saved:
for attr in dir(fn):
if not attr.startswith(SAVED_PREFIX):
continue
val = getattr(fn, attr)
seen.add(val)
attr = attr[len(SAVED_PREFIX):]
if torch.is_tensor(val):
dot.edge(str(id(fn)), str(id(val)), dir="none")
dot.node(str(id(val)), get_var_name(val, attr), fillcolor='orange')
if isinstance(val, tuple):
for i, t in enumerate(val):
if torch.is_tensor(t):
name = attr + '[%s]' % str(i)
dot.edge(str(id(fn)), str(id(t)), dir="none")
dot.node(str(id(t)), get_var_name(t, name), fillcolor='orange')
if hasattr(fn, 'variable'):
# if grad_accumulator, add the node for `.variable`
var = fn.variable
seen.add(var)
dot.node(str(id(var)), get_var_name(var), fillcolor='lightblue')
dot.edge(str(id(var)), str(id(fn)))
# add the node for this grad_fn
dot.node(str(id(fn)), get_fn_name(fn, show_attrs, max_attr_chars))
# recurse
if hasattr(fn, 'next_functions'):
for u in fn.next_functions:
if u[0] is not None:
dot.edge(str(id(u[0])), str(id(fn)))
add_nodes(u[0])
        # note: this used to show .saved_tensors in PyTorch 0.2, but stopped
        # working when it was moved to ATen and Variable and Tensor were merged;
        # it still works for custom autograd functions
if hasattr(fn, 'saved_tensors'):
for t in fn.saved_tensors:
dot.edge(str(id(t)), str(id(fn)))
dot.node(str(id(t)), get_var_name(t), fillcolor='orange')
def add_base_tensor(var, color='darkolivegreen1'):
if var in seen:
return
seen.add(var)
dot.node(str(id(var)), get_var_name(var), fillcolor=color)
if (var.grad_fn):
add_nodes(var.grad_fn)
dot.edge(str(id(var.grad_fn)), str(id(var)))
if var._is_view():
add_base_tensor(var._base, color='darkolivegreen3')
dot.edge(str(id(var._base)), str(id(var)), style="dotted")
# handle multiple outputs
if isinstance(var, tuple):
for v in var:
add_base_tensor(v)
else:
add_base_tensor(var)
resize_graph(dot)
return dot
# For traces
def replace(name, scope):
return '/'.join([scope[name], name])
def parse(graph):
scope = {}
for n in graph.nodes():
inputs = [i.uniqueName() for i in n.inputs()]
for i in range(1, len(inputs)):
scope[inputs[i]] = n.scopeName()
uname = next(n.outputs()).uniqueName()
assert n.scopeName() != '', '{} has empty scope name'.format(n)
scope[uname] = n.scopeName()
scope['0'] = 'input'
nodes = []
for n in graph.nodes():
attrs = {k: n[k] for k in n.attributeNames()}
attrs = str(attrs).replace("'", ' ')
inputs = [replace(i.uniqueName(), scope) for i in n.inputs()]
uname = next(n.outputs()).uniqueName()
nodes.append(Node(**{'name': replace(uname, scope),
'op': n.kind(),
'inputs': inputs,
'attr': attrs}))
for n in graph.inputs():
uname = n.uniqueName()
if uname not in scope.keys():
scope[uname] = 'unused'
nodes.append(Node(**{'name': replace(uname, scope),
'op': 'Parameter',
'inputs': [],
'attr': str(n.type())}))
return nodes
def make_dot_from_trace(trace):
""" Produces graphs of torch.jit.trace outputs
Example:
>>> trace, = torch.jit.trace(model, args=(x,))
>>> dot = make_dot_from_trace(trace)
"""
# from tensorboardX
if LooseVersion(torch.__version__) >= LooseVersion("0.4.1"):
torch.onnx._optimize_trace(trace, torch._C._onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK)
elif LooseVersion(torch.__version__) >= LooseVersion("0.4"):
torch.onnx._optimize_trace(trace, False)
else:
torch.onnx._optimize_trace(trace)
graph = trace.graph()
list_of_nodes = parse(graph)
node_attr = dict(style='filled',
shape='box',
align='left',
fontsize='12',
ranksep='0.1',
height='0.2')
dot = Digraph(node_attr=node_attr, graph_attr=dict(size="12,12"))
for node in list_of_nodes:
dot.node(node.name, label=node.name.replace('/', '\n'))
if node.inputs:
for inp in node.inputs:
dot.edge(inp, node.name)
resize_graph(dot)
return dot
def resize_graph(dot, size_per_element=0.15, min_size=12):
"""Resize the graph according to how much content it contains.
Modify the graph in place.
"""
# Get the approximate number of nodes and edges
num_rows = len(dot.body)
content_size = num_rows * size_per_element
size = max(min_size, content_size)
size_str = str(size) + "," + str(size)
dot.graph_attr.update(size=size_str)
``` |
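A typical `make_dot` call, sketched with a throwaway two-layer model; only the import path is taken from this file, everything else is illustrative.
```python
# Sketch only: visualize the autograd graph of a small model and render it with graphviz.
import torch
import torch.nn as nn
from torchviz.dot import make_dot

model = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 1))
x = torch.randn(1, 8)
y = model(x)

dot = make_dot(y, params=dict(model.named_parameters()))
dot.render("model_graph", format="png")  # writes model_graph.png (requires graphviz)
```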
{
"source": "joseppinilla/qca-tools",
"score": 2
} |
#### File: qca-tools/examples/exact_sample.py
```python
import os
import pickle
import itertools
import networkx as nx
import matplotlib.pyplot as plt
from embedding_methods.utilities.graph_mmio import read_networkx
from embedding_methods.utilities.graph_mmio import write_networkx
from qca_tools.qca_network import QCANetworkX
from dimod.reference.samplers.exact_solver import ExactSolver
from dimod.reference.samplers.simulated_annealing import SimulatedAnnealingSampler
bench_dir = './benchmarks/'
#solver = 'dwave'
#solver = 'exact'
solver = 'sa'
def sample(mm_name, dir):
Sg = read_networkx(mm_name,mm_dir=dir)
pos = Sg.graph['pos']
plt.figure(1)
plt.clf()
nx.draw(Sg, pos=pos, with_labels=True, node_shape='s')
plt.gca().invert_yaxis()
plt.savefig(dir + 'problem.png')
h = {}
J = {}
for u,v,data in Sg.edges(data=True):
if u==v:
h[u] = data['weight']
else:
J[(u,v)] = data['weight']
if solver=='sa':
sampler = SimulatedAnnealingSampler()
elif solver=='exact':
sampler = ExactSolver()
elif solver=='dwave':
#TODO: EmbeddingComposite
sampler = DWaveSampler()
if solver=='exact':
response = sampler.sample_ising(h,J)
else:
response = sampler.sample_ising(h,J,num_reads=1)
with open(dir + 'problem.pkl','wb') as fp:
pickle.dump(Sg, fp)
with open(dir + 'response.pkl','wb') as fp:
pickle.dump(response, fp)
# energies = [datum.energy for datum in response.data()]
# plt.figure(2)
# _ = plt.hist(energies, bins=100)
# plt.savefig('response.png')
datum = next(response.data())
sample = datum.sample
plt.figure(3)
plt.clf()
nx.draw(Sg, pos=pos, labels=sample, with_labels=True, node_shape='s')
_ = nx.draw_networkx_edge_labels(Sg,pos=pos,edge_labels=J)
plt.gca().invert_yaxis()
plt.savefig(dir + 'ground_state.png')
if __name__ == "__main__":
EXHAUSTIVE = False
benchmarks = []
benchmarks.append('NOT_FT')
benchmarks.append('MAJ5_A')
benchmarks.append('MAJ5_B')
benchmarks.append('MUX')
benchmarks.append('COPLANARX')
# Vector inputs
vectors = { 'NOT_FT': [{'A':-1}],
'MAJ5_A': [{'A':-1, 'B':1, 'C':1, 'D':-1, 'E':1}],
'MAJ5_B': [{'A':-1, 'B':1, 'C':1, 'D':-1, 'E':1}],
'MUX': [{'S':-1,'A':1,'B':-1}],
'COPLANARX': [{'A':-1,'X':1}]
}
for name in benchmarks:
if EXHAUSTIVE:
vector0 = vectors[name][0]
vector_size = len(vector0)
inputs = list(vector0.keys())
combs = itertools.product([-1,1], repeat=vector_size)
pols = [ dict(zip(inputs,comb)) for comb in combs ]
else:
pols = vectors[name]
# run
for i, pol in enumerate(pols):
dir = bench_dir + name + solver + str(i) + '/'
if not os.path.exists(dir):
os.makedirs(dir)
G = QCANetworkX(bench_dir+name+'.qca', pols=pol)
G.draw_qca()
comments = "Source: %s\nPolarizations: %s" % (name, pol)
write_networkx(G, pos=G.pos, mtx_name=name, mm_dir=dir, comment=comments)
sample(name, dir)
```
#### File: qca_tools/composite/qca_sampler.py
```python
import dimod
from os.path import isfile
from qca_tools.qca_network import QCANetwork
R_MAX = 2.1
class QCAComposite(dimod.ComposedSampler):
def __init__(self, child_sampler, qca_filename, pols={}, use_ancilla=True, r_max=R_MAX):
if not isfile(qca_filename):
raise ValueError("QCA input file not found")
self._qca_filename = qca_filename
self._use_ancilla = use_ancilla
self._children = [child_sampler]
self._QCA = QCANetwork(qca_filename,r_max=r_max)
@property
def children(self):
"""list: Children property inherited from :class:`dimod.Composite` class.
For an instantiated composed sampler, contains the single wrapped structured sampler.
.. _configuration: http://dwave-cloud-client.readthedocs.io/en/latest/#module-dwave.cloud.config
"""
return self._children
@property
def parameters(self):
"""dict[str, list]: Parameters in the form of a dict.
For an instantiated composed sampler, keys are the keyword parameters accepted by the child sampler.
.. _configuration: http://dwave-cloud-client.readthedocs.io/en/latest/#module-dwave.cloud.config
"""
# does not add or remove any parameters
param = self.child.parameters.copy()
return param
@property
def properties(self):
"""dict: Properties in the form of a dict.
For an instantiated composed sampler, contains one key :code:`'child_properties'` that
has a copy of the child sampler's properties.
.. _configuration: http://dwave-cloud-client.readthedocs.io/en/latest/#module-dwave.cloud.config
"""
properties = {'child_properties': self.child.properties.copy()}
properties['qca_filename'] = self._qca_filename
properties['use_ancilla'] = self._use_ancilla
return properties
    def get_outputs(self):
        return self.outputs
def get_qca_network(self):
return self._QCA
def sample(self, **parameters):
child = self.child
QCA = self._QCA
response = child.sample(QCA)
return response
``` |
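A hedged sketch of wrapping a reference sampler around a QCA design; the child sampler choice and the `.qca` path (following the benchmark layout in `exact_sample.py` above) are assumptions.
```python
# Sketch only: build the composite from a .qca file and delegate sampling to the child sampler.
import dimod
from qca_tools.composite.qca_sampler import QCAComposite

sampler = QCAComposite(dimod.SimulatedAnnealingSampler(), './benchmarks/MUX.qca')
network = sampler.get_qca_network()  # parsed QCANetwork built in __init__
response = sampler.sample()          # forwards the network to the child sampler
```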
{
"source": "josepquintana/HackUPC-2019",
"score": 3
} |
#### File: HackUPC-2019/src/LoadData.py
```python
import pandas as pd
import numpy as np
from pathlib import Path
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
class AccidentsData:
def __init__(self):
filename = Path('../data/accidents.csv')
if not filename.exists():
print('\nERROR: Missing dataset file: accidents.csv\n')
quit()
accidents = pd.read_csv(filename)
        # Drop columns preliminarily considered irrelevant
accidents = accidents.drop(columns=['police_force', 'local_authority_district', 'local_authority_highway',
'lsoa_of_accident_location', 'location_easting_osgr',
'location_northing_osgr'])
# One hot encoding
accidents = pd.get_dummies(accidents, columns=['1st_road_class', 'junction_detail', 'junction_control',
'2nd_road_class', 'pedestrian_crossing-human_control',
'pedestrian_crossing-physical_facilities', 'light_conditions',
'road_surface_conditions',
'special_conditions_at_site', 'carriageway_hazards'])
        # Drop one-hot columns whose associated condition is unknown
cols_acaben_menysu = []
for colname in accidents.columns:
if colname[-3:] == '_-1':
cols_acaben_menysu.append(colname)
accidents = accidents.drop(columns=cols_acaben_menysu)
numeritza = {'urban_or_rural_area': {'Urban': 1,
'Rural': 0}
}
accidents.replace(numeritza, inplace=True)
        # If there is no exceptional condition, the indicator column is irrelevant
        accidents = accidents.drop(columns=['special_conditions_at_site_None',
                                            'carriageway_hazards_None',
                                            '1st_road_class_Unclassified',
                                            '2nd_road_class_Unclassified'])
        # Convert hh:mm:00 to minutes since midnight
accidents['time'] = accidents['time'].apply(lambda s: int(s[:-4]) * 60 + int(s[-2:]))
        # Convert the yyyy-mm-dd date to an approximate day of the year
accidents['date'] = accidents['date'].apply(lambda s: int(s[7:9]) + int(s[-2:-1]) * 30.44)
        # Replace -1 values with the column mean
accidents['2nd_road_number'].replace(-1, np.nan, inplace=True)
accidents['2nd_road_number'].fillna(accidents['2nd_road_number'].mean(), inplace=True)
        # Normalize the columns that need it
tobenorm = ['longitude', 'latitude', 'number_of_vehicles', 'number_of_casualties', 'date', 'time',
'1st_road_number',
'road_type', 'speed_limit', '2nd_road_number', 'weather_conditions']
norm = MinMaxScaler()
accidents[tobenorm] = norm.fit_transform(accidents[tobenorm])
#self.features = accidents.drop('target', axis=1)
self.Xtrain, self.Xtest, self.ytrain, self.ytest = train_test_split(accidents.drop('target', axis=1),
accidents['target'], train_size=.7)
def get_Xtrain(self):
return self.Xtrain
def get_Xtest(self):
return self.Xtest
def get_ytrain(self):
return self.ytrain
def get_ytest(self):
return self.ytest
class VehiclesData:
def __init__(self):
filename = Path('../data/vehicles.csv')
if not filename.exists():
print('\nERROR: Missing dataset file: vehicles.csv\n')
quit()
vehicles = pd.read_csv(filename)
vehicles = vehicles.drop(columns=['Vehicle_IMD_Decile'])
vehicles = pd.get_dummies(vehicles, columns=['Vehicle_Type', 'Towing_and_Articulation', 'Vehicle_Manoeuvre',
'Vehicle_Location-Restricted_Lane', 'Junction_Location',
'Skidding_and_Overturning', 'Hit_Object_in_Carriageway',
'Vehicle_Leaving_Carriageway', 'Hit_Object_off_Carriageway',
'1st_Point_of_Impact',
'Journey_Purpose_of_Driver', 'Propulsion_Code',
'Driver_IMD_Decile', 'Driver_Home_Area_Type'])
cols_acabenmenysu = []
for colname in vehicles.columns:
if colname[-3:] == '_-1' or colname[-5:] == '_-1.0':
cols_acabenmenysu.append(colname)
vehicles = vehicles.drop(columns=cols_acabenmenysu)
vehicles = vehicles.drop(vehicles[vehicles.Age_of_Driver < 15].index)
vehicles['Engine_Capacity_(CC)'].replace(-1, np.nan, inplace=True)
vehicles['Engine_Capacity_(CC)'].replace('-1', np.nan, inplace=True)
vehicles['Engine_Capacity_(CC)'].fillna(vehicles['Engine_Capacity_(CC)'].mean(), inplace=True)
vehicles['Age_of_Driver'].replace(-1, np.nan, inplace=True)
vehicles['Age_of_Driver'].replace('-1', np.nan, inplace=True)
vehicles['Age_of_Driver'].fillna(vehicles['Age_of_Driver'].mean(), inplace=True)
vehicles['Age_of_Vehicle'].replace(-1, np.nan, inplace=True)
vehicles['Age_of_Vehicle'].fillna(vehicles['Age_of_Vehicle'].mean(), inplace=True)
vehicles['Was_Vehicle_Left_Hand_Drive?'].replace(-1, np.nan, inplace=True)
vehicles['Was_Vehicle_Left_Hand_Drive?'].replace('-1', np.nan, inplace=True)
vehicles['Sex_of_Driver'].replace(-1, np.nan, inplace=True)
vehicles['Sex_of_Driver'].replace('-1', np.nan, inplace=True)
vehicles['Sex_of_Driver'].replace('Not known', np.nan, inplace=True)
dicvehicles = {'Sex_of_Driver': {'Male': 1.0, 'Female': 0.0},
'Was_Vehicle_Left_Hand_Drive?': {'Yes': 1.0, 'No': 0.0}
}
vehicles.replace(dicvehicles, inplace=True)
vehicles['Was_Vehicle_Left_Hand_Drive?'].fillna(vehicles['Was_Vehicle_Left_Hand_Drive?'].mean(), inplace=True)
vehicles['Sex_of_Driver'].fillna(vehicles['Sex_of_Driver'].mean(), inplace=True)
tobenorm = ['Age_of_Driver', 'Engine_Capacity_(CC)', 'Age_of_Vehicle']
norm = MinMaxScaler()
vehicles[tobenorm] = norm.fit_transform(vehicles[tobenorm])
self.valors = vehicles
def get_valors(self):
return self.valors
class MergedData:
def __init__(self, accidents, vehicles):
acctarg_train = pd.concat([accidents.get_Xtrain(), accidents.get_ytrain()], axis=1)
acctarg_test = pd.concat([accidents.get_Xtest(), accidents.get_ytest()], axis=1)
merged_train = pd.merge(acctarg_train, vehicles.get_valors(), on='accident_id')
merged_test = pd.merge(acctarg_test, vehicles.get_valors(), on='accident_id')
self.target_train = merged_train['target']
self.target_test = merged_test['target']
self.merged_train = merged_train.drop('target', axis=1)
self.merged_test = merged_test.drop('target', axis=1)
def get_merged_train(self):
return self.merged_train
def get_target_train(self):
return self.target_train
def get_merged_test(self):
return self.merged_test
def get_target_test(self):
return self.target_test
```
#### File: HackUPC-2019/src/Main.py
```python
import os
import LoadData as ld
import Training as tr
import numpy as np
import pandas as pd
from sklearn.metrics import f1_score
def main():
#os.chdir('../') # Set working directory
print("\nStarting program.\n")
print("Loading data...\n")
accidents_data = ld.AccidentsData()
vehicles_data = ld.VehiclesData()
merged_data = ld.MergedData(accidents_data, vehicles_data)
X_test = merged_data.get_merged_test()
y_test = merged_data.get_target_test()
X_train = merged_data.get_merged_train()
y_train = merged_data.get_target_train()
print("Available Models:\n")
print("1. K-nearest Neighbors")
print("2. Stochastic Gradient Descent Classifier")
print("3. Decision Tree Classifier")
print("4. Random Forest Classifier")
print("5. C-Support Vector Classification")
print("6. Logistic Regression")
print("7. Multi-Layer Perceptron Classifier")
print("\n")
mode = input("Choose Training Model: ")
print('\nTraining model...\n')
training = tr.Training(X_train, y_train)
if mode == "1":
training.knnTraining()
elif mode == "2":
training.sgdClassifierTraining()
elif mode == "3":
training.decisionTreeTraining()
elif mode == "4":
training.supportVectorMachinesTraining()
elif mode == "5":
training.supportVectorMachinesTraining()
elif mode == "6":
training.logisticRegressionTraining()
elif mode == "7":
training.mlpTraining()
else:
print("Bye!")
quit()
print('Calculating prediction...')
y_pred = training.model.predict(X_test.drop('accident_id', axis=1))
print('F1 score = ', f1_score(y_test,y_pred))
main()
``` |
{
"source": "josepquintana/tum-ucc-ml",
"score": 3
} |
#### File: tum-ucc-ml/src/preprocessing.py
```python
import pandas as pd
import os.path
import html
import spacy
import time
from string import punctuation
from langdetect import detect
from google.cloud import translate_v2 as translate
# data
TICKETS = pd.read_csv(f'./data/tickets/tickets.csv')
STATUS_LOG = pd.read_csv(f'./data/tickets/status_log.csv')
# google translate setup
CREDENTIALS_PATH = './translation-credentials.json'
CLIENT = (
translate.Client.from_service_account_json(CREDENTIALS_PATH)
if os.path.exists(CREDENTIALS_PATH)
else None
)
# spacy setup
NLP = spacy.load('de_core_news_md')
def preprocess(force=False, test=False, translate=False):
"""Preprocess ticket data into a pandas dataframe ready for futher work."""
s = './data/messages.parquet'
if force or test or not os.path.exists(s):
messages = TICKETS[[
'ID',
'Bearbeiter',
'Angelegt Am',
'Kategorie ID',
]].dropna()
messages = messages.rename(columns={
'ID': 'id',
'Bearbeiter': 'operator',
'Angelegt Am': 'timestamp',
'Kategorie ID': 'category',
})
if test:
messages = messages[:100]
ops = messages.groupby('operator')['operator'].agg(['count'])
ops = ops.reset_index().sort_values('count', ascending=False)
ops = list(ops['operator'][:-10])
messages = messages[messages['operator'].apply(lambda x: x in ops)]
messages['category'] = messages['category'].apply(lambda x: x.strip())
messages = messages[messages['category'].str.len() > 0]
messages['timestamp'] = pd.to_datetime(
messages['timestamp'],
infer_datetime_format=True,
utc=True,
)
# messages['year'] = messages['timestamp'].apply(lambda x: x.year)
messages['text'] = messages['id'].apply(
lambda x: get_first_message(fetch_ticket(x)),
)
messages = messages.dropna()
messages['text'] = messages['text'].apply(lambda x: clean(x))
messages['language'] = messages['text'].apply(
lambda x: detect_language(x),
)
messages['translated-text'] = messages.apply(
lambda row: (
row['text']
if not translate or row['language'] == 'de'
else translate_to_german(row['text'])
),
axis=1,
)
'''
messages['text'] = messages['text'].apply(
lambda x: ' '.join(extract_keywords(x)),
)
'''
messages = messages.reset_index(drop=True)
messages.to_parquet(s)
return messages
return pd.read_parquet(s)
def fetch_ticket(identifier):
"""Return data of ticket with given identifier as pandas dataframe."""
try:
return pd.read_csv(f'./data/tickets/{identifier}.csv')
except:
return None
def get_first_message(ticket_data):
"""Get first real message in ticket conversations.
    Sometimes there are weird first messages contained in the ticket CSVs
    that start with SY-SYSID.
"""
if ticket_data is None:
return None
starts = ['SY-DBSYS', 'SY-SYSID']
for index, row in ticket_data.iterrows():
if not any([row['Text'].startswith(start) for start in starts]):
# filter out test messages like `2000000151`: "Es brennt"
if (
len(row['Text']) < 100
or row['Nachrichtentyp'].strip() != 'Beschreibung'
):
return None
return row['Text']
def clean(text):
"""Clean text of any weird characters."""
words = text.replace('\n', ' ').split()
out = []
for word in words:
word = word.strip('/-_<>&')
if word:
out.append(word)
return ' '.join(out)
def detect_language(text):
"""Detect the language of the given text."""
return detect(text)
def translate_to_german(text):
"""Translate text from any language to german via Google Translate API."""
assert CLIENT, 'no Google Translate credentials provided'
time.sleep(0.2) # rate limiting
s = CLIENT.translate(text, target_language='de')['translatedText']
return html.unescape(s)
def extract_keywords(text):
"""Extract and clean most important words in the text."""
tag = ['PROPN', 'ADJ', 'NOUN', 'VERB', 'ADV', 'NUM']
doc = NLP(text.lower())
result = []
for token in doc:
if(token.text in NLP.Defaults.stop_words or token.text in punctuation):
continue
if(token.pos_ in tag):
result.append(token.lemma_)
return result
``` |
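For reference, a sketch of what `clean` does to a made-up snippet; words that consist only of stripped characters are dropped.
```python
# Sketch only: clean() strips '/-_<>&' from word boundaries and drops words that become empty.
raw = "Hallo,\n<betreff> VPN-Zugang funktioniert nicht -/ bitte um Hilfe &"
print(clean(raw))
# -> "Hallo, betreff VPN-Zugang funktioniert nicht bitte um Hilfe"
```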
{
"source": "joseprsm/gutenberg",
"score": 3
} |
#### File: gutenberg/app/routes.py
```python
from typing import List, Any, Dict
import openai
from fastapi import APIRouter, Depends
from sqlalchemy.orm import Session
from gutenberg.app.db import get_db
from gutenberg.app import models
from gutenberg.app import schemas
router = APIRouter()
@router.post('/')
def predict(prompt: schemas.Prompt, db: Session = Depends(get_db)):
db_prompt = models.Prompt(
item_name=prompt.item_name,
item_description=prompt.item_description,
target_audience=prompt.target_audience,
platform=prompt.platform
)
db.add(db_prompt)
db.commit()
db.refresh(db_prompt)
predictions: List[Dict[str, Any]] = openai.Completion.create(
engine="text-davinci-001",
prompt=generate_prompt(
prompt.item_name,
prompt.item_description,
prompt.platform,
prompt.target_audience),
temperature=0.6, n=5, max_tokens=1000
).choices
for pred in predictions:
pred_text = pred['text']
db_pred = models.Prediction(prompt_id=db_prompt.id, text=pred_text)
db.add(db_pred)
db.commit()
db.refresh(db_pred)
return {
'choices': [
pred['text'] for pred in predictions
]}
@router.get('/')
def get_predictions(db: Session = Depends(get_db), skip: int = 0, limit: int = 20):
return db.query(models.Prediction).offset(skip).limit(limit).all()
def generate_prompt(name, description, platform, target_audience):
return f"""
Write an ad for the following product to run on {platform} aimed at {target_audience}:
Product: {name}. {description}.
"""
```
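The template above expands as in this sketch; the product, platform, and audience values are invented.
```python
# Sketch only: what generate_prompt produces for made-up inputs.
from gutenberg.app.routes import generate_prompt

print(generate_prompt(
    name="Trailblazer 40L",
    description="A lightweight waterproof hiking backpack",
    platform="Instagram",
    target_audience="weekend hikers",
))
# prints (modulo leading whitespace):
# Write an ad for the following product to run on Instagram aimed at weekend hikers:
# Product: Trailblazer 40L. A lightweight waterproof hiking backpack.
```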
#### File: gutenberg/models/recommender.py
```python
from typing import List
import tensorflow as tf
import tensorflow_recommenders as tfrs
from gutenberg.models.text import TextModel
from gutenberg.models.query import QueryModel
class Recommender(tfrs.Model):
def __init__(self,
nb_users: int,
prediction_layer_sizes: List[int] = None,
rating_layer_sizes: List[int] = None,
rating_activation_fn: str = 'relu'):
super().__init__()
self._nb_users = nb_users
self._rating_activation_fn = rating_activation_fn
self._rating_layer_sizes = rating_layer_sizes or [128, 64, 32]
self._prediction_layer_sizes = prediction_layer_sizes or [256, 128, 64]
self.query_model = QueryModel(nb_users)
self.prediction_model = TextModel(layer_sizes=self._prediction_layer_sizes)
self.rating_model = tf.keras.Sequential([
tf.keras.layers.Dense(num_units, activation=rating_activation_fn)
for num_units in self._rating_layer_sizes
])
self.rating_model.add(tf.keras.layers.Dense(1))
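        # The loss in compute_loss below references rating/retrieval tasks and weights
        # that are not defined elsewhere in this file; the following defaults are an
        # assumed, minimal completion (the weights and loss/metric choices are guesses).
        self.rating_weight = 1.0
        self.retrieval_weight = 1.0
        self.rating_task = tfrs.tasks.Ranking(
            loss=tf.keras.losses.MeanSquaredError(),
            metrics=[tf.keras.metrics.RootMeanSquaredError()])
        self.retrieval_task = tfrs.tasks.Retrieval()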
def compute_loss(self, inputs, training: bool = False) -> tf.Tensor:
ratings = inputs.pop('rating')
query_embeddings, candidate_embeddings, rating_predictions = self(inputs)
rating_loss = self.rating_task(labels=ratings, predictions=rating_predictions)
retrieval_loss = self.retrieval_task(query_embeddings, candidate_embeddings)
return self.rating_weight * rating_loss + self.retrieval_weight * retrieval_loss
def call(self, inputs, *_):
query_embeddings = self.query_model(inputs) # (None, 64)
candidate_embeddings = self.prediction_model(inputs['prediction']) # (None, 64)
x = tf.concat([query_embeddings, candidate_embeddings], axis=1) # (None, 128)
rating_predictions = self.rating_model(x) # (None, 1)
return query_embeddings, candidate_embeddings, rating_predictions
def get_config(self):
return {
'nb_users': self._nb_users,
'rating_layer_sizes': self._rating_layer_sizes,
'rating_activation_fn': self._rating_activation_fn,
}
```
#### File: gutenberg/models/user.py
```python
import tensorflow as tf
class UserModel(tf.keras.Model):
def __init__(self, input_dim: int, embedding_dim: int = 32, **kwargs):
super().__init__(**kwargs)
self._input_dim = input_dim
self._embedding_dim = embedding_dim
self.hashing_layer = tf.keras.layers.Hashing(input_dim)
self.embedding_layer = tf.keras.layers.Embedding(input_dim, embedding_dim)
def call(self, inputs, *_):
x = self.hashing_layer(inputs) # (None, 1)
x = self.embedding_layer(x) # (None, 32)
return x
def get_config(self):
return {
'input_dim': self._input_dim,
'embedding_dim': self._embedding_dim
}
``` |
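A small sketch of the hashing-plus-embedding lookup above; the vocabulary size and the raw ids are arbitrary.
```python
# Sketch only: identical raw ids hash to the same bucket and therefore the same embedding row.
import tensorflow as tf
from gutenberg.models.user import UserModel

user_model = UserModel(input_dim=1_000, embedding_dim=32)
ids = tf.constant(["user_42", "user_7", "user_42"])
embeddings = user_model(ids)
print(embeddings.shape)  # (3, 32)
```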
{
"source": "joseprsm/rexify",
"score": 2
} |
#### File: rexify/models/candidate.py
```python
from typing import Dict, Any, List
import tensorflow as tf
from rexify.models.tower import Tower
class CandidateModel(Tower):
def __init__(
self,
schema: Dict[str, str],
params: Dict[str, Dict[str, Any]],
layer_sizes: List[int],
activation: str = "relu",
):
super(CandidateModel, self).__init__(
schema=schema, params=params, layer_sizes=layer_sizes, activation=activation
)
def call_feature_models(self, inputs: Dict[str, tf.Tensor]) -> List[tf.Tensor]:
return [
model(inputs[feature_name])
for feature_name, model in self.feature_models.items()
]
```
#### File: rexify/models/candidate_test.py
```python
import tensorflow as tf
class CandidateTestCase(tf.test.TestCase):
def test_something(self):
self.assertEqual(True, True) # add assertion here
if __name__ == "__main__":
tf.test.main()
```
#### File: rexify/models/recommender_test.py
```python
import tensorflow as tf
from rexify.models.recommender import Recommender
from rexify.features.sequence import slide_transform
from tests.utils import get_sample_params, get_sample_schema, load_mock_events
class RecommenderTest(tf.test.TestCase):
def setUp(self):
super(RecommenderTest, self).setUp()
self._recommender_args = {
"schema": get_sample_schema(),
"params": get_sample_params(),
"layer_sizes": [64, 32],
"activation": "relu",
}
self.model = Recommender(**self._recommender_args)
def testCall(self):
events = load_mock_events()
inputs = slide_transform(events, self._recommender_args["schema"])
inputs = list(inputs.batch(1).take(1))[0]
query_embeddings, candidate_embeddings = self.model(inputs)
self.assertIsInstance(query_embeddings, tf.Tensor)
self.assertIsInstance(candidate_embeddings, tf.Tensor)
self.assertEqual(query_embeddings.shape, tf.TensorShape([1, 32]))
self.assertEqual(candidate_embeddings.shape, tf.TensorShape([1, 32]))
# query_embedding_1 = self.model.query_model(tf.constant([[1]]))
# self.assertTrue((tf.reduce_sum(tf.cast(
# query_embeddings == query_embedding_1,
# tf.int32)) == 32).numpy())
def testComputeLoss(self):
events = load_mock_events()
mock_data = slide_transform(events, self._recommender_args["schema"])
loss = self.model.compute_loss(list(mock_data.batch(1).take(1))[0])
self.assertIsInstance(loss, tf.Tensor)
self.assertEqual(loss.shape, tf.TensorShape([]))
def testConfig(self):
self.assertIsInstance(self.model.get_config(), dict)
if __name__ == "__main__":
tf.test.main()
```
#### File: rexify/pipeline/train.py
```python
from typing import Text, List
import os
import tensorflow as tf
from tensorflow_metadata.proto.v0 import schema_pb2
from tensorflow_transform.tf_metadata import schema_utils
from tfx import v1 as tfx
from tfx_bsl.public import tfxio
from rexify.models import Recommender
BATCH_SIZE = os.environ.get("BATCH_SIZE", 512)
FEATURE_KEYS = ["userId", "itemId"]
FEATURE_SPEC = {
feature: tf.io.FixedLenFeature(shape=(1,), dtype=tf.int64)
for feature in FEATURE_KEYS
}
def _input_fn(
file_pattern: List[Text],
data_accessor: tfx.components.DataAccessor,
schema: schema_pb2.Schema,
batch_size: int = 512,
) -> tf.data.Dataset:
# todo: dataset factory not working properly
return data_accessor.tf_dataset_factory(
file_pattern, tfxio.TensorFlowDatasetOptions(batch_size=batch_size), schema
).repeat()
def run_fn(fn_args: tfx.components.FnArgs):
layer_sizes = [64, 32]
activation = "leaky_relu"
schema = schema_utils.schema_from_feature_spec(FEATURE_SPEC)
training_data: tf.data.Dataset = _input_fn(
file_pattern=fn_args.train_files,
data_accessor=fn_args.data_accessor,
schema=schema,
batch_size=512,
)
nb_users = 10_000 # len(training_data.map(lambda x: x['userId']).apply(tf.data.experimental.unique))
nb_items = 10_000 # len(training_data.map(lambda x: x['itemId']).apply(tf.data.experimental.unique))
query_params = {
"schema": {"userId": "categorical"},
"layer_sizes": layer_sizes,
"activation": activation,
"params": {"userId": {"input_dim": nb_items, "embedding_dim": 16}},
}
candidate_params = {
"schema": {"itemId": "categorical"},
"layer_sizes": layer_sizes,
"activation": activation,
"params": {"itemId": {"input_dim": nb_users, "embedding_dim": 32}},
}
model: Recommender = Recommender(
query_params=query_params,
candidate_params=candidate_params,
layer_sizes=layer_sizes,
activation=activation,
)
model.compile(optimizer=tf.keras.optimizers.Adam(0.2))
model.fit(training_data, steps_per_epoch=fn_args.train_steps)
model.save(fn_args.serving_model_dir, save_format="tf")
```
#### File: rexify/tests/test_pipeline.py
```python
import os
# from tfx.orchestration.local.local_dag_runner import LocalDagRunner
from tfx.orchestration.pipeline import Pipeline
from rexify import pipeline
PIPELINE_NAME = "rexify_test"
PIPELINE_ROOT = os.path.join("pipelines", PIPELINE_NAME)
METADATA_PATH = os.path.join("metadata", PIPELINE_NAME, "metadata.db")
SERVING_MODEL_DIR = os.path.join("serving_model", PIPELINE_NAME)
ppl = pipeline.build(
pipeline_name=PIPELINE_NAME,
pipeline_root=PIPELINE_ROOT,
data_root="data/events",
items_root="data/items",
run_fn="rexify.train.run_fn",
schema={"userId": "", "itemId": ""},
serving_model_dir=SERVING_MODEL_DIR,
metadata_path=METADATA_PATH,
)
def test_pipeline_components():
assert isinstance(ppl, Pipeline)
assert len(ppl.components) > 0
``` |
{
"source": "joseprupi/quantragrpc",
"score": 2
} |
#### File: python/quantra/FloatingRateBond.py
```python
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class FloatingRateBond(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = FloatingRateBond()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsFloatingRateBond(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
# FloatingRateBond
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# FloatingRateBond
def SettlementDays(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
# FloatingRateBond
def FaceAmount(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float64Flags, o + self._tab.Pos)
return 0.0
# FloatingRateBond
def Schedule(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from quantra.Schedule import Schedule
obj = Schedule()
obj.Init(self._tab.Bytes, x)
return obj
return None
# FloatingRateBond
def Index(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from quantra.Index import Index
obj = Index()
obj.Init(self._tab.Bytes, x)
return obj
return None
# FloatingRateBond
def AccrualDayCounter(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
return 0
# FloatingRateBond
def PaymentConvention(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
return 0
# FloatingRateBond
def FixingDays(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
# FloatingRateBond
def Spread(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float64Flags, o + self._tab.Pos)
return 0.0
# FloatingRateBond
def InArrears(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# FloatingRateBond
def Redemption(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float64Flags, o + self._tab.Pos)
return 0.0
# FloatingRateBond
def IssueDate(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
def Start(builder): builder.StartObject(11)
def FloatingRateBondStart(builder):
"""This method is deprecated. Please switch to Start."""
return Start(builder)
def AddSettlementDays(builder, settlementDays): builder.PrependInt32Slot(0, settlementDays, 0)
def FloatingRateBondAddSettlementDays(builder, settlementDays):
"""This method is deprecated. Please switch to AddSettlementDays."""
return AddSettlementDays(builder, settlementDays)
def AddFaceAmount(builder, faceAmount): builder.PrependFloat64Slot(1, faceAmount, 0.0)
def FloatingRateBondAddFaceAmount(builder, faceAmount):
"""This method is deprecated. Please switch to AddFaceAmount."""
return AddFaceAmount(builder, faceAmount)
def AddSchedule(builder, schedule): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(schedule), 0)
def FloatingRateBondAddSchedule(builder, schedule):
"""This method is deprecated. Please switch to AddSchedule."""
return AddSchedule(builder, schedule)
def AddIndex(builder, index): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(index), 0)
def FloatingRateBondAddIndex(builder, index):
"""This method is deprecated. Please switch to AddIndex."""
return AddIndex(builder, index)
def AddAccrualDayCounter(builder, accrualDayCounter): builder.PrependInt8Slot(4, accrualDayCounter, 0)
def FloatingRateBondAddAccrualDayCounter(builder, accrualDayCounter):
"""This method is deprecated. Please switch to AddAccrualDayCounter."""
return AddAccrualDayCounter(builder, accrualDayCounter)
def AddPaymentConvention(builder, paymentConvention): builder.PrependInt8Slot(5, paymentConvention, 0)
def FloatingRateBondAddPaymentConvention(builder, paymentConvention):
"""This method is deprecated. Please switch to AddPaymentConvention."""
return AddPaymentConvention(builder, paymentConvention)
def AddFixingDays(builder, fixingDays): builder.PrependInt32Slot(6, fixingDays, 0)
def FloatingRateBondAddFixingDays(builder, fixingDays):
"""This method is deprecated. Please switch to AddFixingDays."""
return AddFixingDays(builder, fixingDays)
def AddSpread(builder, spread): builder.PrependFloat64Slot(7, spread, 0.0)
def FloatingRateBondAddSpread(builder, spread):
"""This method is deprecated. Please switch to AddSpread."""
return AddSpread(builder, spread)
def AddInArrears(builder, inArrears): builder.PrependBoolSlot(8, inArrears, 0)
def FloatingRateBondAddInArrears(builder, inArrears):
"""This method is deprecated. Please switch to AddInArrears."""
return AddInArrears(builder, inArrears)
def AddRedemption(builder, redemption): builder.PrependFloat64Slot(9, redemption, 0.0)
def FloatingRateBondAddRedemption(builder, redemption):
"""This method is deprecated. Please switch to AddRedemption."""
return AddRedemption(builder, redemption)
def AddIssueDate(builder, issueDate): builder.PrependUOffsetTRelativeSlot(10, flatbuffers.number_types.UOffsetTFlags.py_type(issueDate), 0)
def FloatingRateBondAddIssueDate(builder, issueDate):
"""This method is deprecated. Please switch to AddIssueDate."""
return AddIssueDate(builder, issueDate)
def End(builder): return builder.EndObject()
def FloatingRateBondEnd(builder):
"""This method is deprecated. Please switch to End."""
return End(builder)
```
#### File: python/quantra/Schedule.py
```python
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class Schedule(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = Schedule()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsSchedule(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
# Schedule
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# Schedule
def Calendar(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
return 0
# Schedule
def EffectiveDate(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# Schedule
def TerminationDate(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# Schedule
def Frequency(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
return 0
# Schedule
def Convention(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
return 0
# Schedule
def TerminationDateConvention(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
return 0
# Schedule
def DateGenerationRule(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
return 0
# Schedule
def EndOfMonth(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
def Start(builder): builder.StartObject(8)
def ScheduleStart(builder):
"""This method is deprecated. Please switch to Start."""
return Start(builder)
def AddCalendar(builder, calendar): builder.PrependInt8Slot(0, calendar, 0)
def ScheduleAddCalendar(builder, calendar):
"""This method is deprecated. Please switch to AddCalendar."""
return AddCalendar(builder, calendar)
def AddEffectiveDate(builder, effectiveDate): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(effectiveDate), 0)
def ScheduleAddEffectiveDate(builder, effectiveDate):
"""This method is deprecated. Please switch to AddEffectiveDate."""
return AddEffectiveDate(builder, effectiveDate)
def AddTerminationDate(builder, terminationDate): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(terminationDate), 0)
def ScheduleAddTerminationDate(builder, terminationDate):
"""This method is deprecated. Please switch to AddTerminationDate."""
return AddTerminationDate(builder, terminationDate)
def AddFrequency(builder, frequency): builder.PrependInt8Slot(3, frequency, 0)
def ScheduleAddFrequency(builder, frequency):
"""This method is deprecated. Please switch to AddFrequency."""
return AddFrequency(builder, frequency)
def AddConvention(builder, convention): builder.PrependInt8Slot(4, convention, 0)
def ScheduleAddConvention(builder, convention):
"""This method is deprecated. Please switch to AddConvention."""
return AddConvention(builder, convention)
def AddTerminationDateConvention(builder, terminationDateConvention): builder.PrependInt8Slot(5, terminationDateConvention, 0)
def ScheduleAddTerminationDateConvention(builder, terminationDateConvention):
"""This method is deprecated. Please switch to AddTerminationDateConvention."""
return AddTerminationDateConvention(builder, terminationDateConvention)
def AddDateGenerationRule(builder, dateGenerationRule): builder.PrependInt8Slot(6, dateGenerationRule, 0)
def ScheduleAddDateGenerationRule(builder, dateGenerationRule):
"""This method is deprecated. Please switch to AddDateGenerationRule."""
return AddDateGenerationRule(builder, dateGenerationRule)
def AddEndOfMonth(builder, endOfMonth): builder.PrependBoolSlot(7, endOfMonth, 0)
def ScheduleAddEndOfMonth(builder, endOfMonth):
"""This method is deprecated. Please switch to AddEndOfMonth."""
return AddEndOfMonth(builder, endOfMonth)
def End(builder): return builder.EndObject()
def ScheduleEnd(builder):
"""This method is deprecated. Please switch to End."""
return End(builder)
```
#### File: python/quantra/SwapHelper.py
```python
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class SwapHelper(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = SwapHelper()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsSwapHelper(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
# SwapHelper
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# SwapHelper
def Rate(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float64Flags, o + self._tab.Pos)
return 0.0
# SwapHelper
def TenorTimeUnit(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
return 0
# SwapHelper
def TenorNumber(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
# SwapHelper
def Calendar(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
return 0
# SwapHelper
def SwFixedLegFrequency(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
return 0
# SwapHelper
def SwFixedLegConvention(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
return 0
# SwapHelper
def SwFixedLegDayCounter(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
return 0
# SwapHelper
def SwFloatingLegIndex(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
return 0
# SwapHelper
def Spread(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float64Flags, o + self._tab.Pos)
return 0.0
# SwapHelper
def FwdStartDays(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
def Start(builder): builder.StartObject(10)
def SwapHelperStart(builder):
"""This method is deprecated. Please switch to Start."""
return Start(builder)
def AddRate(builder, rate): builder.PrependFloat64Slot(0, rate, 0.0)
def SwapHelperAddRate(builder, rate):
"""This method is deprecated. Please switch to AddRate."""
return AddRate(builder, rate)
def AddTenorTimeUnit(builder, tenorTimeUnit): builder.PrependInt8Slot(1, tenorTimeUnit, 0)
def SwapHelperAddTenorTimeUnit(builder, tenorTimeUnit):
"""This method is deprecated. Please switch to AddTenorTimeUnit."""
return AddTenorTimeUnit(builder, tenorTimeUnit)
def AddTenorNumber(builder, tenorNumber): builder.PrependInt32Slot(2, tenorNumber, 0)
def SwapHelperAddTenorNumber(builder, tenorNumber):
"""This method is deprecated. Please switch to AddTenorNumber."""
return AddTenorNumber(builder, tenorNumber)
def AddCalendar(builder, calendar): builder.PrependInt8Slot(3, calendar, 0)
def SwapHelperAddCalendar(builder, calendar):
"""This method is deprecated. Please switch to AddCalendar."""
return AddCalendar(builder, calendar)
def AddSwFixedLegFrequency(builder, swFixedLegFrequency): builder.PrependInt8Slot(4, swFixedLegFrequency, 0)
def SwapHelperAddSwFixedLegFrequency(builder, swFixedLegFrequency):
"""This method is deprecated. Please switch to AddSwFixedLegFrequency."""
return AddSwFixedLegFrequency(builder, swFixedLegFrequency)
def AddSwFixedLegConvention(builder, swFixedLegConvention): builder.PrependInt8Slot(5, swFixedLegConvention, 0)
def SwapHelperAddSwFixedLegConvention(builder, swFixedLegConvention):
"""This method is deprecated. Please switch to AddSwFixedLegConvention."""
return AddSwFixedLegConvention(builder, swFixedLegConvention)
def AddSwFixedLegDayCounter(builder, swFixedLegDayCounter): builder.PrependInt8Slot(6, swFixedLegDayCounter, 0)
def SwapHelperAddSwFixedLegDayCounter(builder, swFixedLegDayCounter):
"""This method is deprecated. Please switch to AddSwFixedLegDayCounter."""
return AddSwFixedLegDayCounter(builder, swFixedLegDayCounter)
def AddSwFloatingLegIndex(builder, swFloatingLegIndex): builder.PrependInt8Slot(7, swFloatingLegIndex, 0)
def SwapHelperAddSwFloatingLegIndex(builder, swFloatingLegIndex):
"""This method is deprecated. Please switch to AddSwFloatingLegIndex."""
return AddSwFloatingLegIndex(builder, swFloatingLegIndex)
def AddSpread(builder, spread): builder.PrependFloat64Slot(8, spread, 0.0)
def SwapHelperAddSpread(builder, spread):
"""This method is deprecated. Please switch to AddSpread."""
return AddSpread(builder, spread)
def AddFwdStartDays(builder, fwdStartDays): builder.PrependInt32Slot(9, fwdStartDays, 0)
def SwapHelperAddFwdStartDays(builder, fwdStartDays):
"""This method is deprecated. Please switch to AddFwdStartDays."""
return AddFwdStartDays(builder, fwdStartDays)
def End(builder): return builder.EndObject()
def SwapHelperEnd(builder):
"""This method is deprecated. Please switch to End."""
return End(builder)
``` |
{
"source": "JosepSampe/blackeagle",
"score": 2
} |
#### File: gateways/docker/function.py
```python
from swift.common.wsgi import make_subrequest
from zion.common.utils import set_object_metadata, get_object_metadata, make_swift_request
import tarfile
import os
TIMEOUT_HEADER = "X-Object-Meta-Function-Timeout"
MEMORY_HEADER = "X-Object-Meta-Function-Memory"
MAIN_HEADER = "X-Object-Meta-Function-Main"
class Function:
"""
Function main class.
"""
def __init__(self, conf, app, req, account, logger, function_obj_name):
self.conf = conf
self.app = app
self.req = req
self.account = account
self.logger = logger
self.function_obj_name = function_obj_name
self.function_name = function_obj_name.replace('.tar.gz', '')
self.functions_container = self.conf['functions_container']
self.disaggregated_compute = self.conf['disaggregated_compute']
self.scope = self.account[5:18]
# Dirs
self.main_dir = self.conf["main_dir"]
self.functions_dir = self.conf["functions_dir"]
self.cache_dir = self.conf["cache_dir"]
self.log_dir = self.conf["log_dir"]
self.bin_dir = self.conf["bin_dir"]
self._preparate_dirs()
self._load_function()
self.logger.info('Function - Function instance created')
def _preparate_dirs(self):
"""
Makes the required directories for managing the function.
"""
self.logger.info('Function - Preparing function directories')
functions_path = os.path.join(self.main_dir, self.functions_dir)
scope_path = os.path.join(functions_path, self.scope)
self.cache_path = os.path.join(scope_path, self.cache_dir)
self.log_path = os.path.join(scope_path, self.log_dir)
self.bin_path = os.path.join(scope_path, self.bin_dir)
if not os.path.exists(self.cache_path):
os.makedirs(self.cache_path)
if not os.path.exists(self.log_path):
os.makedirs(self.log_path)
def _load_function(self):
"""
Loads the function.
"""
self.logger.info('Function - Loading function: '+self.function_obj_name)
self.cached_function_obj = os.path.join(self.cache_path, self.function_obj_name)
self.function_bin_path = os.path.join(self.bin_path, self.function_name)
if not self._is_function_in_cache():
self._update_local_cache_from_swift()
self._extract_function()
self._load_function_execution_information()
def _is_function_in_cache(self):
"""
Checks whether the function is in cache.
        :returns: whether the function is available in cache.
"""
in_cache = False
if os.path.isfile(self.cached_function_obj):
self.logger.info('Function - ' + self.function_obj_name + ' found in cache.')
in_cache = True
else:
self.logger.info('Function - ' + self.function_obj_name + ' not found in cache.')
in_cache = False
return in_cache
def _update_local_cache_from_swift(self):
"""
Updates the local cache of functions.
"""
self.logger.info('Function - Updating local cache from swift')
if self.disaggregated_compute:
new_env = dict(self.req.environ)
swift_path = os.path.join('/', 'v1', self.account,
self.functions_container, self.function_obj_name)
sub_req = make_subrequest(new_env, 'GET', swift_path,
swift_source='function_middleware')
resp = sub_req.get_response(self.app)
else:
resp = make_swift_request('GET', self.account, self.functions_container,
self.function_obj_name)
if resp.status_int != 200:
self.logger.info('Function - It is not possible to update the local cache')
raise FileNotFoundError
with open(self.cached_function_obj, 'wb') as fn:
fn.write(resp.body)
self.logger.info('Function - Local cache updated: '+self.cached_function_obj)
self.function_metadata = resp.headers
set_object_metadata(self.cached_function_obj, resp.headers)
def _extract_function(self):
"""
Untars the function to the bin directory.
"""
self.logger.info('Extracting .tar.gz function files')
tar = tarfile.open(self.cached_function_obj, "r:gz")
tar.extractall(path=self.function_bin_path)
tar.close()
def _load_function_execution_information(self):
"""
Loads the memory needed and the timeout of the function.
"""
self.logger.info('Function - Loading function information')
function_metadata = get_object_metadata(self.cached_function_obj)
self.logger.info('Function - Metadata: ' + str(function_metadata))
if MEMORY_HEADER not in function_metadata or TIMEOUT_HEADER not in \
function_metadata or MAIN_HEADER not in function_metadata:
raise ValueError("Error Getting Function memory and timeout values")
else:
self.memory = int(function_metadata[MEMORY_HEADER])
self.timeout = int(function_metadata[TIMEOUT_HEADER])
self.main_class = function_metadata[MAIN_HEADER]
def open_log(self):
"""
Opens the log file where the function will log.
"""
f_log_path = os.path.join(self.log_path, self.function_name)
if not os.path.exists(f_log_path):
os.makedirs(f_log_path)
f_log_file = os.path.join(f_log_path, self.function_name+'.log')
self.logger_file = open(f_log_file, 'a')
def get_timeout(self):
return self.timeout
def get_main_class(self):
return self.main_class
def get_memory(self):
return self.memory
def get_logfd(self):
return self.logger_file.fileno()
def get_name(self):
return self.function_name
def get_obj_name(self):
return self.function_obj_name
def get_bin_path(self):
return self.function_bin_path
def close_log(self):
"""
Closes the log file.
"""
self.logger_file.close()
```
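The class above follows a check-cache / fetch / extract pattern for packaged functions. A minimal standalone sketch of that flow is shown below; `fetch_from_store` and the path arguments are hypothetical placeholders, not part of the original middleware.
```python
import os
import tarfile


def ensure_function_binary(cache_path, bin_path, obj_name, fetch_from_store):
    """Download a packaged .tar.gz function into a local cache (if missing) and unpack it.

    `fetch_from_store` is assumed to be a callable that returns the raw archive bytes.
    """
    cached_obj = os.path.join(cache_path, obj_name)
    target_dir = os.path.join(bin_path, obj_name.replace('.tar.gz', ''))
    os.makedirs(cache_path, exist_ok=True)
    if not os.path.isfile(cached_obj):
        with open(cached_obj, 'wb') as f:
            f.write(fetch_from_store(obj_name))
    with tarfile.open(cached_obj, 'r:gz') as tar:
        tar.extractall(path=target_dir)
    return target_dir
```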
#### File: zion/handlers/compute.py
```python
from zion.handlers import BaseHandler
from zion.handlers.base import NotFunctionRequest
from swift.common.utils import public
import time
class ComputeHandler(BaseHandler):
def __init__(self, request, conf, app, logger, redis):
super(ComputeHandler, self).__init__(
request, conf, app, logger, redis)
def _parse_vaco(self):
return self.req.split_path(3, 4, rest_with_last=True)
def _get_functions(self):
return eval(self.req.headers.pop('functions_data'))
def is_valid_request(self):
return 'functions_data' in self.req.headers
def handle_request(self):
if hasattr(self, self.method) and self.is_valid_request():
try:
handler = getattr(self, self.method)
getattr(handler, 'publicly_accessible')
except AttributeError:
raise NotFunctionRequest()
return handler()
else:
raise NotFunctionRequest()
@public
def GET(self):
"""
GET handler on Compute node
"""
functions_data = self._get_functions()
self.response = self.req.get_response(self.app)
# self.response = Response(body="Test", headers=self.req.headers)
t0 = time.time()
self.apply_function_onget(functions_data)
        self.logger.info('------> TOTAL ZION TIME: %0.6fs' % (time.time() - t0))
return self.response
@public
def PUT(self):
"""
PUT handler on Compute node
"""
functions_data = self._get_functions()
return self.apply_function_onput(functions_data)
``` |
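Note that `_get_functions` above evaluates a request header with `eval`, which will execute arbitrary Python expressions. A more defensive sketch (not part of the original code) parses the header with `ast.literal_eval`, which only accepts plain literals:
```python
import ast


def parse_functions_header(headers):
    """Safer stand-in for eval(): only literals (dicts, lists, strings, numbers) are accepted."""
    raw = headers.pop('functions_data')
    return ast.literal_eval(raw)
```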
{
"source": "JosepSampe/swift-linking-middleware",
"score": 2
} |
#### File: JosepSampe/swift-linking-middleware/softlink.py
```python
import os
from swift.common.utils import get_logger
from swift.common.utils import register_swift_info
from swift.common.wsgi import make_subrequest
from swift.common.swob import Request, Response
class SoftLinkMiddleware(object):
def __init__(self, app, conf):
self.app = app
self.conf = conf
self.logger = get_logger(self.conf, log_route='softlink')
self.register_info()
def register_info(self):
register_swift_info('softlink')
@property
def is_object_link(self):
        return 'X-Link-To' in self.req.headers
def verify_access(self, cont, obj):
"""
Verifies access to the specified object in swift
:param cont: swift container name
:param obj: swift object name
:return response: Object response
"""
path = os.path.join('/', self.api_version, self.account, cont, obj)
self.logger.debug('Verifying access to %s' % path)
new_env = dict(self.req.environ)
if 'HTTP_TRANSFER_ENCODING' in new_env.keys():
del new_env['HTTP_TRANSFER_ENCODING']
auth_token = self.req.headers.get('X-Auth-Token')
sub_req = make_subrequest(new_env, 'HEAD', path,
headers={'X-Auth-Token': auth_token},
swift_source='softlink_middleware')
return sub_req.get_response(self.app)
def create_link(self, link_path, dest_path, heads):
"""
        Creates a link to an actual object
:param link_path: swift path of the link
:param dest_path: swift path of the object to link
:param heads: original object headers
"""
self.logger.debug('Creating a link from %s to %s' %
(link_path, dest_path))
        new_env = dict(self.req.environ)
if 'HTTP_TRANSFER_ENCODING' in new_env.keys():
del new_env['HTTP_TRANSFER_ENCODING']
if 'HTTP_X_COPY_FROM' in new_env.keys():
del new_env['HTTP_X_COPY_FROM']
        auth_token = self.req.headers.get('X-Auth-Token')
link_path = os.path.join('/', self.api_version,
self.account, link_path)
sub_req = make_subrequest(
new_env, 'PUT', link_path,
headers={'X-Auth-Token': auth_token,
'Content-Length': 0,
'Content-Type': 'link',
'Original-Content-Length': heads["Content-Length"],
'X-Object-Sysmeta-Link-To': dest_path},
swift_source='softlink_middleware')
resp = sub_req.get_response(self.app)
return resp
def get_linked_object(self, dest_obj):
"""
Makes a subrequest to the provided container/object
:param dest_obj: container/object
:return: swift.common.swob.Response Instance
"""
dest_path = os.path.join('/', self.api_version, self.account,
dest_obj)
new_env = dict(self.req.environ)
sub_req = make_subrequest(new_env, 'GET', dest_path,
headers=self.req.headers,
swift_source='softlink_middleware')
return sub_req.get_response(self.app)
def process_object_link(self):
"""
Moves an object to the destination path and leaves a soft link in
the original path.
"""
link_path = os.path.join(self.container, self.obj)
dest_path = self.req.headers['X-Link-To']
if link_path != dest_path:
resp = self.verify_access(self.container, self.obj)
if resp.is_success:
headers = resp.headers
if "X-Object-Sysmeta-Link-To" not in resp.headers \
and resp.headers['Content-Type'] != 'link':
self.req.method = 'COPY'
self.req.headers['Destination'] = dest_path
response = self.req.get_response(self.app)
if response.is_success:
                        response = self.create_link(link_path, dest_path,
                                                    headers)
else:
msg = ("Error: The main object does not exists in Swift.\n")
response = Response(body=msg, headers={'etag': ''},
request=self.req)
else:
msg = ("Error: Link path and destination path "
"cannot be the same.\n")
response = Response(body=msg, headers={'etag': ''},
request=self.req)
return response
def __call__(self, env, start_response):
self.req = Request(env)
if self.req.method == 'GET':
resp = self.app(env, start_response)
if "X-Object-Sysmeta-Link-To" in resp.headers:
dest_obj = resp.headers["X-Object-Sysmeta-Link-To"]
return self.get_linked_object(dest_obj)
if self.req.method == 'POST':
if self.is_object_link:
resp = self.process_object_link()
return resp(env, start_response)
else:
# Pass on to downstream WSGI component
return self.app(env, start_response)
def filter_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
def softlink_filter(app):
return SoftLinkMiddleware(app, conf)
return softlink_filter
``` |
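From a client's point of view the middleware is driven entirely by the `X-Link-To` header on a POST request. A minimal sketch using `requests` follows; the storage URL, token and object names are placeholders.
```python
import requests


def create_soft_link(storage_url, token, container, obj, dest_path):
    """Ask the softlink middleware to move container/obj to dest_path and leave a link behind.

    `storage_url` is assumed to already include the /v1/AUTH_account prefix.
    """
    resp = requests.post('{}/{}/{}'.format(storage_url, container, obj),
                         headers={'X-Auth-Token': token,
                                  'X-Link-To': dest_path})
    resp.raise_for_status()
    return resp
```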
{
"source": "JosepSampe/triggerflow",
"score": 3
} |
#### File: trigger-api/api/workspaces.py
```python
import re
from triggerflow.service.storage import TriggerStorage
def create_workspace(trigger_storage: TriggerStorage, workspace: str, global_context: dict, event_source: dict):
if trigger_storage.workspace_exists(workspace=workspace):
return {'error': 'Workspace {} already exists'.format(workspace), 'err_code': 2}, 400
    # Workspace name can only contain alphanumeric, dot, hyphen or underscore characters
if not re.fullmatch(r"^[a-zA-Z0-9._-]*$", workspace):
return {'error': 'Illegal workspace name', 'err_code': 3}, 400
if {'name', 'class', 'parameters'} != set(event_source):
return {'error': 'Invalid event source', 'err_code': 4}, 400
trigger_storage.create_workspace(workspace, {event_source['name']: event_source}, global_context)
return {'message': 'Created workspace {}'.format(workspace)}, 200
def get_workspace(trigger_storage: TriggerStorage, workspace: str):
triggers = trigger_storage.get(workspace=workspace, document_id='triggers')
triggerIDs = [trigger["id"] for trigger in triggers]
event_sources = trigger_storage.get(workspace=workspace, document_id='event_sources')
event_source_names = [event_source['name'] for event_source in event_sources]
global_context = trigger_storage.get(workspace=workspace, document_id='global_context')
return {'triggers': triggerIDs, 'event_sources': event_source_names, 'global_context': global_context}, 200
def delete_workspace(trigger_storage: TriggerStorage, workspace: str):
trigger_storage.delete_workspace(workspace=workspace)
return {'message': 'Workspace {} deleted'.format(workspace)}, 200
```
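The handler above enforces two checks before creating a workspace: the name must match a restricted character set and the event source must carry exactly the keys `name`, `class` and `parameters`. A small standalone sketch of just those checks:
```python
import re


def validate_workspace_request(workspace, event_source):
    """Reproduce the two validations used above (sketch, no storage involved)."""
    if not re.fullmatch(r"^[a-zA-Z0-9._-]*$", workspace):
        return 'Illegal workspace name'
    if {'name', 'class', 'parameters'} != set(event_source):
        return 'Invalid event source'
    return None


print(validate_workspace_request('my-workspace.v1',
                                 {'name': 'src', 'class': 'KafkaEventSource', 'parameters': {}}))  # None
print(validate_workspace_request('bad name!', {}))  # 'Illegal workspace name'
```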
#### File: triggerflow/dags/dagrun.py
```python
import pickle
import json
from uuid import uuid4
from datetime import datetime
from platform import node
from enum import Enum
from ..cache import TriggerflowCache
from ..client import (
TriggerflowClient,
TriggerflowCachedClient,
DefaultActions,
DefaultConditions
)
from triggerflow import CloudEvent
class DAGRun:
class State(Enum):
EMPTY = 'EMPTY'
INITIALIZED = 'INITIALIZED'
DEPLOYED = 'DEPLOYED'
RUNNING = 'RUNNING'
FINISHED = 'FINISHED'
ERROR = 'ERROR'
def __init__(self):
self.uuid = None
self.dagrun_id = None
self.start_time = None
self.dag_id = None
self.dag = None
self.state = DAGRun.State.EMPTY
@classmethod
def from_dag_def(cls, dag_def: 'DAG'):
dagrun = cls()
dagrun.uuid = str(uuid4())
dagrun.dagrun_id = '-'.join([dag_def.dag_id, dagrun.uuid[24:]])
dagrun.start_time = datetime.utcnow().isoformat()
dagrun.dag_id = dag_def.dag_id
dagrun.dag = dag_def
dagrun.state = DAGRun.State.INITIALIZED
dagrun.__save_cache()
return dagrun
@classmethod
def load_run(cls, dagrun_id: str):
dagrun = cls()
try:
with TriggerflowCache(path='dag-runs', file_name=dagrun_id + '.json', method='r') as dagrun_file:
metadata = json.loads(dagrun_file.read())
with TriggerflowCache(path='dag-runs', file_name=dagrun_id + '.pickle', method='rb') as dag_file:
dagrun.dag = pickle.load(dag_file)
except FileNotFoundError:
raise Exception('Dag run not found in cache')
dagrun.uuid = metadata['uuid']
dagrun.dagrun_id = metadata['dagrun_id']
dagrun.start_time = metadata['start_time']
dagrun.dag_id = metadata['dag_id']
dagrun.state = DAGRun.State[metadata['state']]
return dagrun
def run(self, silent=False):
if self.state == DAGRun.State.RUNNING:
raise Exception('DAG already running')
self.__create_triggers()
self.__trigger(silent)
self.__save_cache()
return self
def result(self, task=None):
tf = TriggerflowClient()
tf.target_workspace(self.dagrun_id)
if task is None:
return tf.get_trigger('__end__')['context']['result']
else:
if task in self.dag.tasks_dict:
task_op = self.dag.tasks_dict[task]
results = []
for downstream in task_op.downstream_relatives:
trg = tf.get_trigger(downstream.task_id)
                    results.append(trg['result'])
                return results
def __save_cache(self):
with TriggerflowCache(path='dag-runs', file_name=self.dagrun_id + '.json', method='w') as dagrun_file:
metadata = {
'uuid': self.uuid,
'dagrun_id': self.dagrun_id,
'start_time': self.start_time,
'dag_id': self.dag_id,
'state': self.state.name
}
dagrun_file.write(json.dumps(metadata, indent=4))
with TriggerflowCache(path='dag-runs', file_name=self.dagrun_id + '.pickle', method='wb') as dag_file:
pickle.dump(self.dag, dag_file)
def __trigger(self, silent=False):
event_source = list(self.dag.event_sources.values()).pop()
uuid = uuid4()
init_cloudevent = (
CloudEvent()
.SetSubject('__init__')
.SetEventType('event.triggerflow.init')
.SetEventID(uuid.hex)
.SetSource(f'urn:{node()}:{str(uuid)}')
)
event_source.set_stream(self.dagrun_id)
event_source.publish_cloudevent(init_cloudevent)
self.state = DAGRun.State.RUNNING
if not silent:
print('DAG Run ID: {}'.format(self.dagrun_id))
return self
def __create_triggers(self):
tf = TriggerflowCachedClient()
# Create unique workspace for this specific dag run and its event sources
event_sources = list(self.dag.event_sources.values())
# Set current DAGRun ID as topic/queue name for the event sources
[event_source.set_stream(self.dagrun_id) for event_source in event_sources]
tf.create_workspace(workspace_name=self.dagrun_id, event_source=event_sources.pop(), global_context={})
for event_source in event_sources:
tf.add_event_source(event_source)
for task in self.dag.tasks:
context = {'subject': task.task_id,
'dependencies': {},
'operator': task.get_trigger_meta(),
'result': []}
# If this task does not have upstream relatives, then it will be executed when the sentinel event __init__
            # is produced; otherwise, it is executed every time one of its upstream relatives emits its termination event
if not task.upstream_relatives:
                condition = DefaultConditions.TRUE  # Initial tasks do not have dependencies
activation_event = CloudEvent().SetSubject('__init__').SetEventType('event.triggerflow.init')
act_events = [activation_event]
else:
condition = DefaultConditions.DAG_TASK_JOIN
act_events = []
for upstream_relative in task.upstream_relatives:
context['dependencies'][upstream_relative.task_id] = {'join': -1, 'counter': 0}
activation_event = (
CloudEvent()
.SetSubject(upstream_relative.task_id)
.SetEventType('event.triggerflow.termination.success')
)
act_events.append(activation_event)
# Add a trigger that handles this task execution: It will be fired every time one of its upstream
# relatives sends its termination event, but it is executed only when all dependencies are fulfilled
tf.add_trigger(event=act_events,
action=DefaultActions[task.trigger_action_name],
condition=condition,
context=context,
trigger_id=task.task_id,
transient=False)
# Join final tasks (those that do not have downstream relatives)
context = {'subject': '__end__',
'dependencies': {final_task.task_id: {'join': -1, 'counter': 0} for final_task in
self.dag.final_tasks},
'result': []}
activation_events = [(CloudEvent()
.SetSubject(final_task.task_id)
.SetEventType('event.triggerflow.termination.success'))
for final_task in self.dag.final_tasks]
tf.add_trigger(event=activation_events,
action=DefaultActions.TERMINATE,
condition=DefaultConditions.DAG_TASK_JOIN,
context=context,
trigger_id='__end__',
transient=False)
# Add error handling trigger: All tasks that produce a failure event type will fire this trigger
# activation_event = v1.Event().SetSubject('*').SetEventType('event.triggerflow.termination.failure')
# tf.add_trigger(event=activation_event,
# action=DefaultActions.DAG_TASK_FAILURE_HANDLER,
# condition=DefaultConditions.TRUE,
# context={},
# context_parser='DAGS',
# trigger_id='__error_handler__',
# transient=False)
#
# # Add retry handler trigger: We will use this trigger to manually fire it to retry any failed task
# activation_event = v1.Event().SetSubject('__retry__').SetEventType('event.triggerflow.termination.failure')
# tf.add_trigger(event=activation_event,
# action=DefaultActions.DAG_TASK_RETRY_HANDLER,
# condition=DefaultConditions.TRUE,
# context={},
# context_parser='DAGS',
# trigger_id='__retry_handler__',
# transient=False)
tf.commit_cached_triggers()
self.state = DAGRun.State.DEPLOYED
```
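Putting the pieces together, a run normally goes from `from_dag_def` to `run` to `result`. The sketch below only illustrates the intended call order; `my_dag` is a placeholder for a `DAG` object built elsewhere in the library.
```python
# Hypothetical usage of DAGRun (my_dag is assumed to be an already-built DAG):
dagrun = DAGRun.from_dag_def(my_dag)        # assigns a run id and caches the run locally
dagrun.run()                                # deploys the triggers and publishes the __init__ event

# Later, possibly from another process, the cached run can be reloaded:
same_run = DAGRun.load_run(dagrun.dagrun_id)
final_result = same_run.result()            # reads the result stored in the '__end__' trigger
```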
#### File: triggerflow/triggerflow/statemachine.py
```python
import boto3
import logging
import json
from arnparse import arnparse
from uuid import uuid4
from enum import Enum
from platform import node
from collections import defaultdict
from triggerflow import TriggerflowCachedClient, CloudEvent
from triggerflow.config import get_config
from triggerflow.eventsources.model import EventSource
from triggerflow.eventsources.sqs import SQSEventSource
from triggerflow.functions import ConditionActionModel, DefaultActions
log = logging.getLogger('triggerflow')
log.setLevel(logging.DEBUG)
class AwsAsfConditions(ConditionActionModel, Enum):
AWS_ASF_JOIN_STATEMACHINE = {'name': 'AWS_ASF_JOIN_STATEMACHINE'}
AWS_ASF_CONDITION = {'name': 'AWS_ASF_CONDITION'}
class AwsAsfActions(ConditionActionModel, Enum):
AWS_ASF_PASS = {'name': 'AWS_ASF_PASS'}
AWS_ASF_TASK = {'name': 'AWS_ASF_TASK'}
AWS_ASF_MAP = {'name': 'AWS_ASF_MAP'}
AWS_ASF_END_STATEMACHINE = {'name': 'AWS_ASF_END_STATEMACHINE'}
class StateMachine:
def __init__(self):
self.state_machine = None
self.run_id = None
self.event_source = None
self.config = get_config()
self.credentials = self.config['statemachines']['aws']
self.lambda_client = boto3.client('lambda',
aws_access_key_id=self.credentials['access_key_id'],
aws_secret_access_key=self.credentials['secret_access_key'],
region_name=self.credentials['region'])
@classmethod
def json(cls, file_path: str, event_source: EventSource = None):
sm = cls()
with open(file_path, 'r') as json_file:
sm_json = json.loads(json_file.read())
sm.state_machine = sm_json
sm.event_source = event_source
sm.__deploy_state_machine()
return sm
@classmethod
def string(cls, state_machine_json: str, event_source: EventSource = None):
sm = cls()
sm_json = json.loads(state_machine_json)
sm.state_machine = sm_json
sm.event_source = event_source
sm.__deploy_state_machine()
return sm
def __deploy_state_machine(self):
uuid = str(uuid4())
self.run_id = 'sm-' + uuid[24:]
# self.run_id = 'sm-test'
if self.event_source is None:
# Create the SQS queue where the termination events will be sent
self.event_source = SQSEventSource(name=self.run_id + '_' + 'SQSEventSource',
access_key_id=self.credentials['access_key_id'],
secret_access_key=self.credentials['secret_access_key'],
region=self.credentials['region'],
queue=self.run_id)
queue_arn = self.__create_sqs_queue()
lambdas_updated = {}
else:
self.event_source.set_stream(self.run_id)
self.event_source.name = self.run_id
queue_arn, lambdas_updated = None, None
triggerflow_client = TriggerflowCachedClient()
global_context = {'aws_credentials': self.credentials}
if not isinstance(self.event_source, SQSEventSource):
global_context['event_source'] = self.event_source.get_json_eventsource()
triggerflow_client.create_workspace(workspace_name=self.run_id,
event_source=self.event_source,
global_context=global_context)
state_machine_count = 0
###################################
def state_machine(states, trigger_event):
nonlocal state_machine_count, queue_arn, lambdas_updated
state_machine_id = 'StateMachine{}'.format(state_machine_count)
state_machine_count += 1
upstream_relatives = defaultdict(list)
final_states = []
choices = {}
for state_name, state in states['States'].items():
if 'End' in state and state['End']:
final_states.append(state_name)
elif 'Next' in state:
upstream_relatives[state['Next']].append(state_name)
elif state['Type'] == 'Choice':
for choice in state['Choices']:
upstream_relatives[choice['Next']].append(state_name)
upstream_relatives[states['StartAt']].extend(trigger_event)
for state_name, state in states['States'].items():
context = {'Subject': state_name, 'State': state.copy()}
if state_name in choices:
context['Condition'] = choices[state_name].copy()
if state['Type'] == 'Pass' or state['Type'] == 'Task':
subjects = upstream_relatives[state_name]
activation_events = [CloudEvent().SetEventType('lambda.success').SetSubject(sub) for sub in
subjects]
action = AwsAsfActions.AWS_ASF_TASK if state['Type'] == 'Task' else AwsAsfActions.AWS_ASF_PASS
if state['Type'] == 'Task' and isinstance(self.event_source, SQSEventSource) and \
state['Resource'] not in lambdas_updated:
self.__add_destination_to_lambda(state['Resource'], queue_arn)
lambdas_updated[state['Resource']] = True
triggerflow_client.add_trigger(event=activation_events,
condition=AwsAsfConditions.AWS_ASF_CONDITION,
action=action,
context=context,
trigger_id=state_name,
transient=False)
elif state['Type'] == 'Choice':
choices = {}
for choice in state['Choices']:
upstream_relatives[choice['Next']] = upstream_relatives[state_name]
choices[choice['Next']] = choice.copy()
elif state['Type'] == 'Parallel':
sub_state_machines = []
for branch in state['Branches']:
sub_sm_id = state_machine(branch, upstream_relatives[state_name])
sub_state_machines.append(sub_sm_id)
context['join_multiple'] = len(state['Branches'])
del context['State']
act_events = [CloudEvent().SetEventType('lambda.success').SetSubject(sub_sm) for sub_sm in
sub_state_machines]
triggerflow_client.add_trigger(event=act_events,
condition=AwsAsfConditions.AWS_ASF_JOIN_STATEMACHINE,
action=AwsAsfActions.AWS_ASF_PASS,
context=context,
trigger_id=state_name,
transient=False)
elif state['Type'] == 'Wait':
raise NotImplementedError()
elif state['Type'] == 'Map':
iterator = state_machine(state['Iterator'], [state_name])
context['join_state_machine'] = iterator
del context['State']['Iterator']
subjects = upstream_relatives[state_name]
activation_events = [CloudEvent().SetEventType('lambda.success').SetSubject(sub) for sub in
subjects]
triggerflow_client.add_trigger(event=activation_events,
condition=AwsAsfConditions.AWS_ASF_CONDITION,
action=AwsAsfActions.AWS_ASF_MAP,
context=context,
trigger_id=state_name,
transient=False)
if 'Next' in state:
upstream_relatives[state['Next']].remove(state_name)
upstream_relatives[state['Next']].append(iterator)
if 'End' in state:
final_states.remove(state_name)
final_states.append(iterator)
elif state['Type'] == 'Succeed':
raise NotImplementedError()
elif state['Type'] == 'Fail':
raise NotImplementedError()
activation_events = [CloudEvent().SetEventType('lambda.success').SetSubject(sub) for sub in final_states]
triggerflow_client.add_trigger(activation_events,
condition=AwsAsfConditions.AWS_ASF_JOIN_STATEMACHINE,
action=AwsAsfActions.AWS_ASF_END_STATEMACHINE,
context={'Subject': state_machine_id},
trigger_id=state_machine_id,
transient=False)
return state_machine_id
###################################
main_state_machine = state_machine(self.state_machine, ['__init__'])
final_trigger = {'id': main_state_machine, 'action': DefaultActions.TERMINATE.value}
triggerflow_client.update_trigger(final_trigger)
triggerflow_client.commit_cached_triggers()
return self.run_id
def trigger(self, execution_input: dict = None):
log.info("Trigger State Machine Execution: {}".format(self.run_id))
if execution_input is None:
execution_input = {}
uuid = uuid4()
init_cloudevent = (CloudEvent()
.SetSubject('__init__')
.SetEventType('lambda.success')
.SetEventID(uuid.hex)
.SetSource(f'urn:{node()}:{str(uuid)}'))
if execution_input is not None:
init_cloudevent.SetData(execution_input)
init_cloudevent.SetContentType('application/json')
self.event_source.publish_cloudevent(init_cloudevent)
log.info("Ok")
def __create_sqs_queue(self):
credentials = self.config['statemachines']['aws']
sqs_client = boto3.client('sqs',
aws_access_key_id=credentials['access_key_id'],
aws_secret_access_key=credentials['secret_access_key'],
region_name=credentials['region'])
response = sqs_client.create_queue(QueueName=self.run_id)
if 'QueueUrl' in response:
queue_url = response['QueueUrl']
log.debug('Queue URL: {}'.format(queue_url))
else:
raise Exception(response)
response = sqs_client.get_queue_attributes(QueueUrl=queue_url,
AttributeNames=['QueueArn'])
if 'Attributes' in response and 'QueueArn' in response['Attributes']:
queue_arn = response['Attributes']['QueueArn']
log.debug('Queue ARN: {}'.format(queue_arn))
else:
raise Exception('Could not retrieve Queue ARN from {}'.format(queue_url))
return queue_arn
def __add_destination_to_lambda(self, lambda_arn, source_arn):
arn = arnparse(lambda_arn)
if 'lambda' != arn.service:
raise Exception(('Resource of type {} is not currently supported, '
'as it cannot produce termination events').format(arn.service))
log.debug('Updating Lambda Destination for {}'.format(lambda_arn))
self.lambda_client.put_function_event_invoke_config(
DestinationConfig={'OnSuccess': {'Destination': source_arn}},
FunctionName=lambda_arn)
def trigger_statemachine(run_id: str, execution_input: dict = None, event_source: EventSource = None):
log.info("Trigger State Machine Execution: {}".format(run_id))
if execution_input is None:
execution_input = {}
uuid = uuid4()
init_cloudevent = (CloudEvent()
.SetSubject('__init__')
.SetEventType('lambda.success')
.SetEventID(uuid.hex)
.SetSource(f'urn:{node()}:{str(uuid)}'))
if execution_input is not None:
init_cloudevent.SetData(execution_input)
init_cloudevent.SetContentType('application/json')
if event_source is None:
credentials = get_config()['statemachines']['aws']
event_source = SQSEventSource(name=run_id + '_' + 'SQSEventSource',
access_key_id=credentials['access_key_id'],
secret_access_key=credentials['secret_access_key'],
region=credentials['region'],
queue=run_id)
event_source.set_stream(run_id)
event_source.publish_cloudevent(init_cloudevent)
log.info("Ok")
``` |
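As a usage sketch, deploying an Amazon States Language definition from a file and firing it could look like the following; the file name and input payload are placeholders, and AWS credentials are assumed to be resolved by `get_config()`.
```python
# Hypothetical usage (file name and payload are placeholders):
sm = StateMachine.json('statemachine.json')       # parses the ASL document and deploys the triggers
sm.trigger({'input_key': 'input_value'})          # publishes the __init__ CloudEvent

# If only the run id of a previous deployment is known:
trigger_statemachine(sm.run_id, {'input_key': 'input_value'})
```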
{
"source": "JosepSampe/vertigo",
"score": 2
} |
#### File: swift/vertigo_middleware/handler.py
```python
from swift.common.swob import HTTPInternalServerError, HTTPException, wsgify
from swift.common.utils import get_logger, register_swift_info
from ConfigParser import RawConfigParser
from vertigo_middleware.handlers import VertigoProxyHandler
from vertigo_middleware.handlers import VertigoObjectHandler
from vertigo_middleware.handlers.base import NotVertigoRequest
from storlets.gateway.loader import load_gateway
import redis
class VertigoHandlerMiddleware(object):
def __init__(self, app, conf, vertigo_conf):
self.app = app
self.exec_server = vertigo_conf.get('execution_server')
self.logger = get_logger(conf, log_route='vertigo_handler')
self.vertigo_conf = vertigo_conf
self.handler_class = self._get_handler(self.exec_server)
def _get_handler(self, exec_server):
"""
Generate Handler class based on execution_server parameter
:param exec_server: Where this storlet_middleware is running.
                            The value should be either 'proxy' or 'object'.
:raise ValueError: If exec_server is invalid
"""
if exec_server == 'proxy':
return VertigoProxyHandler
elif exec_server == 'object':
return VertigoObjectHandler
else:
raise ValueError(
'configuration error: execution_server must be either proxy'
' or object but is %s' % exec_server)
@wsgify
def __call__(self, req):
try:
request_handler = self.handler_class(
req, self.vertigo_conf, self.app, self.logger)
self.logger.debug('vertigo_handler %s call in %s with %s/%s/%s' %
(req.method, self.exec_server,
request_handler.account,
request_handler.container,
request_handler.obj))
except HTTPException:
raise
except NotVertigoRequest:
return req.get_response(self.app)
try:
return request_handler.handle_request()
except HTTPException:
self.logger.exception('Vertigo execution failed')
raise
except Exception:
self.logger.exception('Vertigo execution failed')
raise HTTPInternalServerError(body='Vertigo execution failed')
def filter_factory(global_conf, **local_conf):
"""Standard filter factory to use the middleware with paste.deploy"""
register_swift_info('vertigo')
conf = global_conf.copy()
conf.update(local_conf)
vertigo_conf = dict()
vertigo_conf['devices'] = conf.get('devices', '/srv/node')
vertigo_conf['execution_server'] = conf.get('execution_server')
vertigo_conf['mc_timeout'] = conf.get('mc_timeout', 5)
vertigo_conf['mc_pipe'] = conf.get('mc_pipe', 'vertigo_pipe')
# vertigo_conf['api_pipe'] = conf.get('mc_pipe', 'api_pipe')
vertigo_conf['metadata_visibility'] = conf.get('metadata_visibility', True)
vertigo_conf['mc_dir'] = conf.get('mc_dir', '/home/docker_device/vertigo/scopes')
vertigo_conf['cache_dir'] = conf.get('cache_dir', '/home/docker_device/cache/scopes')
vertigo_conf['mc_container'] = conf.get('mc_container', 'microcontroller')
vertigo_conf['mc_dependency'] = conf.get('mc_dependency', 'dependency')
''' Load storlet parameters '''
configParser = RawConfigParser()
configParser.read(conf.get('__file__'))
storlet_parameters = configParser.items('filter:storlet_handler')
for key, val in storlet_parameters:
vertigo_conf[key] = val
""" Load Storlets Gateway configuration """
configParser = RawConfigParser()
configParser.read(vertigo_conf['storlet_gateway_conf'])
additional_items = configParser.items("DEFAULT")
for key, val in additional_items:
vertigo_conf[key] = val
""" Load Storlets Gateway class """
module_name = vertigo_conf.get('storlet_gateway_module', 'stub')
gateway_class = load_gateway(module_name)
vertigo_conf['storlets_gateway_module'] = gateway_class
"""
Register Lua script to retrieve policies in a single redis call
"""
vertigo_conf['redis_host'] = conf.get('redis_host', 'controller')
vertigo_conf['redis_port'] = int(conf.get('redis_port', 6379))
vertigo_conf['redis_db'] = int(conf.get('redis_db', 0))
if vertigo_conf['execution_server'] == 'proxy':
r = redis.StrictRedis(vertigo_conf['redis_host'],
vertigo_conf['redis_port'],
vertigo_conf['redis_db'])
lua = """
local t = {}
if redis.call('EXISTS', 'mc_pipeline:'..ARGV[1]..':'..ARGV[2]..':'..ARGV[3])==1 then
t = redis.call('HGETALL', 'mc_pipeline:'..ARGV[1]..':'..ARGV[2]..':'..ARGV[3])
elseif redis.call('EXISTS', 'mc_pipeline:'..ARGV[1]..':'..ARGV[2])==1 then
t = redis.call('HGETALL', 'mc_pipeline:'..ARGV[1]..':'..ARGV[2])
end
return t"""
lua_sha = r.script_load(lua)
vertigo_conf['LUA_get_mc_sha'] = lua_sha
def swift_vertigo(app):
return VertigoHandlerMiddleware(app, global_conf, vertigo_conf)
return swift_vertigo
```
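The Lua script registered above is meant to be invoked later with `EVALSHA`, passing the scope, container and object as `ARGV` values (no keys). A hypothetical call with redis-py, where the argument values are placeholders:
```python
import redis

r = redis.StrictRedis('controller', 6379, 0)
lua_sha = vertigo_conf['LUA_get_mc_sha']    # assumed to be the sha produced in filter_factory
# 0 keys, three ARGV values: scope, container and object
mc_pipeline = r.evalsha(lua_sha, 0, '0123456789abc', 'container1', 'object1')
```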
#### File: vertigo_middleware/handlers/obj.py
```python
from swift.common.swob import HTTPMethodNotAllowed, Response
from swift.common.utils import public
from vertigo_middleware.handlers import VertigoBaseHandler
class VertigoObjectHandler(VertigoBaseHandler):
def __init__(self, request, conf, app, logger):
super(VertigoObjectHandler, self).__init__(
request, conf, app, logger)
def _parse_vaco(self):
_, _, acc, cont, obj = self.request.split_path(
5, 5, rest_with_last=True)
return ('v1', acc, cont, obj)
def handle_request(self):
if hasattr(self, self.request.method) and self.is_valid_request:
try:
handler = getattr(self, self.request.method)
getattr(handler, 'publicly_accessible')
except AttributeError:
return HTTPMethodNotAllowed(request=self.request)
return handler()
else:
return self.request.get_response(self.app)
# return HTTPMethodNotAllowed(request=self.request)
def _process_mc_data(self, response, mc_data):
"""
Processes the data returned from the micro-controller
"""
if mc_data['command'] == 'CONTINUE':
return response
elif mc_data['command'] == 'STORLET':
slist = mc_data['list']
self.logger.info('Going to execute Storlets: ' + str(slist))
return self.apply_storlet_on_get(response, slist)
elif mc_data['command'] == 'CANCEL':
msg = mc_data['message']
return Response(body=msg + '\n', headers={'etag': ''},
request=self.request)
@public
def GET(self):
"""
GET handler on Object
"""
response = self.request.get_response(self.app)
# start = time.time()
if self.obj.endswith('/'):
# is a pseudo-folder
mc_list = None
else:
mc_list = self.get_microcontroller_list_object(response.headers, self.method)
if mc_list:
self.logger.info('Vertigo - There are micro-controllers' +
' to execute: ' + str(mc_list))
self._setup_docker_gateway(response)
mc_data = self.mc_docker_gateway.execute_microcontrollers(mc_list)
response = self._process_mc_data(response, mc_data)
else:
self.logger.info('Vertigo - No micro-controllers to execute')
# end = time.time() - start
# f = open("/tmp/vertigo/vertigo_get_overhead.log", 'a')
# f.write(str(int(round(end * 1000)))+'\n')
# f.close()
return response
```
#### File: vertigo/Utils/deploy_storlet.py
```python
from swiftclient import client as c
def enable_account_for_storlets(url, token):
headers = dict()
headers['X-Account-Meta-storlet-enabled'] = 'True'
c.post_account(url, token, headers)
def put_storlet_object(url, token, storlet_path, storlet_name, main_class, dependency=''):
metadata = {'X-Object-Meta-Storlet-Language': 'Java',
'X-Object-Meta-Storlet-Interface-Version': '1.0',
'X-Object-Meta-Storlet-Dependency': dependency,
'X-Object-Meta-Storlet-Object-Metadata': 'no',
'X-Object-Meta-Storlet-Main': main_class}
f = open('%s/%s' % (storlet_path, storlet_name), 'r')
content_length = None
response = dict()
c.put_object(url, token, 'storlet', storlet_name, f,
content_length, None, None,
"application/octet-stream", metadata,
None, None, None, response)
f.close()
status = response.get('status')
assert (status == 200 or status == 201)
def put_storlet_dependency(url, token, local_path_to_dep, dep_name):
metadata = {'X-Object-Meta-Storlet-Dependency-Version': '1'}
f = open('%s/%s' % (local_path_to_dep, dep_name), 'r')
content_length = None
response = dict()
c.put_object(url, token, 'dependency', dep_name, f,
content_length, None, None, "application/octet-stream",
metadata, None, None, None, response)
f.close()
status = response.get('status')
assert (status == 200 or status == 201)
keystone_ip = '10.30.220.98'
keystone_url = 'http://{}:5000/v3'.format(keystone_ip)
ACCOUNT = 'vertigo'
USER_NAME = 'vertigo'
PASSWORD = '<PASSWORD>'
url, token = c.get_auth(keystone_url, ACCOUNT + ":"+USER_NAME, PASSWORD, auth_version="3")
# print url, token
"""
------------------- Deploy Storlets to Swift Cluster -----------------
"""
path = '../StorletSamples'
# No-operation Storlet
put_storlet_object(url, token, path+'/Storlet_Noop/bin', 'noop-1.0.jar', 'com.urv.storlet.noop.NoopStorlet')
# Compression Storlet
put_storlet_object(url, token, path+'/Storlet_Compress/bin', 'compress-1.0.jar', 'com.urv.storlet.compress.CompressStorlet')
# Encryption Storlet
put_storlet_object(url, token, path+'/Storlet_Crypto/bin', 'crypto-1.0.jar', 'com.urv.storlet.crypto.AESEncryptionStorlet')
# UbuntuOne Trace Storlet (SQL Filter)
put_storlet_object(url, token, path+'/Storlet_UOneTrace/bin', 'UOneTrace-1.0.jar', 'com.urv.storlet.uonetrace.UOneTraceStorlet')
# Adaptative bandwith Storlet
put_storlet_object(url, token, path+'/Storlet_Adaptative/bin', 'adaptative-1.0.jar', 'com.urv.storlet.adaptative.AdaptativeStorlet')
# Adult dataset (csv) Storlet
put_storlet_object(url, token, path+'/Storlet_Adult/bin', 'adult-1.0.jar', 'com.urv.storlet.adult.AdultStorlet')
# Grep Storlet
put_storlet_object(url, token, path+'/Storlet_Grep/bin', 'grep-1.0.jar', 'com.urv.storlet.grep.GrepStorlet', 'commons-compress-1.6.jar,grep4j-1.8.7.jar')
put_storlet_dependency(url, token, path+'/Storlet_Grep/lib', 'commons-compress-1.6.jar')
put_storlet_dependency(url, token, path+'/Storlet_Grep/lib', 'grep4j-1.8.7.jar')
# HTML parser Storlet
put_storlet_object(url, token, path+'/Storlet_ScanHtml/bin', 'ScanHtml-1.0.jar', 'com.urv.storlet.scanhtml.ScanHtml', 'commons-compress-1.6.jar,jsoup-1.8.3.jar')
put_storlet_dependency(url, token, path+'/Storlet_ScanHtml/lib', 'jsoup-1.8.3.jar')
# Blurfaces Storlet
put_storlet_object(url, token, path+'/Storlet_BlurFaces/bin', 'blurfaces-1.0.jar', 'com.ibm.storlet.blurfaces.BlurFacesStorlet', 'commons-compress-1.2.jar,blur_faces_all.tar.gz')
put_storlet_dependency(url, token, path+'/Storlet_BlurFaces/lib', 'commons-compress-1.6.jar')
put_storlet_dependency(url, token, path+'/Storlet_BlurFaces/lib', 'blur_faces_all.tar.gz')
# Watermark Storlet
put_storlet_object(url, token, path+'/Storlet_Watermark/bin', 'watermark-1.0.jar', 'com.urv.storlet.watermark.WatermarkStorlet', 'commons-compress-1.2.jar,commons-io-1.3.2.jar,ffmpeg')
put_storlet_dependency(url, token, path+'/Storlet_Watermark/lib', 'commons-io-1.3.2.jar')
put_storlet_dependency(url, token, path+'/Storlet_Watermark/lib', 'ffmpeg')
# Transcoder Storlet
put_storlet_object(url, token, path+'/Storlet_Transcoder/bin', 'transcoder-1.0.jar', 'com.urv.storlet.transcoder.TranscoderStorlet', 'fontbox-1.8.4.jar,jempbox-1.8.4.jar,pdfbox-app-1.8.4.jar')
put_storlet_dependency(url, token, path+'/Storlet_Transcoder/lib', 'fontbox-1.8.4.jar')
put_storlet_dependency(url, token, path+'/Storlet_Transcoder/lib', 'jempbox-1.8.4.jar')
put_storlet_dependency(url, token, path+'/Storlet_Transcoder/lib', 'pdfbox-app-1.8.4.jar')
print('Done!')
``` |
{
"source": "joser1945/cmr-metadata-review",
"score": 3
} |
#### File: cmr-metadata-review/lib/CSVDIF.py
```python
class DIFOutputCSV():
def __init__(self,checkerRules,wrap):
self.checkerRules = checkerRules
self.wrap = wrap
def checkAll(self, metadata):
result = ",,,"
try:
result += self.checkerRules.check_Entry_Title(metadata) + ','
except:
result += 'np' + ','
result += ",,,,"
try:
result += self.wrap(metadata,self.checkerRules.check_Dataset_Citation_Dataset_Release_Date,'Dataset_Citation.Dataset_Release_Date') + ','
except:
result += 'np' + ','
result += ",,,,,,"
try:
result += self.wrap(metadata,self.checkerRules.check_Dataset_Citation_Persistent_Identifier_Type,'Dataset_Citation.Persistent_Identifier.Type') + ','
except:
result += 'np' + ','
try:
result += self.wrap(metadata,self.checkerRules.check_Dataset_Citation_Persistent_Identifier_Identifier,'Dataset_Citation.Persistent_Identifier.Identifier') + ','
except:
result += 'np' + ','
try:
result += self.wrap(metadata,self.checkerRules.check_Dataset_Citation_Online_Resource,'Dataset_Citation.Online_Resource') + ','
except:
result += 'np' + ','
try:
result += self.wrap(metadata,self.checkerRules.check_Personnel_Role_item,'Personnel.Role') + ','
except:
result += 'np' + ','
result += ",,,,,,,,"
try:
result += self.wrap(metadata, self.checkerRules.check_Personnel_Contact_Person_Email_item, 'Personnel.Contact_Person.Email')
except:
result += 'np'
result += ','
try:
result += self.wrap(metadata, self.checkerRules.check_Personnel_Contact_Person_phone_item, 'Personnel.Contact_Person.Phone.Number') + ','
except:
result += 'np' + ','
try:
result += self.wrap(metadata,self.checkerRules.check_Personnel_Contact_Person_Phone_Type_item,'Personnel.Contact_Person.Phone.Type') + ','
except:
result += 'np' + ','
result += ",,,,,,"
try:
result += self.wrap(metadata,self.checkerRules.check_Personnel_Contact_Group_Email_item,'Personnel.Contact_Group.Email') + ","
except:
result += 'np' + ','
try:
result += self.wrap(metadata,self.checkerRules.check_Personnel_Contact_Group_Phone_item,'Personnel.Contact_Group.Phone.Number') + ','
except:
result += 'np' + ','
try:
result += self.wrap(metadata,self.checkerRules.check_Personnel_Contact_Group_Phone_Type_item,'Personnel.Contact_Group.Phone.Type') + ','
except:
result += 'np' + ','
try:
result += self.wrap(metadata,self.checkerRules.science_Keywords_item_Category,'Science_Keywords.Category') + ','
except:
result += 'np' + ','
try:
result += self.wrap(metadata,self.checkerRules.check_science_Keywords_item_topic,'Science_Keywords.Topic') + ','
except:
result += 'np' + ','
try:
result += self.wrap(metadata,self.checkerRules.check_science_Keywords_item_Term,'Science_Keywords.Term') + ','
except:
result += 'np' + ','
try:
result += self.wrap(metadata,self.checkerRules.check_science_Keywords_item_Variable_1,'Science_Keywords.Variable_Level_1') + ','
except:
result += 'np' + ','
try:
result += self.wrap(metadata,self.checkerRules.check_science_Keywords_item_Variable_2,'Science_Keywords.Variable_Level_2') + ','
except:
result += 'np' + ','
try:
result += self.wrap(metadata, self.checkerRules.check_science_Keywords_item_Variable_3,'Science_Keywords.Variable_Level_3') + ','
except:
result += 'np' + ','
result += ','
try:
result += self.wrap(metadata,self.checkerRules.check_ISO_Topic_Category,'ISO_Topic_Category') + ','
except:
result += 'np' + ','
result += ','
try:
result += self.wrap(metadata, self.checkerRules.check_Platform_item_Type, 'Platform.Type') + ','
except:
result += 'np' + ','
try:
result += self.wrap(metadata,self.checkerRules.check_Platform_item_Short_Name,'Platform.Short_Name') + ','
except:
result += 'np' + ','
try:
result += self.wrap(metadata,self.checkerRules.check_Platform_item_Long_Name,'Platform.Long_Name') + ','
except:
result += 'np' + ','
result += ",,,,,"
try:
result += self.wrap(metadata,self.checkerRules.check_Platform_item_Instrument_item_shortname,'Platform.Instrument.Short_Name') + ','
except:
result += 'np' + ','
try:
result += self.wrap(metadata,self.checkerRules.check_Platform_item_Instrument_item_longname,'Platform.Instrument.Long_Name') + ','
except:
result += 'np' + ','
result += ",,,,,,,,"
try:
result += self.wrap(metadata,self.checkerRules.check_Platform_item_Instrument_sensor_shortname,'Platform.Instrument') + ','
except:
result += 'np' + ','
try:
result += self.wrap(metadata,self.checkerRules.check_Platform_item_Instrument_sensor_longname,'Platform.Instrument') + ','
except:
result += 'np' + ','
result += ",,,,,,,,,,,"
try:
result += self.wrap(metadata,self.checkerRules.check_Temporal_Coverage_item_Begin_Date_Time,'Temporal_Coverage.Range_DateTime.Beginning_Date_Time') + ','
except:
result += 'np' + ','
try:
result += self.wrap(metadata,self.checkerRules.check_Temporal_Coverage_item_end_Date_Time,'Temporal_Coverage.Range_DateTime.Ending_Date_Time') + ','
except:
result += 'np' + ','
result += ',,,,,,,,,,,,,,,,,'
try:
result += self.wrap(metadata,self.checkerRules.check_dataset_progress,'Dataset_Progress') +','
except:
result += 'np' + ','
result += ','
try:
result += self.wrap(metadata,self.checkerRules.check_Spatial_Coverage_Granule_Spatial_Representation,'Spatial_Coverage.Granule_Spatial_Representation') + ','
except:
result += 'np' + ','
result += ','
try:
result += self.wrap(metadata,self.checkerRules.check_Spatial_Coverage_Geometry_Coordinate_System,'Spatial_Coverage.Geometry.Coordinate_System') + ','
except:
result += 'np' + ','
try:
result += self.wrap(metadata,self.checkerRules.check_Spatial_Coverage_Geometry_Bounding_Rectangle_Southernmost_Latitude,'Spatial_Coverage.Geometry.Bounding_Rectangle') + ','
except:
result += 'np' + ','
try:
result += self.wrap(metadata,self.checkerRules.check_Spatial_Coverage_Geometry_Bounding_Rectangle_Northernmost_Latitude,'Spatial_Coverage.Geometry.Bounding_Rectangle') + ','
except:
result += 'np' + ','
try:
result += self.wrap(metadata,self.checkerRules.check_Spatial_Coverage_Geometry_Bounding_Rectangle_Westernmost_Longitude,'Spatial_Coverage.Geometry.Bounding_Rectangle') + ','
except:
result += 'np' + ','
try:
result += self.wrap(metadata,self.checkerRules.check_Spatial_Coverage_Geometry_Bounding_Rectangle_Easternmost_Longitude,'Spatial_Coverage.Geometry.Bounding_Rectangle') + ','
except:
result += 'np' + ','
result += ',,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,'
try:
result += self.wrap(metadata,self.checkerRules.check_Location_Location_Category,'Location.Location_Category') + ','
except:
result += 'np' + ','
try:
result += self.wrap(metadata,self.checkerRules.check_Location_Location_Type,'Location.Location_Type') + ','
except:
result += 'np' + ','
try:
result += self.wrap(metadata,self.checkerRules.check_Location_Subregion1,'Location.Location_Subregion1') + ','
except:
result += 'np' + ','
try:
result += self.wrap(metadata,self.checkerRules.check_Location_Subregion2,'Location.Location_Subregion2') + ','
except:
result += 'np' + ','
try:
result += self.wrap(metadata, self.checkerRules.check_Location_Subregion3, 'Location.Location_Subregion3') + ','
except:
result += 'np' + ','
result += ',,,'
try:
result += self.wrap(metadata,self.checkerRules.check_Horizontal_Resolution_Range,'Data_Resolution.Horizontal_Resolution_Range') + ','
except:
result += 'np' + ','
result += ','
try:
result += self.wrap(metadata,self.checkerRules.check_Vertical_Resolution_Range,'Data_Resolution.Vertical_Resolution_Range') + ','
except:
result += 'np' + ','
result += ','
try:
result += self.wrap(metadata,self.checkerRules.check_Temporal_Resolution_Range,'Data_Resolution.Temporal_Resolution_Range') + ','
except:
result += 'np' + ','
try:
result += self.wrap(metadata,self.checkerRules.check_Project_Short_Name,'Project.Short_Name') + ','
except:
result += 'np' + ','
try:
result += self.wrap(metadata,self.checkerRules.check_Project_Long_Name,'Project.Long_Name') + ','
except:
result += 'np' + ','
result += ',,,'
try:
result += self.wrap(metadata,self.checkerRules.check_Quality,'Quality') + ','
except:
result += 'np' + ','
result += ',,'
try:
result += self.wrap(metadata,self.checkerRules.check_Dataset_Language,'Dataset_Language') + ','
except:
result += 'np' + ','
result += ','
try:
result += self.wrap(metadata,self.checkerRules.check_Organization_Organization_Type,'Organization.Organization_Type') + ','
except:
result += 'np' + ','
try:
result += self.wrap(metadata,self.checkerRules.check_Organization_Name_Short_Name,'Organization.Organization_Name.Short_Name') + ','
except:
result += 'np' + ','
try:
result += self.wrap(metadata,self.checkerRules.check_Organization_Name_Long_Name,'Organization.Organization_Name.Long_Name') + ','
except:
result += 'np' + ','
result += ',,,,,,,,,,,,,,,'
try:
result += self.wrap(metadata,self.checkerRules.check_Organization_Personnel_Contact_Person_Phone_Type,'Organization.Personnel.Contact_Person.Phone.Type') + ','
except:
result += 'np' + ','
result += ',,,,,,,,'
try:
result += self.wrap(metadata, self.checkerRules.check_Organization_Personnel_Contact_Person_Phone_Type,'Organization.Personnel.Contact_Group.Phone.Type') + ','
except:
result += 'np' + ','
result += ',,'
try:
result += self.wrap(metadata,self.checkerRules.check_Distribution_Distribution_Format,'Distribution.Distribution_Format') + ','
except:
result += 'np' + ','
result += ',,'
try:
result += self.wrap(metadata,self.checkerRules.check_Multimedia_Sample_URL,'Multimedia_Sample.URL') + ','
except:
result += 'np' + ','
result += ',,,,,,,,,,,,,,,,,,,,'
try:
result += self.wrap(metadata,self.checkerRules.check_summary_abstract,'Summary.Abstract') + ','
except:
result += 'np' + ','
result += ','
try:
temp = self.wrap(metadata,self.checkerRules.check_Related_URL_item_Content_Type,'Related_URL.URL_Content_Type.Type')
result += self.checkerRules.check_Related_URL_Content_Type(temp) + ','
except:
result += 'np' + ','
try:
result += self.wrap(metadata,self.checkerRules.check_Related_URL_Content_Type_SubType,'Related_URL.URL_Content_Type.Subtype') + ','
except:
result += 'np' + ','
result += ',,,'
try:
temp = self.wrap(metadata,self.checkerRules.check_Related_URL_Description_Item,'Related_URL.Description')
result += self.checkerRules.check_Related_URL_Description(temp) + ','
except:
result += 'np' + ','
try:
temp = self.wrap(metadata,self.checkerRules.check_Related_URL_Mime_Type,'Related_URL')
result += self.checkerRules.convertMimeType(temp) + ','
except:
result += 'np' + ','
result += ',,,,,,,,,,,,,,'
try:
result += self.wrap(metadata,self.checkerRules.check_Product_Level_ID,'Product_Level_Id') + ','
except:
result += 'np' + ','
result += ','
try:
result += self.wrap(metadata,self.checkerRules.check_Collection_Data_Type,'Collection_Data_Type') + ','
except:
result += 'np' + ','
result += ',,,,'
try:
result += self.wrap(metadata,self.checkerRules.check_Metadata_Dates_Creation,'Metadata_Dates.Metadata_Creation') + ','
except:
result += 'np' + ','
try:
result += self.wrap(metadata,self.checkerRules.check_Metadata_last_revision,'Metadata_Dates.Metadata_Last_Revision') + ','
except:
result += 'np' + ','
result += ',,'
try:
result += self.wrap(metadata, self.checkerRules.check_Metadata_data_creation,'Metadata_Dates.Data_Creation') + ','
except:
result += 'np' + ','
try:
result += self.wrap(metadata,self.checkerRules.check_Metadata_data_latest_revision,'Metadata_Dates.Data_Last_Revision') + ','
except:
result += 'np' + ','
return result
``` |
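Every column in `checkAll` follows the same pattern: run a rule, fall back to `'np'` if it raises, and append a comma. A small helper (a sketch, not part of the original class) expresses that pattern once:
```python
def safe_cell(wrap, metadata, rule, field=None):
    """Return one CSV cell, falling back to 'np' when the checker rule raises."""
    try:
        value = wrap(metadata, rule, field) if field is not None else rule(metadata)
    except Exception:
        value = 'np'
    return value + ','

# e.g. inside checkAll:
#   result += safe_cell(self.wrap, metadata, self.checkerRules.check_Entry_Title)
#   result += safe_cell(self.wrap, metadata,
#                       self.checkerRules.check_Dataset_Citation_Online_Resource,
#                       'Dataset_Citation.Online_Resource')
```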
{
"source": "JoseRafaelCarmona/informacion_inicio_python",
"score": 3
} |
#### File: JoseRafaelCarmona/informacion_inicio_python/inicio.py
```python
import ip_publica as publica
from subprocess import Popen, PIPE, STDOUT
import os
# testing the colors #
from colorama import Fore, init, Back, Style
BIENVENIDA = '''
______ _ _ _
(____ \(_) (_) | |
____) )_ ____ ____ _ _ ____ ____ _ _ | | ___
| __ (| |/ _ ) _ \ | | / _ ) _ \| |/ || |/ _ \
| |__) ) ( (/ /| | | \ V ( (/ /| | | | ( (_| | |_| |
|______/|_|\____)_| |_|\_/ \____)_| |_|_|\____|\___/
'''
def comando(orden):
eventStatus = Popen(orden, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
outputStatus = eventStatus.communicate()
return outputStatus[0].decode('utf-8')
def salida_datos(ip_publica,ip_privada):
print(Fore.GREEN+'Tu direccion ip publica>> '+ Fore.WHITE+'%s'%ip_publica)
print(Fore.GREEN+'Tu direccion ip privada>> '+ Fore.WHITE+'%s'%ip_privada)
if __name__ == "__main__":
print(BIENVENIDA)
    ## first show the main information before you start using a terminal
print('obteniendo tus direcciones ip')
print(Fore.YELLOW+'Buscando..')
ip_privada = comando('ip add | egrep -o "[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\/24"')
salida_datos(publica.obtener_ip_publica(), ip_privada)
```
#### File: JoseRafaelCarmona/informacion_inicio_python/ip_publica.py
```python
import requests
from bs4 import BeautifulSoup
def obtener_ip_publica():
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36'
}
url = 'https://www.cual-es-mi-ip.net/'
contenido_pagina = requests.get(url, headers)
soup = BeautifulSoup(contenido_pagina.text, 'html.parser')
return str(soup.find("span", {"class": "big-text font-arial"}).getText())
``` |
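Scraping an HTML page for the public IP breaks if the site's markup changes. A simpler sketch (using the plain-text endpoint at api.ipify.org, which the script above does not use) avoids HTML parsing entirely:
```python
import requests


def obtener_ip_publica_simple():
    """Fetch the public IP from a plain-text endpoint (alternative sketch)."""
    return requests.get('https://api.ipify.org', timeout=5).text.strip()
```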
{
"source": "joseram97/CMPUT291Mini_Project",
"score": 3
} |
#### File: joseram97/CMPUT291Mini_Project/dataConn.py
```python
import sqlite3
from datetime import datetime
import os
#this is the data class that will hold all of the information for the
# mini-project
#The following is all code from the lab that I did for getting the data
# from the tables
connection = None
cursor = None
def connect(path):
global connection, cursor
if not os.path.isfile(path):
raise Exception("Invalid path")
connection = sqlite3.connect(path)
connection.row_factory = sqlite3.Row
cursor = connection.cursor()
cursor.execute(' PRAGMA foreign_keys=ON; ')
connection.commit()
return
def offer_ride(date,driver,seats,price,desc,src,dst,cno,enroute):
##Needed for spec 1
rno = get_max_ride_id()[0] + 1
offer_ride= '''
INSERT INTO rides(rno, price, rdate, seats, lugDesc, src, dst, driver, cno) VALUES
(:rno,:price,:date,:seats,:desc,:src,:dst,:driver,:cno);
'''
cursor.execute(offer_ride,{"rno":rno,"price":price,"date":date,"seats":seats,"desc":desc,"src":src,"dst":dst,"driver":driver,"cno":cno});
connection.commit()
return rno
def check_login(email,password):
query = '''
SELECT * FROM members WHERE email=:email AND pwd=:pass
'''
cursor.execute(query,{"email":email,"pass":password})
connection.commit()
return cursor.fetchone()
def register(email,name,phone,password):
register= '''
INSERT INTO members(email, name, phone, pwd) VALUES
(:email,:name,:phone,:pwd)
'''
cursor.execute(register,{"email":email,"name":name,"phone":phone,"pwd":password})
connection.commit()
return
def check_email(email):
q = '''
SELECT * FROM members WHERE email=:email;
'''
cursor.execute(q,{"email":email})
connection.commit()
return cursor.fetchone()
#Returns all locations by lCode
def get_locations_by_location_code(lCode):
get_locations = '''
SELECT * FROM locations WHERE lcode = :lcode;
'''
cursor.execute(get_locations,{"lcode":lCode});
connection.commit()
return cursor.fetchone()
def add_enroute(lcode,rno):
add_enroute = '''
INSERT INTO enroute(rno,lcode) VALUES
(:rno,:lcode);
'''
cursor.execute(add_enroute,{"rno":rno,"lcode":lcode});
connection.commit()
return
#Returns all locations by keyword **Used if lCode is not found**
def get_locations_by_keyword(keyword):
keyword = '%'+keyword+'%'
get_locations = '''
SELECT * FROM locations WHERE city LIKE :keyword
UNION
SELECT * FROM locations WHERE prov LIKE :keyword
UNION
SELECT * FROM locations WHERE address LIKE :keyword;
'''
cursor.execute(get_locations,{"keyword":keyword});
connection.commit()
return cursor.fetchall()
##NOTE: Incomplete
def search_for_rides(listKeys):
# listKeys is a list of all of the location keywords
keys = [""]*3
i = 0
for keyword in listKeys:
keys[i] = keyword
i = i + 1
key1 = '%'+keys[0]+'%'
key2 = '%'+keys[1]+'%'
key3 = '%'+keys[2]+'%'
ride_search = '''
SELECT DISTINCT r.*
FROM rides r, enroute e, locations l
WHERE (r.rno = e.rno
AND e.lcode = l.lcode
OR r.src = l.lcode
OR r.dst = l.lcode)
AND (l.city LIKE :key1
OR l.prov LIKE :key1
OR l.address LIKE :key1
OR l.lcode = :key1)
INTERSECT
SELECT DISTINCT r.*
FROM rides r, enroute e, locations l
WHERE (r.rno = e.rno
AND e.lcode = l.lcode
OR r.src = l.lcode
OR r.dst = l.lcode)
AND (l.city LIKE :key2
OR l.prov LIKE :key2
OR l.address LIKE :key2
OR l.lcode = :key2)
INTERSECT
SELECT DISTINCT r.*
FROM rides r, enroute e, locations l
WHERE (r.rno = e.rno
AND e.lcode = l.lcode
OR r.src = l.lcode
OR r.dst = l.lcode)
AND (l.city LIKE :key3
OR l.prov LIKE :key3
OR l.address LIKE :key3
OR l.lcode = :key3);
'''
cursor.execute(ride_search,{"key1":key1,"key2":key2,"key3":key3});
connection.commit()
return cursor.fetchall()
def post_ride_request(date, pLoc, dLoc, amount, rid, email):
#Needed for Spec 4
post_ride = '''
INSERT INTO requests(rid, email, rdate, pickup, dropoff, amount) VALUES
(:rid,:email,:date,:pLoc,:dLoc,:amount);
'''
cursor.execute(post_ride,{"rid":rid,"email":email,"date":date,"pLoc":pLoc,"dLoc":dLoc,"amount":amount});
connection.commit()
return
def get_ride_requests_by_email(email):
#Needed for Spec 5
get_rides = '''
SELECT * FROM requests WHERE email = :email;
'''
cursor.execute(get_rides,{"email":email});
connection.commit()
return cursor.fetchall()
def delete_ride_request_by_id(rid):
#Needed for Spec 5
delete_rides = '''
DELETE FROM requests WHERE rid = :rid;
'''
cursor.execute(delete_rides,{"rid":rid});
connection.commit()
return
def get_requests_by_location(lCode):
lCodeP = '%'+lCode+'%'
get_req = '''
SELECT DISTINCT *
FROM requests r
WHERE r.pickup = :lcode
UNION
SELECT DISTINCT *
FROM requests r
WHERE r.pickup IN (SELECT lcode
FROM locations
WHERE city LIKE :lcodeP);
'''
cursor.execute(get_req,{"lcode":lCode,"lcodeP":lCodeP});
connection.commit()
return cursor.fetchall()
def get_bookings_by_driver(driverEmail):
##Needed for Spec #3
get_bookings = '''
SELECT b.*
FROM bookings b, rides r
WHERE r.driver = :driverEmail
AND r.rno=b.rno;
'''
cursor.execute(get_bookings,{"driverEmail":driverEmail});
connection.commit()
return cursor.fetchall()
def remove_booking_by_id(bno,email,sender,rno):
##Needed for Spec #3
delete_booking = '''
DELETE FROM bookings WHERE bno = :bno;
'''
cursor.execute(delete_booking,{"bno":bno});
connection.commit()
send_message_to_member(email,sender,"Your booking has been cancelled",rno)
return
def get_rides_with_available_seats_by_member(driver):
##Needed for Spec #3
##Gets all FUTURE rides that the person is offering with how many seats remaining
get_rides = '''
SELECT r.rno,(r.seats-IFNULL(SUM(b.seats),0))
FROM rides r
LEFT OUTER JOIN bookings b
ON r.rno=b.rno
WHERE r.driver=:driver
AND r.rdate > date('now')
GROUP BY b.rno
'''
cursor.execute(get_rides,{"driver":driver});
connection.commit()
return cursor.fetchall()
def check_ride_ownership(email,rno):
get_owner = '''
SELECT *
FROM rides r
WHERE driver=:email
AND rno = :rno
'''
cursor.execute(get_owner,{"email":email,"rno":rno});
connection.commit()
return cursor.fetchone()
def book_member_for_ride(rno,email,seatsBooked,cost,src,dst):
##Needed for Spec #3
bno = get_max_booking_id()[0]+1
book_member = '''
INSERT INTO bookings(bno,email,rno,cost,seats,pickup,dropoff) VALUES
(:bno,:email,:rno,:cost,:seats,:src,:dst)
'''
cursor.execute(book_member,{"bno":bno,"email":email,"rno":rno,"cost":cost,"seats":seatsBooked,"src":src,"dst":dst});
connection.commit()
return
def book_member_for_ride_by_driver(rno,email,seatsBooked,cost,src,dst,driver):
##Needed for Spec #3
bno = get_max_booking_id()[0]+1
book_member = '''
INSERT INTO bookings(bno,email,rno,cost,seats,pickup,dropoff) VALUES
(:bno,:email,:rno,:cost,:seats,:src,:dst)
'''
cursor.execute(book_member,{"bno":bno,"email":email,"rno":rno,"cost":cost,"seats":seatsBooked,"src":src,"dst":dst});
connection.commit()
send_message_to_member(email,driver,"You have been booked for ride number: {0}".format(rno),rno)
return
def send_message_to_member(email, sender, content, rno):
##Needed for Spec #3
msgTimestamp = datetime.now();
seen = 'N'
send_message = '''
INSERT INTO inbox(email, msgTimestamp, sender, content, rno, seen) VALUES
(:email,:msgTimestamp,:sender,:content,:rno,:seen);
'''
cursor.execute(send_message,{"email":email,"msgTimestamp":msgTimestamp,"sender":sender,"content":content,"rno":rno,"seen":seen});
connection.commit()
return
def set_messages_to_seen(email):
set_seen = '''
UPDATE inbox
SET seen='Y'
WHERE email=:email;
'''
cursor.execute(set_seen,{"email":email});
connection.commit()
return
def get_unseen_messages_by_email(email):
get_unseen = '''
SELECT *
FROM inbox
WHERE email=:email
AND seen = 'N';
'''
cursor.execute(get_unseen,{"email":email});
connection.commit()
return cursor.fetchall()
def get_car_by_cno(cno):
get_car = '''
SELECT * FROM cars WHERE cno=:cno;
'''
cursor.execute(get_car,{"cno":cno})
connection.commit()
return cursor.fetchone()
def get_car_by_driver_cno(cno,driver):
get_car = '''
SELECT * FROM cars WHERE cno=:cno
AND owner=:driver;
'''
cursor.execute(get_car,{"cno":cno,"driver":driver})
connection.commit()
return cursor.fetchone()
def get_car_by_driver(driver):
get_car = '''
SELECT * FROM cars WHERE owner=:driver;
'''
cursor.execute(get_car,{"driver":driver})
connection.commit()
return cursor.fetchone()
def get_max_ride_id():
get_max = '''
SELECT MAX(rno) FROM rides r;
'''
cursor.execute(get_max)
connection.commit()
return cursor.fetchone()
def get_max_request_id():
get_max = '''
SELECT MAX(rid) FROM requests r;
'''
cursor.execute(get_max)
connection.commit()
return cursor.fetchone()
def get_max_booking_id():
get_max = '''
SELECT MAX(bno) FROM bookings b;
'''
cursor.execute(get_max)
connection.commit()
return cursor.fetchone()
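# --- Usage sketch (added for illustration; not part of the original module) ---
# Shows one way the booking helpers above might be chained together by the
# surrounding application. It assumes the module-level `connection`/`cursor`
# are already open; the emails, cost and pickup/dropoff codes are placeholders.
#
#   driver = '[email protected]'
#   rider = '[email protected]'
#   for rno, seats_left in get_rides_with_available_seats_by_member(driver):
#       if seats_left > 0 and check_ride_ownership(driver, rno):
#           # book one seat and notify the rider through the inbox table
#           book_member_for_ride_by_driver(rno, rider, 1, 10, 'YYC', 'YEG', driver)
#           break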
``` |
{
"source": "joseramoncajide/gtm-datalayer_audit-appengine",
"score": 2
} |
#### File: joseramoncajide/gtm-datalayer_audit-appengine/main.py
```python
import datetime
import json
import logging
import os
from pprint import pformat
import jinja2
import webapp2
import time
from google.appengine.api import app_identity
from google.appengine.ext import vendor
vendor.add('lib')
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
from google.cloud import storage
from flask import Flask
app = Flask(__name__)
compute = discovery.build('compute', 'v1', credentials=GoogleCredentials.get_application_default())
def create_bucket(bucket_name):
"""Creates a new bucket."""
storage_client = storage.Client()
bucket = storage_client.create_bucket(bucket_name)
print('Bucket {} created'.format(bucket.name))
CONFIG = {
# In DRY_RUN mode, deletes are only logged. Set this to False after you've
# double-checked the status page and you're ready to enable the deletes.
'DRY_RUN': False,
# Be careful, this application could delete all instances in this project.
# Your project id can be found on the overview tab of the Google APIs
# Console: https://code.google.com/apis/console/
'GCE_PROJECT_ID': app_identity.get_application_id(),
# Instances created with these tags will never be deleted.
'SAFE_TAGS': ['production', 'safetag'],
# Instances are deleted after they have been running for TIMEOUT minutes.
'TIMEOUT': 60 * 8, # in minutes, defaulting to 8 hours
'GC_PROJECT': 'gtm-datalayer-audit',
'GC_ZONE': 'europe-west1-b',
'GC_NAME': 'gtm-datalayer-audit',
'CUSTOMER_NAME': 'conforama',
'AUDIT_COMMAND': 'allPT',
'CLOUD_STORAGE_BUCKET': 'gtm-datalayer-app-conforama',
'APP_REPOSITORY_NAME': 'gtm-datalayer-app'
}
CONFIG['SAFE_TAGS'] = [t.lower() for t in CONFIG['SAFE_TAGS']]
# Obtain App Engine AppAssertion credentials and authorize HTTP connection.
# https://developers.google.com/appengine/docs/python/appidentity/overview
# Build object for the 'v1' version of the GCE API.
# https://developers.google.com/compute/docs/reference/v1beta13/
jinja_environment = jinja2.Environment(
loader=jinja2.FileSystemLoader('templates'))
# [START list_instances]
def list_instances(compute, project, zone):
result = compute.instances().list(project=project, zone=zone).execute()
return result['items']
# [END list_instances]
# [START create_instance]
@app.route('/vm/create')
def create_vm():
# Get the latest Ubuntu 16.04 LTS image.
image_response = compute.images().getFromFamily(
project='ubuntu-os-cloud', family='ubuntu-1604-lts').execute()
source_disk_image = image_response['selfLink']
# Configure the machine
machine_type = "zones/%s/machineTypes/n1-standard-4" % CONFIG['GC_ZONE']
startup_script = open(
os.path.join(
os.path.dirname(__file__), 'installscript.sh'), 'r').read()
config = {
'name': CONFIG['GC_NAME'],
'machineType': machine_type,
# Specify the boot disk and the image to use as a source.
'disks': [
{
'boot': True,
'autoDelete': True,
'initializeParams': {
'sourceImage': source_disk_image,
}
}
],
# Specify a network interface with NAT to access the public
# internet.
'networkInterfaces': [{
'network': 'global/networks/default',
'accessConfigs': [
{'type': 'ONE_TO_ONE_NAT', 'name': 'External NAT'}
]
}],
# Allow the instance to access cloud storage, logging and source repos.
'serviceAccounts': [{
'email': 'default',
'scopes': [
'https://www.googleapis.com/auth/devstorage.read_write',
'https://www.googleapis.com/auth/logging.write',
'https://www.googleapis.com/auth/devstorage.full_control',
'https://www.googleapis.com/auth/compute',
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/source.full_control'
]
}],
# Metadata is readable from the instance and allows you to
# pass configuration from deployment scripts to instances.
'metadata': {
'items': [{
# Startup script is automatically executed by the
# instance upon startup.
'key': 'startup-script',
'value': startup_script
}, {
'key': 'audit_command',
'value': CONFIG['AUDIT_COMMAND']
}, {
'key': 'customer_name',
'value': CONFIG['CUSTOMER_NAME']
}, {
'key': 'bucket',
'value': CONFIG['CLOUD_STORAGE_BUCKET']
}, {
'key': 'source_repo',
'value': CONFIG['APP_REPOSITORY_NAME']
}]
}
}
create_bucket(CONFIG['CLOUD_STORAGE_BUCKET'])
result = compute.instances().insert(
project=CONFIG['GC_PROJECT'],
zone=CONFIG['GC_ZONE'],
body=config).execute()
logging.debug(result)
return json.dumps(result, indent=4)
# [END create_instance]
# operation = create_instance(compute, project, zone, instance_name, bucket)
# wait_for_operation(compute, project, zone, operation['name'])
# [START wait_for_operation]
def wait_for_operation(compute, project, zone, operation):
print('Waiting for operation to finish...')
while True:
result = compute.zoneOperations().get(
project=project,
zone=zone,
operation=operation).execute()
if result['status'] == 'DONE':
print("done.")
if 'error' in result:
raise Exception(result['error'])
return result
time.sleep(1)
# [END wait_for_operation]
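# A small sketch (added for illustration; not one of the original handlers)
# showing how wait_for_operation could be combined with the /vm/create route
# above: the insert call returns a zone operation whose name can be polled
# until the instance is ready. The route name is hypothetical.
#
# @app.route('/vm/create_and_wait')
# def create_vm_and_wait():
#     result = json.loads(create_vm())
#     wait_for_operation(compute, CONFIG['GC_PROJECT'], CONFIG['GC_ZONE'], result['name'])
#     return 'Instance ready'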
@app.route('/vm/start')
def start_vm():
# credentials = AppAssertionCredentials(scope='https://www.googleapis.com/auth/compute')
# http = credentials.authorize(httplib2.Http(memcache))
# compute = discovery.build('compute', 'v1', http=http)
# compute = discovery.build('compute','v1', credentials=GoogleCredentials.get_application_default())
# Start the VM!
result = compute.instances().start(instance='instance-1', zone='europe-west1-b', project='gtm-datalayer-audit').execute()
logging.debug(result)
return json.dumps(result, indent=4)
@app.route('/vm/stop')
def stop_vm():
# credentials = AppAssertionCredentials(scope='https://www.googleapis.com/auth/compute')
# http = credentials.authorize(httplib2.Http(memcache))
# compute = discovery.build('compute', 'v1', http=http)
# compute = discovery.build('compute','v1', credentials=GoogleCredentials.get_application_default())
# Stop the VM!
result = compute.instances().stop(instance='instance-1', zone='europe-west1-b', project='gtm-datalayer-audit').execute()
logging.debug(result)
return json.dumps(result, indent=4)
@app.errorhandler(404)
def page_not_found(e):
"""Return a custom 404 error."""
return 'Sorry, Nothing at this URL.', 404
@app.errorhandler(500)
def application_error(e):
"""Return a custom 500 error."""
return 'Sorry, unexpected error: {}'.format(e), 500
if __name__ == '__main__':
app.run(debug=True)
# SAMPLE_NAME = 'Instance timeout helper'
# def start_instance():
# """logs all expired instances, calls delete API when not DRY_RUN"""
# instances = list_instances()
#
# for instance in instances:
# name = instance['name']
# zone = instance['zone'].split('/')[-1]
# if CONFIG['DRY_RUN']:
# logging.info("DRY_RUN, not deleted: %s", name)
# else:
# logging.info("START: %s", name)
# request = compute.instances().start(
# project=CONFIG['GCE_PROJECT_ID'],
# instance=name,
# zone=zone)
# response = request.execute()
# logging.info(response)
#
#
# def annotate_instances(instances):
# """loops through the instances and adds exclusion, age and timeout"""
# for inst in instances:
# # set _excluded
# excluded = False
# tags = inst.get('tags', {}).get('items', [])
# inst['_tags'] = tags
#
# for tag in tags:
# if tag.lower() in CONFIG['SAFE_TAGS']:
# excluded = True
# break
# inst['_excluded'] = excluded
#
# # set _age_minutes and _timeout_expired
# # _timeout_expired is never True for _excluded inst
# creation = parse_iso8601tz(inst['creationTimestamp'])
# now = datetime.datetime.now()
# delta = now - creation
# age_minutes = (delta.days * 24 * 60) + (delta.seconds / 60)
# inst['_age_minutes'] = age_minutes
# # >= comparison because seconds are truncated above.
# if not inst['_excluded'] and age_minutes >= CONFIG['TIMEOUT']:
# inst['_timeout_expired'] = True
# else:
# inst['_timeout_expired'] = False
#
#
# def list_instances():
# """returns a list of dictionaries containing GCE instance data"""
# request = compute.instances().aggregatedList(project=CONFIG['GCE_PROJECT_ID'])
# response = request.execute()
# zones = response.get('items', {})
# instances = []
# for zone in zones.values():
# for instance in zone.get('instances', []):
# instances.append(instance)
# annotate_instances(instances)
# return instances
#
#
# class MainHandler(webapp2.RequestHandler):
# """index handler, displays app configuration and instance data"""
# def get(self):
# instances = list_instances()
#
# data = {}
# data['config'] = CONFIG
# data['title'] = SAMPLE_NAME
# data['instances'] = instances
# data['raw_instances'] = json.dumps(instances, indent=4, sort_keys=True)
#
# template = jinja_environment.get_template('index.html')
# self.response.out.write(template.render(data))
#
#
# def delete_expired_instances():
# """logs all expired instances, calls delete API when not DRY_RUN"""
# instances = list_instances()
#
# # filter instances, keep only expired instances
# instances = [i for i in instances if i['_timeout_expired']]
#
# logging.info('delete cron: %s instance%s to delete',
# len(instances), '' if len(instances) == 1 else 's')
#
# for instance in instances:
# name = instance['name']
# zone = instance['zone'].split('/')[-1]
# if CONFIG['DRY_RUN']:
# logging.info("DRY_RUN, not deleted: %s", name)
# else:
# logging.info("DELETE: %s", name)
# request = compute.instances().delete(
# project=CONFIG['GCE_PROJECT_ID'],
# instance=name,
# zone=zone)
# response = request.execute()
# logging.info(response)
#
# class StartHandler(webapp2.RequestHandler):
# """delete handler - HTTP endpoint for the GAE cron job"""
# def get(self):
# start_instance()
#
# class DeleteHandler(webapp2.RequestHandler):
# """delete handler - HTTP endpoint for the GAE cron job"""
# def get(self):
# delete_expired_instances()
#
#
# app = webapp2.WSGIApplication([
# ('/cron/start', StartHandler),
# ('/cron/delete', DeleteHandler),
# ('/', MainHandler),
# ], debug=True)
#
#
# # ------------------------------------------------
# # helpers
# def parse_iso8601tz(date_string):
# """return a datetime object for a string in ISO 8601 format.
#
# This function parses strings in exactly this format:
# '2012-12-26T13:31:47.823-08:00'
#
# Sadly, datetime.strptime's %z format is unavailable on many platforms,
# so we can't use a single strptime() call.
# """
#
# dt = datetime.datetime.strptime(date_string[:-6],
# '%Y-%m-%dT%H:%M:%S.%f')
#
# # parse the timezone offset separately
# delta = datetime.timedelta(minutes=int(date_string[-2:]),
# hours=int(date_string[-5:-3]))
# if date_string[-6] == '-':
# # add the delta to return to UTC time
# dt = dt + delta
# else:
# dt = dt - delta
# return dt
``` |
{
"source": "joseramonc/dotfiles",
"score": 2
} |
#### File: dotfiles/sublime.symlink/erb_command.py
```python
import sublime, sublime_plugin
import re
ERB_BLOCKS = [['<%=', '%>'], ['<%', '%>'], ['<%#', '%>']]
ERB_REGEX = r'<%(=?|-?|#?)\s{2}(-?)%>'
# matches an opening bracket that is not followed by the closing one
ERB_OPENER_REGEX = r'<%[\=\-\#]?(?!.*%>)'
# matches the closing bracket. I couldn't figure out a way to exclude a preceding
# opening bracket, since Python only supports fixed-width negative lookbehind
ERB_CLOSER_REGEX = r'-?%>'
class ErbCommand(sublime_plugin.TextCommand):
def run(self, edit):
if len(self.view.sel()) < 1:
return
# storing all new cursor positions to ensure they stay where they were before the changes
new_selections = []
# looping through each selection
for region in self.view.sel():
# searching for an opening bracket and closing bracket
opener, closer = self.find_surrounding_blocks(region)
if (opener is not None) and (closer is not None):
# if brackets found - replacing them with the next ones. Result is a new cursor position.
new_selections.append(self.replace_erb_block(edit, opener, closer, region))
else:
# if the brackets weren't found - inserting new ones. Result is a new cursor position.
new_selections.append(self.insert_erb_block(edit, region))
# clearing current selections
self.view.sel().clear()
# looping through the modified selections and adding them
for selection in new_selections:
self.view.sel().add(selection)
def find_surrounding_blocks(self, region):
opener = None
closer = None
# grabbing the whole line
containing_line = self.view.line(region)
# one region to the left of the selection and one to the right
left_region = sublime.Region(containing_line.begin(), region.begin())
right_region = sublime.Region(containing_line.end(), region.end())
# searching in the left region for an opening bracket
found_openers = list(re.finditer(ERB_OPENER_REGEX, self.view.substr(left_region)))
if len(found_openers) > 0:
# if found, creating a region for it, using the last match - the rightmost bracket found
opener = sublime.Region(left_region.begin() + found_openers[-1].start(), left_region.begin() + found_openers[-1].end())
# searching for a closing bracket in the right region
found_closers = list(re.finditer(ERB_CLOSER_REGEX, self.view.substr(right_region)))
if len(found_closers) > 0:
# if found, creating a new region, using the first match - the leftmost bracket found
closer = sublime.Region(right_region.begin() + found_closers[0].start(), right_region.begin() + found_closers[0].end())
return opener, closer
def insert_erb_block(self, edit, region):
# inserting the first block in the list
default_block = ERB_BLOCKS[0]
# inserting in reverse order because line length might change
self.view.insert(edit, region.end(), " %s" % default_block[1])
inserted_before = self.view.insert(edit, region.begin(), "%s " % default_block[0])
# returning a region, shifted by the number of inserted characters before the cursor
return sublime.Region(region.begin() + inserted_before, region.end() + inserted_before)
def replace_erb_block(self, edit, opener, closer, region):
# getting the next block in the list
next_block = self.get_next_erb_block(self.view.substr(opener), self.view.substr(closer))
# calculating by how many characters the selection will change
changed_before = len(next_block[0]) - len(self.view.substr(opener))
# replacing in reverse order because line length might change
self.view.replace(edit, closer, next_block[1])
self.view.replace(edit, opener, next_block[0])
# returning a region, shifted by the number of difference of characters changed
return sublime.Region(region.begin() + changed_before, region.end() + changed_before)
def get_next_erb_block(self, opening_bracket, closing_bracket):
for i, block in enumerate(ERB_BLOCKS):
if [opening_bracket, closing_bracket] == block:
# if outside of scope - returning the first block
if i + 1 >= len(ERB_BLOCKS):
return ERB_BLOCKS[0]
else:
return ERB_BLOCKS[i + 1]
# in case we haven't found the block in the list, returning the first one
return ERB_BLOCKS[0]
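# Note (added for clarity): Sublime Text derives the command name "erb" from the
# ErbCommand class, so the command can be bound to a key with an entry such as
# the following in the user keymap (the key combination shown is only an example):
#   { "keys": ["ctrl+shift+."], "command": "erb" }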
``` |
{
"source": "jose-raul-barreras/AppSecPipeline-Specification",
"score": 2
} |
#### File: tools/appspider/PyAppSpider.py
```python
import json
import requests
import requests.exceptions
import requests.packages.urllib3
from xml.etree import cElementTree as ET
#from . import __version__ as version
class PyAppSpider(object):
"""An API wrapper for AppSpider Enterprise.
https://appspider.help.rapid7.com/docs/rest-api-overview
"""
token = None
success = False
loginCode = 0
clients = None
def __init__(self, host, api_version='v1', verify_ssl=True, timeout=60, proxies=None, user_agent=None, cert=None, debug=False):
"""Initialize a AppSpider Enterprise API instance.
:param host: The URL for the AppSpider Enterprise server. (e.g., http://localhost:8000/AppSpider Enterprise/)
:param api_key: The API key generated on the AppSpider Enterprise API key page.
:param user: The user associated with the API key.
:param api_version: API version to call, the default is v1.
:param verify_ssl: Specify if API requests will verify the host's SSL certificate, defaults to true.
:param timeout: HTTP timeout in seconds, default is 30.
:param proxies: Proxy for API requests.
:param user_agent: HTTP user agent string, default is "AppSpider Enterprise_api/[version]".
:param cert: You can also specify a local cert to use as client side certificate, as a single file (containing
the private key and the certificate) or as a tuple of both file's path
:param debug: Prints requests and responses, useful for debugging.
"""
version = "0.2"
self.host = host + 'AppSpiderEnterprise/rest/' + api_version + '/'
self.api_version = api_version
self.verify_ssl = verify_ssl
self.proxies = proxies
self.timeout = timeout
if not user_agent:
self.user_agent = 'pyAppSpider_api/v' + version
else:
self.user_agent = user_agent
self.cert = cert
self.debug = debug # Prints request and response information.
if not self.verify_ssl:
requests.packages.urllib3.disable_warnings() # Disabling SSL warning messages if verification is disabled.
def authenticate(self, name, password, clientId=None):
"""Returns the AppSpider authentication token and/or client associated with the login. If the account is multi-client then AppSpider returns the list of clients associated with the account.
:param name: Userid of the appspider user
:param password: Password of the appspider user
:param clientId: ClientID in AppSpider
"""
params = {}
if clientId:
params['clientId'] = clientId
params['name'] = name
params['password'] = password
response = self._request('POST', 'Authentication/Login', data=params)
if response.success:
self.success = response.data["IsSuccess"]
if self.success:
self.token = response.data["Token"]
self.loginCode = 1 #Authenticated
elif response.data["Reason"] == "Invalid clientId":
self.clients = response.data["Clients"]
self.loginCode = 2 #Authenticated but need to select a client id
else:
#Connection error or bad login
self.success = False
return response
###### Helper Functions ######
def get_client_name(self, clientId):
"""Retrieves the client name from a client id
:param clientId: Client ID (guid)
"""
config = self.get_config(clientId)
return config.json()["Config"]["Name"]
def get_scan_status_text(self, statusId):
"""Retrieves the client name from a client id
:param clientId: Status ID (int)
"""
statusTxt = "Unknown Code: " + str(statusId)
if statusId == 32:
statusTxt = "Completed"
elif statusId == 72:
statusTxt = "Failed"
elif statusId == 80:
statusTxt = "Paused"
elif statusId == 82:
statusTxt = "Running"
elif statusId == 119:
statusTxt = "Vuln Load Failed"
elif statusId == 122:
statusTxt = "Stopping"
return statusTxt
def edit_scan_config_xml(self, xml_file, seed_urls, scope_constraints, custom_headers):
"""Adds xml elements for scanning url and includes
:param xml_file: Scanner config xml file
:param seed_urls: seed_url
:param scope_constraints: scope_constraints
"""
tree = ET.parse(xml_file)
xmlRoot = tree.getroot()
xml_node = xmlRoot.findall("CrawlConfig/SeedUrlList")
for elem in xmlRoot.iterfind('CrawlConfig/SeedUrlList'):
for seed_url in seed_urls:
seedUrl = ET.Element("SeedUrl")
elem.append(seedUrl)
value = ET.Element("Value")
value.text = seed_url['url']
seedUrl.append(value)
for elem in xmlRoot.iterfind('CrawlConfig/ScopeConstraintList'):
for scope_constraint in scope_constraints:
scope_constraintXML = ET.Element("ScopeConstraint")
elem.append(scope_constraintXML)
#URL
url = ET.Element("URL")
url.text = scope_constraint['url']
scope_constraintXML.append(url)
#Method
method = ET.Element("Method")
if 'method' in scope_constraint:
method.text = scope_constraint['method']
else:
method.text = "All"
scope_constraintXML.append(method)
#MatchCriteria
match_criteria = ET.Element("MatchCriteria")
if "match_criteria" in scope_constraint:
match_criteria.text = scope_constraint["match_criteria"]
else:
match_criteria.text = "Wildcard"
scope_constraintXML.append(match_criteria)
#Exclusion
include = ET.Element("Exclusion")
if "include" in scope_constraint:
include.text = scope_constraint["include"]
else:
include.text = "Include"
scope_constraintXML.append(include)
http_param = ET.Element("HttpParameterList")
scope_constraintXML.append(http_param)
#Add a customer header, like an API token
for elem in xmlRoot.iterfind('HTTPHeadersConfig/CustomHeadersList'):
for custom_header in custom_headers:
customHeaders = ET.Element("CustomHeaders")
elem.append(customHeaders)
value = ET.Element("Value")
value.text = custom_header["custom_header"]
customHeaders.append(value)
return ET.tostring(xmlRoot, method="xml")
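# Example argument shapes (added for clarity; inferred from the key lookups
# above, the values themselves are placeholders):
#   seed_urls         = [{'url': 'https://app.example.com/'}]
#   scope_constraints = [{'url': 'https://app.example.com/*', 'method': 'All',
#                         'match_criteria': 'Wildcard', 'include': 'Include'}]
#   custom_headers    = [{'custom_header': 'X-Api-Key: placeholder'}]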
#Saves a file from string
def save_file(self, data, filename):
success = None
#If the API can't find the file it returns a json object
if "IsSuccess" in data:
success = False
else:
file = open(filename,"wb")
file.write(data)
file.close
success = True
return success
###### Scan API #######
###### Scan Management ######
def get_scans(self):
"""Retrieves the list of scans.
"""
return self._request('GET', "Scan/GetScans")
def run_scan(self, configId=None, configName=None):
"""Starts a scan. At least one parameter should be provided to start a scan
:param configId: Scan config ID (guid)
:param configName: Scan config name
"""
params = {}
if configId:
params['configId'] = configId
if configName:
params['configName'] = configName
return self._request('POST', "Scan/RunScan/", data=params)
def cancel_scan(self, scanId):
"""Cancels "Starting" or "Waiting for Cloud" scan
:param scanId: Scan ID (guid)
"""
params = {}
params['scanId'] = scanId
return self._request('POST', "/Scan/CancelScan", data=params)
def pause_scan(self, scanId):
"""Pauses a running scan
:param scanId: Scan ID (guid)
"""
params = {}
params['scanId'] = scanId
return self._request('POST', "/Scan/PauseScan", data=params)
def pause_all_scans(self):
"""Pauses all running scans
"""
return self._request('POST', "/Scan/PauseAllScans")
def resume_scan(self, scanId):
"""Resumes a scan
:param scanId: Scan ID (guid)
"""
params = {}
params['scanId'] = scanId
return self._request('POST', "/Scan/ResumeScan", data=params)
def resume_all_scans(self):
"""Resumes all scans
"""
return self._request('POST', "/Scan/ResumeAllScans")
def stop_scan(self, scanId):
"""Stops a running scan
:param scanId: Scan ID (guid)
"""
params = {}
params['scanId'] = scanId
return self._request('POST', "/Scan/StopScan", data=params)
def stop_all_scans(self):
"""Stops all scans
"""
return self._request('POST', "/Scan/StopAllScans")
def get_scan_status(self, scanId):
"""Retrieves the scan status represented by a string
:param scanId: Scan ID (guid)
"""
params = {}
params['scanId'] = scanId
return self._request('GET', "Scan/GetScanStatus", params)
def is_scan_active(self, scanId):
"""Checks to see if the specified scan is active
:param scanId: Scan ID (guid)
"""
params = {}
params['scanId'] = scanId
return self._request('GET', "Scan/IsScanActive", params)
def is_scan_finished(self, scanId):
"""Checks to see if the specified scan is finished
:param scanId: Scan ID (guid)
"""
params = {}
params['scanId'] = scanId
return self._request('GET', "Scan/IsScanFinished", params)
def scan_has_report(self, scanId):
"""Checks to see if the specified scan has a report
:param scanId: Scan ID (guid)
"""
params = {}
params['scanId'] = scanId
return self._request('GET', "Scan/HasReport", params)
###### Finding API #######
def get_vulnerabilities(self):
"""Retrieves the list of vulnerabilities filtered by the specified parameters.
"""
return self._request('GET', "Finding/GetVulnerabilities")
###### Scan Engine Operations #######
def admin_get_engines(self):
"""Retrieves the list of scan engines.
"""
return self._request('GET', "Engine/GetEngines")
def admin_save_engine(self, url, virtualName, login, password, id=None, notes=None, doNotUpdate=None):
"""Creates or updates scan engine
:param id: if id not provided new engine will be created. if id provided engine update performed.
:param url: Scan engine URL. URL scheme should be {scheme}://{domain}/{path}/default.asmx
:param virtualName: Scan engine name
:param login: Scan engine username
:param notes: Notes
:param doNotUpdate: Do not update engine property
"""
params = {}
params['url'] = url
params['virtualName'] = virtualName
params['login'] = login
params['password'] = password
if id:
params['id'] = id
if notes:
params['notes'] = notes
if doNotUpdate:
params['doNotUpdate'] = doNotUpdate
return self._request('POST', "Engine/SaveEngine", params)
def admin_delete_engine(self, ids):
"""Scan engine IDs
:param ids: Scan Engine ID (guid)
"""
params = {}
params['ids'] = ids
return self._request('POST', "Engine/DeleteEngine", params)
###### Scan Engine Operations #######
def admin_get_all_engine_groups(self):
"""Retrieves the list of scan engine groups. Note that System Administrator credentials are required to work with scan engines
"""
return self._request('GET', "EngineGroup/GetAllEngineGroups")
def admin_get_engine_groups_for_client(self):
"""Retrieves the list of scan engine groups for a client. Note that System Administrator credentials are required to work with scan engines
"""
return self._request('GET', "EngineGroup/GetEngineGroupsForClient")
def admin_save_engine_group(self, name, description=None, monitoring=None, id=None):
"""Creates or updates a scan engine group
:param id: If id not provided a new engine group will be created. If an id is provided then an engine group update is performed.
:param name: Scan engine group name. Name should be unique
:param description: Scan engine group description
:param monitoring: Scan engine group is monitoring
"""
params = {}
params['name'] = name
if id:
params['id'] = id
if description:
params['description'] = description
if monitoring:
params['monitoring'] = monitoring
return self._request('POST', "EngineGroup/SaveEngineGroup", data=params)
def admin_delete_engine_group(self, ids):
"""Deletes a scan engine group
:param ids: Scan engine group IDs (guid)
"""
params = {}
params['ids'] = ids
return self._request('POST', "EngineGroup/DeleteEngineGroup", data=params)
def admin_add_engine_to_group(self, groupId, engineId):
"""Adds a scan engine to a scan engine group
:param groupId: Scan engine group ID
:param engineId: Scan engine ID
"""
params = {}
params['groupId'] = groupId
params['engineId'] = engineId
return self._request('POST', "EngineGroup/AddEngineToGroup", data=params)
def admin_delete_engine_from_group(self, groupId, engineId):
"""Deletes scan engine from scan engine group
:param groupId: Scan engine group ID
:param engineId: Scan engine ID
"""
params = {}
params['groupId'] = groupId
params['engineId'] = engineId
return self._request('POST', "EngineGroup/DeleteEngineFromGroup", data=params)
###### Report Management #######
def import_standard_report(self, reportData, scanId=None, configId=None):
"""Creates a new scan in the scan history or updates the report for the specified scan
:param scanId: Update scan report if scanId provided and create new scan details if not
:param reportData: Report file
:param configId: Config id uploaded report attached to
"""
params = {}
params['reportData'] = reportData
if scanId:
params['scanId'] = scanId
if configId:
params['configId'] = configId
return self._request('POST', "Report/ImportStandardReport", data=params)
def import_checkmarx_report(self, scanId, file):
"""Creates a new scan in the scan history or updates the report for the specified scan
:param scanId: Scan ID
:param file: Checkmarx report XML file
"""
params = {}
params['scanId'] = scanId
params['file'] = file
return self._request('POST', "Report/ImportCheckmarxReport", data=params)
def get_vulnerabilities_summary(self, scanId):
"""Gets VulnerabilitiesSummary.xml for the scan. Only scans in "Completed" and "Stopped" states may have a report
:param scanId: Scan ID
"""
params = {}
params['scanId'] = scanId
return self._request('GET', "Report/GetVulnerabilitiesSummaryXml", params)
def get_report_zip(self, scanId):
"""Gets ReportAllFiles.zip for the scan. Only scans in "Completed" and "Stopped" states may have reports
:param scanId: Scan ID
"""
params = {}
params['scanId'] = scanId
return self._request('GET', "Report/GetReportZip", params)
def get_crawled_links(self, scanId):
"""Gets CrawledLinks.xml for the scan. Only scans in "Completed" and "Stopped" states may have a report
:param scanId: Scan ID
"""
params = {}
params['scanId'] = scanId
return self._request('GET', "Report/GetCrawledLinksXml", params)
###### Scan Configuration Operations #######
def save_config(self, xml, name, engineGroupId, clientId, id=None, defendEnabled=False, monitoring=False,
monitoringDelay=0, monitoringTriggerScan=False, isApproveRequired=False, seed_url=False, constraint_url=False,
seed_urls=False, scope_constraints=False, custom_headers=False):
"""Creates a new scan configuration
:param id: If id not provided new config will be created. If id provided config update performed.
:param xml: Scan config xml file. Config name should be unique in the client.
:param defendEnabled: AppSpider Defend enabled
:param monitoring: Monitoring scanning enabled
:param monitoringDelay: Delay between monitoring scans in hours. Possible values are 1 (hour), 24 (day), 168 (week), 720 (month)
:param monitoringTriggerScan: Monitoring scan triggers attack scan if changes found
:param name: Config name
:param engineGroupId: Engine group id for scan config
:param isApproveRequired: Approve required property
"""
params = {}
#Required Parameters
params['Name'] = name
params['EngineGroupId'] = engineGroupId
params['ClientId'] = clientId
#Not required parameters
params['Id'] = id
params['DefendEnabled'] = defendEnabled
params['Monitoring'] = monitoring
params['MonitoringDelay'] = monitoringDelay
params['MonitoringTriggerScan'] = monitoringTriggerScan
params['IsApproveRequired'] = isApproveRequired
#XML Scan Config Parameters
params['Xml'] = self.edit_scan_config_xml(xml, seed_urls, scope_constraints, custom_headers)
return self._request('POST', "Config/SaveConfig", files={'Config': (None,json.dumps(params))})
def get_configs(self):
"""Retrieves all scan configs for the client
"""
return self._request('GET', "Config/GetConfigs")
def get_config(self, id):
"""Retrieves scan config for the client
:param id: Scan config ID
"""
params = {}
params['id'] = id
return self._request('GET', "Config/GetConfig", params)
def get_attachment(self, configId, fileName, fileType):
"""Retrieves auxiliary files (such as macro, traffic recording, etc), referenced in the scan configuration
:param configId: Scan config ID
:param fileName: Name of requested file
:param fileType: File type. Values are: "Authentication", "Certificate", "Crawling", "Selenium", "Traffic", "Wsdl"
"""
params = {}
params['configId'] = configId
params['fileName'] = fileName
params['fileType'] = fileType
return self._request('POST', "Config/GetAttachment", data=params)
###### Blackout Operations Operations #######
def get_blackouts(self):
"""Retrieves the blackout list for the client
"""
return self._request('GET', "Blackout/GetBlackouts")
def save_blackout(self, name, startTime, targetHost, id=None, stopTime=None, isRecurring=None, recurrence=None):
"""Creates or updates a blackout window
:param name: Blackout name. Name should be unique in the client
:param startTime: Date and time the blackout starts
:param targetHost: Name of host for the blackout
:param id: Blackout id. Update blackout if id provided and create new blackout if not provided
:param stopTime: Date and time the blackout ends
:param isRecurring: Marks the blackout as a reoccurring event
:param recurrence: Sets the recurrence frequency. See the section "Recurrences Explained" for more detail.
"""
params = {}
params['name'] = name
params['startTime'] = startTime
params['targetHost'] = targetHost
if id:
params['id'] = id
if stopTime:
params['stopTime'] = stopTime
if isRecurring:
params['isRecurring'] = isRecurring
if recurrence:
params['recurrence'] = recurrence
return self._request('POST', "Blackout/SaveBlackout", data=params)
def delete_blackouts(self, blackoutIds):
"""Removes a blackout window
:param blackoutIds: Scan config ID
"""
params = {}
params['blackoutIds'] = blackoutIds
return self._request('POST', "Blackout/DeleteBlackouts", data=params)
# Utility
@staticmethod
def _build_list_params(param_name, key, values):
"""Builds a list of POST parameters from a list or single value."""
params = {}
if hasattr(values, '__iter__'):
index = 0
for value in values:
params[str(param_name) + '[' + str(index) + '].' + str(key)] = str(value)
index += 1
else:
params[str(param_name) + '[0].' + str(key)] = str(values)
return params
def _request(self, method, url, params=None, data=None, files=None):
"""Common handler for all HTTP requests."""
if not params:
params = {}
if data:
data = json.dumps(data)
headers = {
'User-Agent': self.user_agent,
'Authorization': 'Basic ' + str(self.token)
}
if not files:
headers['Accept'] = 'application/json'
headers['Content-Type'] = 'application/json'
if self.proxies:
proxies=self.proxies
else:
proxies = {}
try:
if self.debug:
print(method + ' ' + url)
print(params)
response = requests.request(method=method, url=self.host + url, params=params, data=data, files=files, headers=headers,
timeout=self.timeout, verify=self.verify_ssl, cert=self.cert, proxies=proxies)
if self.debug:
print(response.status_code)
print(response.text)
try:
if response.status_code == 201: #Created new object
data = response.json()
return AppSpiderResponse(message="Upload complete", data=data, success=True)
elif response.status_code == 204: #Object updates
return AppSpiderResponse(message="Object updated.", success=True)
elif response.status_code == 404: #Object not created
return AppSpiderResponse(message="Object id does not exist.", success=False)
elif 'content-disposition' in response.headers:
data = response.content
return AppSpiderResponse(message="Success", data=data, success=True, response_code=response.status_code)
else:
data = response.json()
return AppSpiderResponse(message="Success", data=data, success=True, response_code=response.status_code)
except ValueError as e:
return AppSpiderResponse(message='JSON response could not be decoded. Detailed error: ' + str(e), success=False)
except requests.exceptions.SSLError as e:
return AppSpiderResponse(message='An SSL error occurred. Detailed error: ' + str(e), success=False)
except requests.exceptions.ConnectionError as e:
return AppSpiderResponse(message='A connection error occurred. Detailed error: ' + str(e), success=False)
except requests.exceptions.Timeout as e:
return AppSpiderResponse(message='The request timed out after ' + str(self.timeout) + ' seconds.',
success=False)
except requests.exceptions.RequestException as e:
return AppSpiderResponse(message='There was an error while handling the request. Detailed error: ' + str(e), success=False)
class AppSpiderResponse(object):
"""
Container for all AppSpider Enterprise API responses, even errors.
"""
def __init__(self, message, success, data=None, response_code=-1):
self.message = message
self.data = data
self.success = success
self.response_code = response_code
def __str__(self):
if self.data:
return str(self.data)
else:
return self.message
def binary(self):
return self.data
def json(self):
return self.data
def id(self):
if self.response_code == 400: #Bad Request
raise ValueError('Object not created:' + json.dumps(self.data, sort_keys=True, indent=4, separators=(',', ': ')))
return int(self.data)
def count(self):
return self.data["TotalCount"]
def is_success(self):
data = None
try:
data = self.data["IsSuccess"]
except:
data = self.data
return data
def error(self):
errorMessage = self.message
if self.data is not None:
if "ErrorMessage" in self.data:
self.data["ErrorMessage"]
return errorMessage
def data_json(self, pretty=False):
"""Returns the data as a valid JSON string."""
if pretty:
return json.dumps(self.data, sort_keys=True, indent=4, separators=(',', ': '))
else:
return json.dumps(self.data)
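# --- Usage sketch (added for illustration; not part of the wrapper) ---
# A minimal, hedged example of driving PyAppSpider end to end. The host,
# credentials and config name are placeholders.
#
#   spider = PyAppSpider('https://appspider.example.com/', verify_ssl=False)
#   spider.authenticate('user', 'password')
#   if spider.loginCode == 1:  # 1 == authenticated (see authenticate())
#       print(spider.get_scans().data_json(pretty=True))
#       run = spider.run_scan(configName='my-config')  # hypothetical config name
#       print(run.is_success())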
```
#### File: tools/checkmarx/PyCheckmarx.py
```python
from suds.client import Client
from suds.sudsobject import asdict
from suds.cache import NoCache
import base64
import re
import json
import time
from zipfile import ZipFile
import os
import uuid
import ssl
class PyCheckmarx(object):
# Internal Variables for the Class
DEBUG = False
configPath = "config/"
errorLog = []
ttlReport = 900
timeWaitReport = 60
ssl._create_default_https_context = ssl._create_unverified_context
#
# Init Function
#
def __init__(self, username, password, url):
# Get Configuration
self.getConfig(username, password, url)
# Open Connection With Checkmarx
self.Initclient = self.openConnection()
# Get the Service URL
self.serviceUrl = self.getServiceUrl(self.Initclient)
# Get the Session Id and Client Object
(self.sessionId, self.client) = self.getSessionId(self.Initclient,self.serviceUrl)
return None
##########################################
#
# Functions Related to Opening session with Checkmarx
#
##########################################
#
# Get Configuration
#
def getConfig(self, username, password, url):
self.USERNAME = username
self.PASSWORD = password
self.URL = str(url + "Cxwebinterface/CxWsResolver.asmx?wsdl")
self.cxURL = str(url)
self.APITYPE = 1
self.baseProject = None
#
# Open Connection
#
def openConnection(self):
try:
#proxy_settings = dict(http='http://localhost:8081')
#tmpClient = Client(self.URL, timeout=1200, proxy=proxy_settings)
tmpClient = Client(self.URL, timeout=1200)
if self.DEBUG:
print dir(tmpClient)
return tmpClient
except Exception as e:
raise Exception("Unable to establish connection with WSDL [%s]: %s " % (self.URL, e.message))
#
# Get Service URL
#
def getServiceUrl(self, client):
try:
CxClient = client.factory.create('CxClientType')
responseDiscovery = client.service.GetWebServiceUrl(CxClient.Jenkins,self.APITYPE)
if responseDiscovery.IsSuccesfull:
serviceUrl = responseDiscovery.ServiceURL
print "Checkmarx Service URL: " + serviceUrl
else:
raise Exception("Error establishing connection > %s" % cxSDK.ErrorMessage)
if self.DEBUG:
print "Response Discovery Object:", dir(responseDiscovery)
print "Service Url:", serviceUrl
return serviceUrl
except Exception as e:
raise Exception("Unable to get Service URL: %s" % e.message)
#
# Login in Checkmarx and retrive the Session ID
#
def getSessionId(self,client, serviceUrl):
try:
#proxy_settings = dict(http='http://localhost:8081')
#clientSDK = Client(serviceUrl + "?wsdl", cache=NoCache(), timeout=1200, proxy=proxy_settings)
clientSDK = Client(serviceUrl + "?wsdl", timeout=1200, cache=NoCache())
CxLogin = clientSDK.factory.create("Credentials")
CxLogin.User = self.USERNAME
CxLogin.Pass = self.PASSWORD
cxSDK = clientSDK.service.Login(CxLogin,1033)
if not cxSDK.IsSuccesfull:
raise Exception("Unable to Login > %s" % cxSDK.ErrorMessage)
if self.DEBUG:
print "Service Object:", dir(client)
print "Login Object:", dir(cxSDK)
print "Session ID:", cxSDK.SessionId
return (cxSDK.SessionId, clientSDK)
except Exception as e:
raise Exception("Unable to get SessionId from [%s] : %s" % (serviceUrl,e.message))
##########################################
#
# Functions Related to the functionality of the WSDL
#
##########################################
#
# Create a scan job
#
def scanProject(self, ProjectName, ServerName, SSHFilePath, PresetID=0, GITBranch="master"):
#Project Settings
ProjectSettings = self.client.factory.create("ProjectSettings")
ProjectSettings.ProjectName = ProjectName
ProjectSettings.PresetID = PresetID
ProjectSettings.projectID = 0
ProjectSettings.ScanConfigurationID = 1
ProjectSettings.IsPublic = "false"
del ProjectSettings.OpenSourceAnalysisOrigin
#Client Scan Arguements
CliScanArgs = self.client.factory.create("CliScanArgs")
CliScanArgs.IsPrivateScan = "false"
CliScanArgs.IsIncremental = "false"
CliScanArgs.IgnoreScanWithUnchangedCode = "true"
del CliScanArgs.ClientOrigin
#Scan Settings
SourceCodeSettings = self.client.factory.create("SourceCodeSettings")
SourceCodeSettings.SourceOrigin = "SourceControl"
SourceCodeSettings.SourceControlSetting.Port = "0"
SourceCodeSettings.SourceControlSetting.UseSSL = "false"
SourceCodeSettings.SourceControlSetting.UseSSH = "true"
SourceCodeSettings.SourceControlSetting.ServerName = ServerName
SourceCodeSettings.SourceControlSetting.Repository = "GIT"
SourceCodeSettings.SourceControlSetting.Protocol = "SSH"
SourceCodeSettings.SourceControlSetting.GITBranch = GITBranch
SourceCodeSettings.SourceControlSetting.SSHPublicKey = "EmptyStab"
#Load the ssh key
file = open(SSHFilePath, "r")
SourceCodeSettings.SourceControlSetting.SSHPrivateKey = file.read()
#Remove "extra" unecessary elements
del SourceCodeSettings.SourceControlSetting.PerforceBrowsingMode
del SourceCodeSettings.SourceControlSetting.GitLsViewType
#Set the client scanning arguments
CliScanArgs.PrjSettings = ProjectSettings
CliScanArgs.SrcCodeSettings = SourceCodeSettings
tmp = self.client.service.Scan(self.sessionId, CliScanArgs)
if not tmp.IsSuccesfull:
raise Exception("Unable to get data from the server.")
if self.DEBUG:
print dir(tmp)
return tmp
def get_directory(self, directory):
file_paths = []
for root, directories, files in os.walk(directory):
for filename in files:
filepath = os.path.join(root, filename)
file_paths.append(filepath)
return file_paths
def scanExistingProject(self, ProjectId, directory, incremental=True):
config = self.client.service.GetProjectConfiguration(self.sessionId, ProjectId)
localCodeContainer = self.client.factory.create("LocalCodeContainer")
tempZip = "/tmp/" + str(uuid.uuid4()) + ".zip"
file_paths = self.get_directory(directory)
print "Zipping"
with ZipFile(tempZip,'w') as zip:
for file in file_paths:
if ".git" not in file:
filename, file_extension = os.path.splitext(file)
try:
#Skip binaries, images and other static assets
if file_extension not in (".a", ".framework", ".png", ".jpg", ".gif", ".ttf", ".bin", ".exe", ".so", ".jar", ".pdf"):
zip.write(file)
except:
print "File skipped: " + file
srcCode = open(tempZip, 'rb')
srcCodeInput = srcCode.read()
localCodeContainer.ZippedFile = base64.encodestring(srcCodeInput)
localCodeContainer.FileName = str(uuid.uuid4()) + ".zip"
os.remove(tempZip)
if incremental:
RunScanAndAddToProject = self.client.factory.create("RunIncrementalScan")
RunScanAndAddToProject.visibleToUtherUsers = True
RunScanAndAddToProject.isPublicScan = True
tmp = self.client.service.RunIncrementalScan(self.sessionId, config.ProjectConfig.ProjectSettings,localCodeContainer,RunScanAndAddToProject.visibleToUtherUsers, RunScanAndAddToProject.isPublicScan)
else:
RunScanAndAddToProject = self.client.factory.create("RunScanAndAddToProject")
RunScanAndAddToProject.visibleToUtherUsers = True
RunScanAndAddToProject.isPublicScan = True
tmp = self.client.service.RunScanAndAddToProject(self.sessionId, config.ProjectConfig.ProjectSettings,localCodeContainer,RunScanAndAddToProject.visibleToUtherUsers, RunScanAndAddToProject.isPublicScan)
if not tmp.IsSuccesfull:
raise Exception("Unable to get data from the server.")
if self.DEBUG:
print dir(tmp)
return tmp.RunId
def getStatusOfSingleScan(self, RunId):
ScanId = None
Message = None
inc = 0
while inc < self.ttlReport:
inc += 1
try:
status = self.client.service.GetStatusOfSingleScan(self.sessionId, RunId)
if status.CurrentStatus == "Finished":
ScanId = status.ScanId
Message = "Success"
break
elif status.CurrentStatus == "Failed" or status.CurrentStatus == "Unknown":
if "full scan should be submitted" in status.StageMessage:
Message = "FullScan"
else:
Message = "Unkown"
break
except Exception as e:
print e
print "Waiting for Checkmarx to complete."
time.sleep(self.timeWaitReport)
if self.DEBUG:
print dir(status)
return ScanId, Message
#
# Get Suppressed Issues
#
def getXMLReport(self, scanID, fileName):
CxWSReportType = self.client.factory.create("CxWSReportType")
CxReportRequest = self.client.factory.create("CxWSReportRequest")
CxReportRequest.ScanID = scanID
CxReportRequest.Type = CxWSReportType.XML
createReportResponse = self.client.service.CreateScanReport(self.sessionId, CxReportRequest)
if createReportResponse.IsSuccesfull:
if self.DEBUG:
print createReportResponse
print "Success. Creating Get Scan Report Status"
inc = 0
while inc < self.ttlReport:
inc += 1
reportStatusResponse = self.client.service.GetScanReportStatus(self.sessionId, createReportResponse.ID)
if reportStatusResponse.IsSuccesfull and reportStatusResponse.IsReady:
break
if self.DEBUG:
print "fail"
time.sleep(self.timeWaitReport)
if self.DEBUG:
print "Sucess. Creating Get Scan Report"
responseScanResults = self.client.service.GetScanReport(self.sessionId, createReportResponse.ID )
if responseScanResults.IsSuccesfull and responseScanResults.ScanResults:
XMLData = base64.b64decode(responseScanResults.ScanResults)
fileObj = open(fileName,"w+")
fileObj.write(XMLData)
fileObj.close()
#
# Get data from the Projects
#
def getProjectScannedDisplayData(self, filterOn=False):
tmp = self.client.service.GetProjectScannedDisplayData(self.sessionId)
if not tmp.IsSuccesfull:
raise Exception("Unable to get data from the server.")
if self.DEBUG:
print dir(tmp)
if not filterOn:
return self.convertToJson(tmp)
else:
return tmp.ProjectScannedList[0]
#
# Get Project Display Data
#
def getProjectsDisplayData(self, filterOn=False):
tmp = self.client.service.GetProjectsDisplayData(self.sessionId)
if not tmp.IsSuccesfull:
raise Exception("Unable to get data from the server.")
if self.DEBUG:
print dir(tmp)
if not filterOn:
return self.convertToJson(tmp)
else:
return tmp.projectList[0]
#
# Get Scan Info For All Projects
#
def getScanInfoForAllProjects(self, filterOn=False):
tmp = self.client.service.GetScansDisplayDataForAllProjects(self.sessionId)
if not tmp.IsSuccesfull:
raise Exception("Unable to get data from the server.")
if self.DEBUG:
print dir(tmp)
if not filterOn:
return self.convertToJson(tmp)
else:
return tmp
#
# Get Preset List
#
def getPresetList(self):
tmp = self.client.service.GetPresetList(self.sessionId)
if not tmp.IsSuccesfull:
raise Exception("Unable to get data from the server.")
if self.DEBUG:
print dir(tmp)
return self.convertToJson(tmp)
#
# Get Configuration List
#
def getConfigurationList(self):
tmp = self.client.service.GetConfigurationSetList(self.sessionId)
if not tmp.IsSuccesfull:
raise Exception("Unable to get data from the server.")
if self.DEBUG:
print dir(tmp)
return self.convertToJson(tmp)
#
# Get Associated Groups List
#
def getAssociatedGroups(self):
tmp = self.client.service.GetAssociatedGroupsList(self.sessionId)
if not tmp.IsSuccesfull:
raise Exception("Unable to get data from the server.")
if self.DEBUG:
print dir(tmp)
return self.convertToJson(tmp)
#
# Filter For [getProjectScannedDisplayData]
#
def filterProjectScannedDisplayData(self, projectID):
tmpProjects = self.getProjectScannedDisplayData(True)
for project in tmpProjects:
if project.ProjectID == projectID:
return self.convertToJson(project)
raise Exception("Could not find ProjectID: %s " % projectID)
#
# Filter for [getProjectsDisplayData]
#
def filterProjectsDisplayData(self,projectID):
tmpProjects = self.getProjectsDisplayData(True)
for project in tmpProjects:
if project.projectID == projectID:
return self.convertToJson(project)
raise Exception("Could not find ProjectID: %s " % projectID)
#
# Filter for [getScanInfoForAllProjects]
#
def filterScanInfoForAllProjects(self,projectID):
tmpProjects = self.getScanInfoForAllProjects(True).ScanList[0]
for project in tmpProjects:
if project.ProjectId == projectID:
return self.convertToJson(project)
raise Exception("Could not find ProjectID: %s " % projectID)
#
# Get Suppressed Issues
#
def getSupressedIssues(self, scanID):
CxWSReportType = self.client.factory.create("CxWSReportType")
CxReportRequest = self.client.factory.create("CxWSReportRequest")
CxReportRequest.ScanID = scanID
CxReportRequest.Type = CxWSReportType.XML
createReportResponse = self.client.service.CreateScanReport(self.sessionId, CxReportRequest)
print createReportResponse
if createReportResponse.IsSuccesfull:
if self.DEBUG:
print createReportResponse
print "Success. Creating Get Scan Report Status"
inc = 0
while inc < self.ttlReport:
inc += 1
reportStatusResponse = self.client.service.GetScanReportStatus(self.sessionId, createReportResponse.ID)
if reportStatusResponse.IsSuccesfull and reportStatusResponse.IsReady:
break
if self.DEBUG:
print "fail"
time.sleep(self.timeWaitReport)
if self.DEBUG:
print "Sucess. Creating Get Scan Report"
responseScanResults = self.client.service.GetScanReport(self.sessionId, createReportResponse.ID )
if responseScanResults.IsSuccesfull and responseScanResults.ScanResults:
XMLData = base64.b64decode(responseScanResults.ScanResults)
print XMLData
issues = re.findall('FalsePositive="([a-zA-Z]+)" Severity="([a-zA-Z]+)"', XMLData)
if self.DEBUG:
print responseScanResults
print issues
mediumSupressIssues = 0
lowSupressIssues = 0
highSupressIssues = 0
otherSupressIssues = 0
for a,b in issues:
if a == "True":
if b == "Medium":
mediumSupressIssues += 1
elif b == "High":
highSupressIssues += 1
elif b == "Low":
lowSupressIssues += 1
else:
otherSupressIssues += 1
if self.DEBUG:
print highSupressIssues
print mediumSupressIssues
print lowSupressIssues
return {"highSupressIssues": highSupressIssues, "mediumSupressIssues": mediumSupressIssues, "lowSupressIssues": lowSupressIssues}
else:
raise Exception("Unable to Get Report")
else:
raise Exception("Unable to get Supressed")
#
# Convert Suds object into serializable format.
#
def recursive_asdict(self,d):
out = {}
for k, v in asdict(d).iteritems():
if hasattr(v, '__keylist__'):
out[k] = self.recursive_asdict(v)
elif isinstance(v, list):
out[k] = []
for item in v:
if hasattr(item, '__keylist__'):
out[k].append(self.recursive_asdict(item))
else:
out[k].append(item)
else:
out[k] = v
return out
#
# Return Subs Object into Serializable format Handler
#
def convertToJson(self, data):
try:
tmp = self.recursive_asdict(data)
return json.dumps(tmp)
except Exception as e:
raise Exception("Unable to convert to JSON: %s" % e.message)
``` |
{
"source": "jose-raul-barreras/letscode",
"score": 4
} |
#### File: jose-raul-barreras/letscode/matrixElementsSum.py
```python
def matrixElementsSum(matrix):
"""
https://codefights.com/arcade/intro/level-2/xskq4ZxLyqQMCLshr
After becoming famous, CodeBots decided to move to a new building and live
together. The building is represented by a rectangular matrix of rooms,
each cell containing an integer - the price of the room. Some rooms are
free (their cost is 0), but that's probably because they are haunted, so
all the bots are afraid of them. That is why any room that is free or is
located anywhere below a free room in the same column is not considered
suitable for the bots.
Help the bots calculate the total price of all the rooms that are suitable
for them.
Example
matrix = [[0, 1, 1, 2],
[0, 5, 0, 0],
[2, 0, 3, 3]]
the output should be matrixElementsSum(matrix) = 9.
Here's the rooms matrix with unsuitable rooms marked with 'x':
[[x, 1, 1, 2],
[x, 5, x, x],
[x, x, x, x]]
Thus, the answer is 1 + 5 + 1 + 2 = 9.
>>> matrix = [[0, 1, 1, 2], [0, 5, 0, 0], [2, 0, 3, 3]]
>>> matrixElementsSum(matrix)
9
>>> matrix = [[1,1,1,0], [0,5,0,1], [2,1,3,10]]
>>> matrixElementsSum(matrix)
9
>>> matrix = [[1,1,1], [2,2,2], [3,3,3]]
>>> matrixElementsSum(matrix)
18
>>> matrix = [[0]]
>>> matrixElementsSum(matrix)
0
"""
count = 0
fact = [1 for i in range(len(matrix[0]))]
for i in range(len(matrix)):
for j in range(len(matrix[i])):
if matrix[i][j] == 0:
fact[j] = 0
count += matrix[i][j]*fact[j]
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
``` |
{
"source": "joseraulgallardo97/spyre",
"score": 3
} |
#### File: spyre/examples/simple_app_example.py
```python
from spyre import server
class SimpleApp(server.App):
title = "Simple App"
inputs = [{
"type": "text",
"key": "words",
"label": "write words here",
"value": "hello world",
"action_id": "simple_html_output"
}]
outputs = [{
"type": "html",
"id": "simple_html_output"
}]
def getHTML(self, params):
words = params["words"]
return "Here's what you wrote in the textbox: <b>%s</b>" % words
app = SimpleApp()
app.launch()
```
#### File: tutorial/quickstart/connections.py
```python
from spyre import server
import matplotlib.image as mpimg
class InputExample(server.Launch):
title = "Connections"
inputs = [{
"input_type": 'radiobuttons',
"options": [
{"label": "Simple App", "value": 1, "checked": True},
{"label": "Multiple Outputs", "value": 2},
{"label": "Inputs with actions 1", "value": 3},
{"label": "Inputs with actions 2", "value": 4},
],
"variable_name": 'slide_selector',
"action_id": "image_output",
}]
controls = [{
"control_type": "hidden",
"control_id": "button1",
"label": "Button",
}]
outputs = [{
"output_type": "image",
"output_id": "image_output",
"control_id": "button1",
"on_page_load": True,
}]
def getImage(self, params):
slide_selector = int(params['slide_selector'])
absolute_path = ''
if slide_selector == 2:
img = mpimg.imread(absolute_path + 'slide2.png')
elif slide_selector == 3:
img = mpimg.imread(absolute_path + 'slide3.png')
elif slide_selector == 4:
img = mpimg.imread(absolute_path + 'slide4.png')
else:
img = mpimg.imread(absolute_path + 'slide1.png')
return img
def noOutput(self, input_params):
pass
app = InputExample()
app.launch(port=9096)
``` |
{
"source": "joserc87/project-euler",
"score": 4
} |
#### File: project-euler/031/main.py
```python
def posibilidades (tiposMonedas, cantidad, under=200):
if cantidad==0:
return 1
elif cantidad==1:
return 1
numPosibilidades=0
for m in tiposMonedas:
if m <= cantidad and m <= under:
numPosibilidades+=posibilidades (tiposMonedas, cantidad-m, m)
return numPosibilidades
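# Worked example (added for clarity): with coins [2, 1] and target 4 the
# recursion counts 3 combinations (2+2, 2+1+1 and 1+1+1+1), because the
# `under` argument keeps each coin no larger than the previously chosen one:
#   posibilidades([2, 1], 4) == 3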
numPosibilidades=posibilidades ([200, 100, 50, 20, 10, 5, 2, 1], 200)
print(numPosibilidades)
```
#### File: project-euler/033/main.py
```python
import math
""" \brief Calcula tantos primos como se indique y los guarda en un
vector
\param primos Un vector de primos
\param hasta El mayor número que queremos comprobar si es primo. Si
hasta es primo, se añadirá al vector
"""
def calcularPrimos (primos, hasta):
cambia=False
i=0L
if len(primos)==0:
primos.append (2L)
cambia=True
i=3L
else:
i=primos [-1]+2L
while i<=hasta:
esPrimo=True
raiz=math.sqrt(i)//1
for iter in primos:
if iter > raiz:
break;
elif i%iter==0:
esPrimo=False
if esPrimo:
primos.append (i)
cambia=True
i+=2
return cambia
####################################################################################################
"""
\brief Computes the prime divisors of a number. Uses calcularPrimos
\param n The base number
\param primos A list of primes, possibly incomplete
"""
def divisoresPrimos (n, primos):
divisores=[]
cambia=True
while cambia and n!=1:
for iter in primos:
if n==1:
break
while n%iter==0:
divisores.append (iter)
n/=iter
cambia=calcularPrimos (primos, n)
return divisores
####################################################################################################
"""
\brief Computes the prime factorization of a number as (prime, exponent) pairs. Uses calcularPrimos
\param n The base number
\param primos A list of primes, possibly incomplete
"""
def descomposicion (n, primos):
divisores=[]
cambia=True
while cambia and n!=1:
for iter in primos:
if n==1:
break
numNs=0;
while n%iter==0:
numNs+=1
n/=iter
if numNs>0:
divisores.append ((iter,numNs))
cambia=calcularPrimos (primos, n*2)
return divisores
####################################################################################################
"""
\brief Indicates whether two numbers, given as lists of factors, are equal
"""
def iguales (listaA, listaB):
sonIguales=True
if len (listaA) != len (listaB):
sonIguales=False
else:
for a in listaA:
if not a in listaB:
sonIguales=False
break;
return sonIguales
####################################################################################################
"""
\brief Raises a number, given as a list of (prime, exponent) factors, to the power e
"""
def elevar (lis, e):
nuevaLista=[]
for l in lis:
tup=(l[0],l[1]*e)
nuevaLista.append (tup);
return nuevaLista
####################################################################################################
def findMaxCommDiv (num, den):
"reducir la fracción:"
divisores = range(num, 1, -1)
for divisor in divisores:
if num%divisor==0 and den%divisor==0:
return divisor;
return 1;
####################################################################################################
"""
\brief Checks whether a/b is a "curious" fraction: the two numbers share a digit that can be cancelled without changing the fraction's value
\param a The first number, between 10 and 98
\param b The second number, between a+1 and 99
"""
def isCurious (a, b):
a1 = a//10
a2 = a%10
b1 = b//10
b2 = b%10
return (a2 != 0 or b2 != 0) and ((a1 == b1 and (1.0*b2*a)/b == a2) or \
(a1 == b2 and (1.0*b1*a)/b == a2) or \
(a2 == b1 and (1.0*b2*a)/b == a1) or \
(a2 == b2 and (1.0*b1*a)/b == a1));
prodNumerador = 1
prodDenominador = 1
numeradores = range (11, 99)
for numerador in numeradores:
denominadores = range (numerador+1, 100)
for denominador in denominadores:
if (isCurious(numerador, denominador)):
print 'La fraccion ' + repr(numerador) + '/' + repr(denominador) + ' es curiosa'
prodNumerador *= numerador
prodDenominador *= denominador
divisor = findMaxCommDiv(prodNumerador, prodDenominador)
prodNumerador /= divisor
prodDenominador /= divisor;
"reducir la fracción:"
print 'La solucion es ' + repr(prodDenominador)
```
#### File: joserc87/project-euler/037.py
```python
from util import PrimeFactory
def find_truncable_primes(n):
"""
The numbers will start with [2, 3, 5, 7] and end with [3, 7] (if it ended in
2 or 5 it would be a multiple of 2 or 5).
We can build backwards:
- primes that end in [3, 7]: [13, 17, 23, 37, 43, 47, 53, 67, 73, 83,
97]
- primes that start with [2, 3, 5, 7]: [23, 29, 31, 37, 53, 59, 71, 73,
79]
- s would be [23, 37, 53, 73]
- primes that end in [13, 17, 23, 37, 43, 47, 53, 67, 73, 83, 97]: [233]
"""
s = []
prime_factory = PrimeFactory()
r = [7, 3] # Right, endings
l = [2, 3, 5, 7] # Left, starters
n = 2
while True:
prime_factory._calc_primes(10**n)
r = [p for p in prime_factory.primes if (p % 10**(n-1)) in r and
p < 10**n and p >= 10**(n-1)]
l = [p for p in prime_factory.primes if (p // 10) in l and
p < 10**n and p >= 10**(n-1)]
v = [x for x in r if x in l]
s += v
if l == [] or r == []:
break
# We are told that there are 11 primes
if len(s) == 11:
break
n += 1
return s
def solve(n):
return sum(find_truncable_primes(n))
if __name__ == "__main__":
print(solve(1000000))
```
#### File: joserc87/project-euler/039.py
```python
def possible_right_triangles(max_perimeter):
"""
Returns the possible triangles for each perimeter (list of list of tuples)
"""
triangles = [[] for _ in range(max_perimeter+1)]
squares = [i*i for i in range(max_perimeter//2 + 1)]
# Find the triangles (where b>=a)
for b, b2 in enumerate(squares):
for a, a2 in enumerate(squares):
if a > b:
break
if a*b != 0:
c2 = a2 + b2
if c2 in squares:
c = squares.index(c2)
perimeter = a + b + c
if perimeter <= max_perimeter:
triangles[perimeter] += [(a, b, c)]
return triangles
def perimeter_with_more_triangles(max_perimeter):
triangles = possible_right_triangles(max_perimeter)
lens = [len(l) for l in triangles]
return lens.index(max(lens))
if __name__ == '__main__':
triangles = possible_right_triangles(1000)
print(perimeter_with_more_triangles(1000))
```
#### File: joserc87/project-euler/045.py
```python
class GeometryHelper(object):
"""
Helper class to calculate triangle, pentagonal or hexagonal numbers. We can
use the brackets operator (TPH[X]) as well as the contains operator (X in
TPH).
"""
def __init__(self):
# Helper[0] == 0 to make our lives easier, as the numbers start from 1
self.numbers = [0]
def formulae(self, n):
"""Returns the n-th number (n>=1)"""
return 0
def _build(self, last):
next_item = len(self.numbers)
self.numbers += [self.formulae(n) for n in range(next_item, last+1)]
def _build_until_number(self, last_p):
next_item = len(self.numbers)
while self.numbers[-1] < last_p:
self.numbers.append(self.formulae(next_item))
next_item += 1
def __contains__(self, p):
self._build_until_number(p)
return p in self.numbers
def __getitem__(self, n):
if n >= len(self.numbers):
self._build(n)
return self.numbers[n]
class TriangleHelper(GeometryHelper):
def formulae(self, n):
"""Returns the n-th triangle number (n>1)"""
return (n*(n+1))//2
class PentagonalHelper(GeometryHelper):
def formulae(self, n):
"""Returns the n-th triangle number (n>1)"""
return (n*(3*n-1))//2
class HexagonalHelper(GeometryHelper):
def formulae(self, n):
"""Returns the n-th triangle number (n>1)"""
return n*(2*n-1)
# Test
T = TriangleHelper()
P = PentagonalHelper()
H = HexagonalHelper()
assert [T[i] for i in range(1, 6)] == [1, 3, 6, 10, 15]
assert [P[i] for i in range(1, 6)] == [1, 5, 12, 22, 35]
assert [H[i] for i in range(1, 6)] == [1, 6, 15, 28, 45]
assert 15 in T
assert 16 not in T
assert 35 in P
assert 36 not in P
assert 45 in H
assert 46 not in H
def main():
# Simply iterate over the triangle numbers and check if the result number is
# also a pentagonal and hexagonal number
# We already know that T[285] = P[165] = H[143] = 40755:
assert T[285] == P[165] == H[143] == 40755
assert 40755 in T
assert 40755 in P
assert 40755 in H
# So we can start from 286
n = 286
while True:
t = T[n]
if t in P and t in H:
return t
n += 1
if __name__ == '__main__':
print(main())
```
#### File: joserc87/project-euler/046.py
```python
from util import PrimeFactory
primes = PrimeFactory()
# Array with the squares, for efficiency
squares = [0]
def get_squares_until(n):
i = len(squares)
while squares[-1] < n:
squares.append(i*i)
i += 1
return [s for s in squares if s < n]
def find_goldbach(n):
'''Finds the prime p and the square s so that n == p + 2*s'''
for square in get_squares_until(n):
p = (n - square*2)
if p in primes:
return (p, square)
# Goldbach's other conjecture is false!
return None
def main():
n = 9
while True:
if n not in primes:
if find_goldbach(n) is None:
return n
n += 2
if __name__ == '__main__':
print(main())
```
#### File: joserc87/project-euler/047.py
```python
from collections import deque
from util import PrimeFactory
def distinctive_factors(factors):
for i, factors1 in enumerate(factors):
for factors2 in [f for j, f in enumerate(factors) if j > i]:
for f in factors1:
if f in factors2:
return False
return True
def find_n_cons_nums(n):
"""Finds the first n consecutive numbers that have n disctintive prime
factors"""
primes = PrimeFactory()
# First number that can be decomposed is 4
i = 4
factors = deque([primes.get_prime_factors(f) for f in range(i, i+n)])
while sum([0 if len(f) == n else 1 for f in factors]) != 0 or \
not distinctive_factors(factors):
factors.rotate()
i += 1
factors[-1] = primes.get_prime_factors(i + n - 1)
return i
def main():
assert find_n_cons_nums(2) == 14
assert find_n_cons_nums(3) == 644
print(find_n_cons_nums(4))
if __name__ == '__main__':
main()
```
#### File: project-euler/test/primes_test.py
```python
import unittest
from util import PrimeFactory
class TestPrimeFactory(unittest.TestCase):
def test_calc_primes(self):
sut = PrimeFactory()
sut._calc_primes(20)
self.assertEqual(sut.last_number_checked, 21)
self.assertEqual(sut.primes, [2, 3, 5, 7, 11, 13, 17, 19])
def test_is_prime(self):
primes = PrimeFactory()
self.assertTrue(primes.is_prime(2))
self.assertTrue(primes.is_prime(3))
self.assertTrue(primes.is_prime(5))
self.assertTrue(primes.is_prime(7))
self.assertTrue(primes.is_prime(11))
self.assertTrue(primes.is_prime(13))
self.assertTrue(primes.is_prime(17))
def test_contains(self):
primes = PrimeFactory()
self.assertTrue(2 in primes)
self.assertTrue(3 in primes)
self.assertTrue(5 in primes)
self.assertTrue(7 in primes)
self.assertTrue(11 in primes)
self.assertTrue(13 in primes)
self.assertTrue(17 in primes)
def test_get_prime_factors(self):
primes = PrimeFactory()
self.assertEquals([(2, 2), (7, 1), (23, 1)],
primes.get_prime_factors(644))
self.assertEquals([(3, 1), (5, 1), (43, 1)],
primes.get_prime_factors(645))
self.assertEquals([(2, 1), (17, 1), (19, 1)],
primes.get_prime_factors(646))
if __name__ == '__main__':
unittest.main()
```
#### File: project-euler/test/util_test.py
```python
import unittest
from util import get_digits, get_int_from_digits, is_palindromic, get_rotations
class TestDigitMethods(unittest.TestCase):
def test_get_digits(self):
self.assertEqual(
get_digits(1232, 10),
[1, 2, 3, 2])
self.assertEqual(
get_digits(0b10010101, 2),
[1, 0, 0, 1, 0, 1, 0, 1])
def test_get_int_from_digits(self):
self.assertEqual(
get_int_from_digits([4, 5, 3, 9, 0]),
45390)
self.assertEqual(
get_int_from_digits([1, 0, 1, 1, 1, 0, 0], 2),
0b1011100)
def test_is_palidromic(self):
self.assertTrue(is_palindromic(1234321, 10))
self.assertTrue(is_palindromic(123321, 10))
self.assertTrue(is_palindromic(0b101101101, 2))
self.assertTrue(is_palindromic(0b10111101, 2))
def test_get_rotations(self):
self.assertEqual(
get_rotations(1234),
[1234, 4123, 3412, 2341])
self.assertEqual(
get_rotations(0b10110011, 2),
[0b10110011, 0b11011001, 0b11101100, 0b01110110,
0b00111011, 0b10011101, 0b11001110, 0b01100111])
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "josercruz01/jchat",
"score": 3
} |
#### File: josercruz01/jchat/jchat.py
```python
import json
import os
from peewee import *
db = SqliteDatabase(os.environ.get('JCHAT_DATABASE') or "test.db")
# Constants
MSG_TYPE_SITE = "status"
MSG_TYPE_MESSAGE = "message"
STATUS_TYPE_ONLINE = "online"
STATUS_TYPE_OFFLINE = "offline"
# Base Database Model.
class BaseModel(Model):
class Meta:
database = db
# Represents a message.
class Message(BaseModel):
message_id = CharField(unique=True)
content = TextField()
last_updated = IntegerField()
# Represents a cache of all messages sent.
class MessageCache(BaseModel):
message_id = CharField(unique=True)
last_updated = IntegerField()
# Represents a site.
class Site(BaseModel):
site_id = CharField(unique=True)
messages = IntegerField(default=0)
emails = IntegerField(default=0)
last_updated = IntegerField(default=0)
# Represents an operator.
class Operator(BaseModel):
operator_id = CharField()
status = CharField()
site = ForeignKeyField(Site, related_name='operators')
last_updated = IntegerField(default=0)
# Represents a visitor.
class Visitor(BaseModel):
visitor_id = CharField()
site = ForeignKeyField(Site, related_name='visitors')
# Returns true if at least one operator is online for the site.
def is_online(site):
online = (Operator.select().where(
(Operator.site == site.id) &
(Operator.status == STATUS_TYPE_ONLINE)
).count())
return online > 0
# Return the total visitors connected to the site.
def total_visitors(site):
total = (Visitor.select().where(
Visitor.site == site.id
).count())
return total
# Return the total operators connected to the site.
def total_operators(site):
total = (Operator.select().where(
Operator.site == site.id
).count())
return total
# Returns an operator associated with the operator id.
def get_operator(site, operator_id):
operator = Operator.get((Operator.site == site.id) &
(Operator.operator_id == operator_id))
return operator
def site_str(site, operators, visitors):
return "%s,messages=%s,emails=%s,operators=%s,visitors=%s" % (
site.site_id,
site.messages,
site.emails,
operators,# site.num_operators,
visitors,#site.num_visitors,
)
def all_sites():
sites = Site.select()
for site in sites:
operators = site.operators.count()
visitors = site.visitors.count()
yield site_str(site, operators, visitors)
# Represents the state of the current chat.
#
# Attributes:
# sites: A list of all sites attached to this chat instance.
class JChat(object):
def __init__(self):
db.connect()
self.sites = {}
def print_all(self):
for site in all_sites():
print site
# Pre-processes a message. Saves it in the database to make it
# queryable and to sort based on timestamp.
def pre_process(self, message, text):
message_id = message["id"]
timestamp = message["timestamp"]
try:
db_message = Message.get( (Message.message_id == message_id) &
(Message.last_updated == timestamp))
return
except DoesNotExist:
db_message = Message.create(message_id=message_id,
last_updated=timestamp,
content=text)
db_message.save()
# Processes a message. Depending on the type it either sends
# the message to the site if the site is online or sends
# an email otherwise.
def process(self, event_message):
message = json.loads(event_message.content)
message_id = message["id"]
timestamp = message["timestamp"]
try:
db_message = MessageCache.get(MessageCache.message_id == message_id)
return
except DoesNotExist:
db_message = MessageCache.create(message_id=message_id, last_updated=timestamp)
db_message.save()
message_type = message["type"]
if message_type == MSG_TYPE_SITE:
self._process_status(message)
elif message_type == MSG_TYPE_MESSAGE:
self._process_message(message)
else:
raise ValueError("Message type '%s' is not valid." % (message_type,))
# Returns or creates a site.
def _get_or_create_site(self, site_id):
try:
return Site.get(Site.site_id == site_id)
except DoesNotExist:
site = Site.create(site_id=site_id, messages=0, emails=0)
return site
# Sends a message to the site if the site is online or sends an email
# to the site otherwise.
def _process_message(self, message):
site = self._get_or_create_site(message["site_id"])
site.last_updated = message["timestamp"]
if is_online(site):
site.messages += 1
else:
site.emails +=1
site.save()
# Create visitor if not exists.
visitor_id = message["from"]
try:
visitor = Visitor.get( (Visitor.visitor_id == visitor_id) &
(Visitor.site == site.id)
)
except DoesNotExist:
visitor = Visitor.create(site=site.id, visitor_id=visitor_id)
visitor.save()
# Marks a site as online/offline based on the message data.
def _process_status(self, message):
timestamp = message["timestamp"]
site = self._get_or_create_site(message["site_id"])
site.last_updated = timestamp
site.save()
# Create operator.
operator_id = message["from"]
status = message["data"]["status"]
operator = None
try:
operator = Operator.get( (Operator.site == site.id) &
(Operator.operator_id == operator_id)
)
operator.status = status
except DoesNotExist:
operator = Operator.create(operator_id=operator_id,
site=site.id,
status=status)
operator.last_updated = timestamp
operator.save()
def get_site(self, site_id):
site = Site.get(Site.site_id == site_id)
return site
def total_sites(self):
return Site.select().count()
# Parses an input file containing one JSON message per line, where each
# message represents either a site coming online or a client
# sending a chat message to a site.
# Method assumes the input file has valid JSON on each line and throws
# an exception if this condition is not met.
#
# Raises:
# ValueError: If the input file does not contain valid JSON messages.
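# Example input line (editor's illustration; field names inferred from the code
# below, the concrete values are hypothetical):
# {"id": "msg-1", "type": "status", "timestamp": 1, "site_id": "site-1",
#  "from": "operator-1", "data": {"status": "online"}}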
def parse(filename):
chat = JChat()
with open(filename) as f:
line_number = 0
for message in f:
line_number += 1
json_message = None
try:
json_message = json.loads(message)
except ValueError, e:
raise ValueError("Error parsing file '%s' on line %s. Text '%s' is "
" not valid JSON" % (filename, line_number, message), e)
chat.pre_process(json_message, message)
messages = Message.select().order_by(Message.last_updated)
for message in messages:
json_message = json.loads(message.content)
chat.process(message)
return chat
```
#### File: josercruz01/jchat/migrate_database.py
```python
import jchat
import os
TABLES = [jchat.Site, jchat.Visitor, jchat.Operator, jchat.Message, jchat.MessageCache]
def cleanup():
for table in TABLES:
table.drop_table(fail_silently=True)
def migrate():
jchat.db.connect()
jchat.db.create_tables(TABLES)
if __name__ == "__main__":
cleanup()
migrate()
``` |
{
"source": "joserebelo/youtube-dl-server",
"score": 2
} |
#### File: joserebelo/youtube-dl-server/youtube-dl-server.py
```python
from __future__ import unicode_literals
import json
import os
from collections import ChainMap
from itertools import chain
from operator import itemgetter
from queue import Queue
from bottle import route, run, Bottle, request, static_file, template
from threading import Thread
from pathlib import Path
from ydl_server.logdb import JobsDB, Job, Actions, JobType
from ydl_server import jobshandler, ydlhandler
from ydl_server.config import app_defaults
app = Bottle()
@app.route(['/', '/index'])
def front_index():
return template('./ydl_server/templates/index.html',
ydl_version=ydlhandler.get_ydl_version())
@app.route('/logs')
def front_logs():
return template('./ydl_server/templates/logs.html',
ydl_version=ydlhandler.get_ydl_version())
@app.route('/finished')
def front_finished():
return template('./ydl_server/templates/finished.html',
ydl_version=ydlhandler.get_ydl_version())
@app.route('/api/finished')
def api_list_finished():
root_dir = Path(app_vars['YDL_OUTPUT_TEMPLATE']).parent
matches = root_dir.glob('*')
files = [{'name': f1.name,
'modified': f1.stat().st_mtime * 1000,
'children': sorted([{
'name': f2.name,
'modified': f2.stat().st_mtime * 1000
} for f2 in f1.iterdir() if not f2.name.startswith('.')] if f1.is_dir() else [], key=itemgetter('modified'), reverse=True)
} for f1 in matches if not f1.name.startswith('.')]
files = sorted(files, key=itemgetter('modified'), reverse=True)
return {
"success": True,
"files": files
}
@app.route('/api/finished/:filename#.*#')
def api_serve_finished_file(filename):
root_dir = Path(app_vars['YDL_OUTPUT_TEMPLATE']).parent
return static_file(filename, root=root_dir)
@app.route('/static/:filename#.*#')
def server_static(filename):
return static_file(filename, root='./ydl_server/static')
@app.route('/api/downloads/stats', method='GET')
def api_queue_size():
db = JobsDB(readonly=True)
jobs = db.get_all()
return {
"success": True,
"stats": {
"queue": ydlhandler.queue.qsize(),
"pending": len([job for job in jobs if job['status'] == "Pending"]),
"running": len([job for job in jobs if job['status'] == "Running"]),
"completed": len([job for job in jobs if job['status'] == "Completed"]),
"failed": len([job for job in jobs if job['status'] == "Failed"])
}
}
@app.route('/api/downloads', method='GET')
def api_logs():
db = JobsDB(readonly=True)
return json.dumps(db.get_all())
@app.route('/api/downloads', method='DELETE')
def api_logs_purge():
jobshandler.put((Actions.PURGE_LOGS, None))
return {"success": True}
@app.route('/api/downloads', method='POST')
def api_queue_download():
url = request.forms.get("url")
options = {'format': request.forms.get("format")}
if not url:
return {"success": False, "error": "'url' query parameter omitted"}
job = Job(url, Job.PENDING, "", JobType.YDL_DOWNLOAD, request.forms.get("format"), url)
jobshandler.put((Actions.INSERT, job))
print("Added url " + url + " to the download queue")
return {"success": True, "url": url, "options": options}
@app.route('/api/metadata', method='POST')
def api_metadata_fetch():
url = request.forms.get("url")
return ydlhandler.fetch_metadata(url)
@app.route("/api/youtube-dl/update", method="GET")
def ydl_update():
job = Job("Youtube-dl Update", Job.PENDING, "", JobType.YDL_UPDATE, None, None)
jobshandler.put((Actions.INSERT, job))
return {"success": True}
JobsDB.check_db_latest()
JobsDB.init_db()
ydlhandler.start()
print("Started download thread")
jobshandler.start(ydlhandler.queue)
print("Started jobs manager thread")
print("Updating youtube-dl to the newest version")
job = Job("Youtube-dl Update", Job.PENDING, "", JobType.YDL_UPDATE, None, None)
jobshandler.put((Actions.INSERT, job))
ydlhandler.resume_pending()
app_vars = ChainMap(os.environ, app_defaults)
app.run(host=app_vars['YDL_SERVER_HOST'],
port=app_vars['YDL_SERVER_PORT'],
debug=app_vars['YDL_DEBUG'])
ydlhandler.finish()
jobshandler.finish()
ydlhandler.join()
jobshandler.join()
``` |
{
"source": "jose/RepairThemAll",
"score": 2
} |
#### File: script/core/Benchmark.py
```python
import json
import subprocess
class Benchmark(object):
"""Benchmark"""
def __init__(self, name):
self.name = name
pass
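# Editor's note (added for clarity): returns the Maven project info for a bug, caching
# it on the bug object; on a cache miss it runs the project-config-maven-plugin in the
# bug's working directory and parses its JSON output.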
def _get_project_info(self, bug):
try:
return bug.maven_info
except AttributeError:
pass
cmd = """cd %s;
mvn com.github.tdurieux:project-config-maven-plugin:1.0-SNAPSHOT:info -q;
""" % (bug.working_directory)
info = json.loads(subprocess.check_output(cmd, shell=True))
bug.maven_info = info
return info
def checkout(self, bug, working_directory):
pass
def compile(self, bug, working_directory):
pass
def run_test(self, bug, working_directory):
pass
def classpath(self, bug):
pass
def compliance_level(self, bug):
pass
def source_folders(self, bug):
pass
def test_folders(self, bug):
pass
def __str__(self):
return self.name
```
#### File: script/core/RepairTool.py
```python
import os
import json
import time
import random
import shutil
import datetime
from config import WORKING_DIRECTORY, REPAIR_ROOT
from config import DATA_PATH
from config import REPAIR_TOOL_FOLDER
LOCK_FILE = "LOCK_BUGS_INIT"
def is_lock():
return os.path.exists(os.path.join(REPAIR_ROOT, LOCK_FILE))
def wait_lock():
while is_lock():
secs = random.randrange(2, 8)
time.sleep(secs)
def lock():
f = open(os.path.join(REPAIR_ROOT, LOCK_FILE), "w+")
f.close()
pass
def unlock():
path = os.path.join(REPAIR_ROOT, LOCK_FILE)
if os.path.exists(path):
os.remove(path)
class RepairTool(object):
def __init__(self, name, config_name):
self.data = None
self.main = None
self.jar = None
self.name = name
self.config_name = config_name
self.repair_begin = None
self.parseData()
self.seed = 0
pass
def parseData(self):
path = os.path.join(DATA_PATH, 'repair_tools', self.config_name + '.json')
if os.path.exists(path):
with open(path) as data_file:
self.data = json.load(data_file)
self.main = self.data["main"]
self.jar = os.path.join(REPAIR_TOOL_FOLDER, self.data["jar"])
def init_bug(self, bug, bug_path):
if os.path.exists(bug_path):
shutil.rmtree(bug_path)
try:
wait_lock()
lock()
bug.checkout(bug_path)
finally:
unlock()
bug.compile()
self.repair_begin = datetime.datetime.now().__str__()
# bug.run_test()
def get_info(self, bug, bug_path):
pass
def repair(self, bug):
bug_path = os.path.join(WORKING_DIRECTORY, "%s_%s" % (bug.project, bug.bug_id))
self.init_bug(bug, bug_path)
self.get_info(bug, bug_path)
pass
def __str__(self):
return self.name
```
#### File: core/repair_tools/Nopol.py
```python
import os
import shutil
import json
import subprocess
import datetime
from config import JAVA8_HOME
from config import JAVA_ARGS
from config import OUTPUT_PATH
from config import WORKING_DIRECTORY
from config import Z3_PATH
from core.RepairTool import RepairTool
from core.runner.RepairTask import RepairTask
from core.utils import add_repair_tool
class Nopol(RepairTool):
"""Nopol"""
def __init__(self, name="Nopol", mode="repair", oracle="angelic", statement_type="pre_then_cond", seed=7, synthesis="smt"):
super(Nopol, self).__init__(name, "nopol")
self.solver = self.data["solver"]
self.synthesis = synthesis
self.flocal = "gzoltar"
self.mode = mode
self.oracle = oracle
self.statement_type = statement_type
self.seed = seed
def repair(self, repair_task):
""""
:type repair_task: RepairTask
"""
bug = repair_task.bug
bug_path = os.path.join(WORKING_DIRECTORY,
"%s_%s_%s_%s" % (self.name, bug.benchmark.name, bug.project, bug.bug_id))
repair_task.working_directory = bug_path
self.init_bug(bug, bug_path)
try:
classpath = ":".join(bug.bin_folders() + bug.test_bin_folders())
if classpath != ":":
classpath += ":"
classpath += bug.classpath()
classpath += ":" + self.jar
cmd = """cd %s;
export JAVA_TOOL_OPTIONS="-Dfile.encoding=UTF8 -Duser.language=en-US -Duser.country=US -Duser.language=en";
TZ="America/New_York"; export TZ;
export PATH="%s:$PATH";
export JAVA_HOME="%s";
time java %s -cp %s:%s/../lib/tools.jar %s \\
--mode %s \\
--type %s \\
--oracle %s \\
--synthesis %s \\
--flocal %s \\
--json \\
--solver %s \\
--solver-path %s \\
--complianceLevel %s \\
--source %s \\
--classpath "%s";
echo "\\n\\nNode: `hostname`\\n";
echo "\\n\\nDate: `date`\\n";
""" % (bug_path,
JAVA8_HOME,
JAVA8_HOME,
JAVA_ARGS,
self.jar,
JAVA8_HOME,
self.main,
self.mode,
self.statement_type,
self.oracle,
self.synthesis,
self.flocal,
self.solver,
os.path.join(Z3_PATH, "z3"),
str(bug.compliance_level()),
":".join(bug.source_folders()),
classpath)
log_path = os.path.join(repair_task.log_dir(), "repair.log")
if not os.path.exists(os.path.dirname(log_path)):
os.makedirs(os.path.dirname(log_path))
log = file(log_path, 'w')
log.write(cmd)
log.flush()
subprocess.call(cmd, shell=True, stdout=log, stderr=subprocess.STDOUT)
with open(log_path) as data_file:
return data_file.read()
finally:
path_results = os.path.join(bug_path, "output.json")
if os.path.exists(path_results):
repair_task.status = "FINISHED"
shutil.copy(path_results, os.path.join(repair_task.log_dir(), "detailed-result.json"))
with open(path_results) as fd:
repair_task.results = json.load(fd)
result = {
"repair_begin": self.repair_begin,
"repair_end": datetime.datetime.now().__str__(),
'patches': []
}
if 'patch' in repair_task.results:
result['patches'] = repair_task.results["patch"]
with open(os.path.join(repair_task.log_dir(), "result.json"), "w") as fd2:
json.dump(result, fd2, indent=2)
if len(result['patches']) > 0:
repair_task.status = "PATCHED"
else:
repair_task.status = "ERROR"
cmd = "rm -rf %s;" % (bug_path)
#subprocess.call(cmd, shell=True)
pass
def init(args):
return Nopol(seed=args.seed, statement_type=args.statement_type)
def init_dynamoth(args):
return Nopol(name="DynaMoth", seed=args.seed, statement_type=args.statement_type, synthesis="dynamoth")
def nopol_args(parser):
parser.add_argument("--statement-type", "-t",
help="The targeted statement", default="pre_then_cond", choices=("condition", "precondition", "pre_then_cond"))
parser.add_argument('--version', action='version', version='Astor 7ba58a78d')
parser.add_argument("--seed", "-s", help="The random seed", default=7, type=int)
pass
parser = add_repair_tool("Nopol", init, 'Repair the bug with Nopol')
nopol_args(parser)
parser = add_repair_tool("DynaMoth", init_dynamoth, 'Repair the bug with DynaMoth')
nopol_args(parser)
```
#### File: script/core/Support.py
```python
def getGridTime(timeout, overhead = 0.33):
''' Computes the timeout of the grid based on the timeout (from the tool) received as a parameter.
Moreover, it adds an overhead which is a percentage of the original timeout'''
timetool = int(timeout)
timetooloverhead = timetool + (timetool * overhead)
hr_st = str(int(timetooloverhead // 60))
minutes = int(timetooloverhead % 60)
mt_st = str(minutes) if minutes >=10 else ("0" + str(minutes))
timestring = hr_st + ":" + mt_st
return timestring
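# Example (editor's note): getGridTime(60) -> "1:19", i.e. 60 minutes plus the default
# 33% overhead (79.8 minutes) rendered as hours:minutes.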
```
#### File: RepairThemAll/script/RepairThemAll.py
```python
import subprocess
import argparse
import sys
import os
from config import REPAIR_ROOT
parser = argparse.ArgumentParser(prog="RepairThemAll", description='RepairThemAll interface')
def run():
program = None
if sys.argv[1] == "repair":
program = "repair.py"
elif sys.argv[1] == "info":
program = "info.py"
elif sys.argv[1] == "checkout":
program = "checkout.py"
subprocess.call("python %s %s" % (os.path.join(REPAIR_ROOT, "script", program), program), shell=True)
if __name__ == "__main__":
run()
``` |
{
"source": "josericardojr/BinG",
"score": 3
} |
#### File: BinG/Prolog/PrologConsult.py
```python
from Prolog.PrologFactData import *
from Prolog.PrologRuleData import *
class PrologConsult:
def __init__(self):
self.fact_data = PrologFactData()
self.rule_data = PrologRuleData()
def set_fact(self, f, vertexs):
for v in vertexs:
if f.check_fact(vertexs[v]):
self.fact_data.setup_fact(f.fact_name, vertexs[v].ID, f.get_value(vertexs[v]))
def set_rule(self, r):
self.rule_data.setup_rule(r)
```
#### File: BinG/Prolog/PrologFact.py
```python
class PrologFact:
def __init__(self, fact):
self.fact = fact
def get_fact(self):
return self.fact
```
#### File: BinG/Schema/SchemaRule.py
```python
from xml.etree import ElementTree
class SchemaRule:
def __init__(self, rules):
self.rule_name = rules.attrib['name']
rule_inputs = rules.find('inputs')
r = rules.find('return')
self.return_var = r.attrib['name']
inputs = []
for inp in rule_inputs:
for att in inp.attrib:
inputs.append(inp.attrib[att])
self.inputs = inputs
rule_facts = rules.find('facts')
facts = {}
for f in rule_facts:
facts[f.attrib['name']] = []
for inp in f:
facts[f.attrib['name']].append(inp.attrib['name'])
find = False
for f1 in facts:
for f2 in facts[f1]:
if self.return_var == f2:
find = True
if not find:
SchemaRule.error_feedback(self.rule_name + ' do not have any fact\'s input named: ' + self.return_var)
self.facts = facts
def get_return_var(self):
return self.return_var
@staticmethod
def error_feedback(s):
print('<>' * 3)
print('ERROR: ' + s)
```
#### File: BinG/XML_Data/Vertex.py
```python
from xml.etree import ElementTree
class Vertex:
def __init__(self, me):
self.me = me
self.ID = me.find('ID').text
``` |
{
"source": "josericardojr/MorphWing",
"score": 4
} |
#### File: Python/Reader/Attributes.py
```python
class Attributes:
def __init__(self, name, value):
self.vname = name
self.vvalue = value
def name(self):
return self.vname
def value(self):
return self.vvalue
def myprint(self):
print('{0}: {1}'.format(self.name(), self.value()))
``` |
{
"source": "JoseRivas1998/Minecraft-Pi-Games",
"score": 3
} |
#### File: JoseRivas1998/Minecraft-Pi-Games/findTheDiamond.py
```python
from mcpi.minecraft import Minecraft
from mcpi import block
import math, random
# Euclidean distance between points (x1, y1, z1) and (x2, y2, z2)
def distance(x1, y1, z1, x2, y2, z2):
dx = x2 - x1
dy = y2 - y1
dz = z2 - z1
return math.sqrt((dx ** 2) + (dy ** 2) + (dz ** 2))
# Returns a random x, y, z point between (x0, y0, z0) and (x1, y1, z1) inclusive
def getRandPoint(x0, y0, z0, x1, y1, z1):
x = random.randint(x0, x1)
y = random.randint(y0, y1)
z = random.randint(z0, z1)
return x, y, z
# Finds x, y, z location of a non air block within a range
def getNonAir(mc, x0, y0, z0, x1, y1, z1):
x, y, z = 0, 0, 0
isAir = True
while(isAir):
x, y, z = getRandPoint(x0, y0, z0, x1, y1, z1)
blkid = mc.getBlockWithData(x, y, z).id
isAir = blkid == block.AIR.id
return x, y, z
# Creates diamond block at random location, returns the coordinates of the block
def createDiamond(mc, ppos):
playerHeight = mc.getHeight(ppos.x, ppos.z)
ymin = playerHeight - 5
ymax = playerHeight + 5
xmin = -124
xmax = 124
zmin = -124
zmax = 124
x, y, z = getNonAir(mc, xmin, ymin, zmin, xmax, ymax, zmax)
mc.setBlock(x, y, z, block.DIAMOND_BLOCK.id)
return x, y, z
# Main game function
def playGame(mc):
mc.postToChat("Right click on blocks to find the diamond block!")
mc.postToChat("Placing diamond block...")
ppos = mc.player.getTilePos()
dx, dy, dz = createDiamond(mc, ppos)
print(dx, dy, dz)
found = False
prevDist = -1
mc.postToChat("Go!")
while(not found):
for hit in mc.events.pollBlockHits():
# Get hit position
hx, hy, hz = hit.pos.x, hit.pos.y, hit.pos.z
if(hx == dx and hy == dy and hz == dz):
mc.postToChat("You found it!")
found = True
else:
# Get the distance between the block hit and the diamond
dist = distance(dx, dy, dz, hx, hy, hz)
if(prevDist == -1): # If no distance has been calculated
mc.postToChat("Keep looking and you find the block")
else:
if(dist > prevDist):
mc.postToChat("Getting colder!")
else:
mc.postToChat("Getting warmer!")
# copy distance to previous distance
prevDist = dist
mc = Minecraft.create()
playGame(mc)
``` |
{
"source": "JoseRoberts87/project-deap",
"score": 2
} |
#### File: project-deap/test/test_file_handler.py
```python
import pandas as pd
from deap import file_reader as fr
def test_get_extension():
result = fr.get_extension('some.csv')
assert result == 'csv'
def test_get_extension_fail():
result = fr.get_extension('some')
assert result == None
def test_file_extensions_blob():
result = fr.file_extentions()
assert result.get('blob')
def test_file_extensions_matrix():
result = fr.file_extentions()
assert result.get('matrix')
def test_file_extensions_fail():
result = fr.file_extentions()
assert result.get('some') == None
def test_ext_handler():
result = fr.ext_handler('json')
assert result == pd.read_json
``` |
{
"source": "joseroma/bulo-blocker",
"score": 3
} |
#### File: joseroma/bulo-blocker/telegram-bot.py
```python
from __future__ import print_function
import traceback
import telebot
import time
import pprint as pp
import gspread
from oauth2client.service_account import ServiceAccountCredentials
import pandas as pd
from telebot import types
import ast
bot_token = '<PASSWORD>'
bot = telebot.TeleBot(token=bot_token)
scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
opciones = {"/bulo": "Enviar Bulo", "/info": "Información", "/ayuda": "Ayuda!", "/fastbulo": "¿Mas rápido...?"}
fuentes = {"/1": "WhatsApp familiar", "/2": "Lista de difusion", "/3": "Telegram", "/4": "Otras redes sociales"}
crossIcon = u"\u274C"
print("Servidor iniciado!")
def makeKeyboard(stringList):
markup = types.InlineKeyboardMarkup()
for key, value in stringList.items():
markup.add(types.InlineKeyboardButton(text=value, callback_data="['value', '" + value + "', '" + key + "']"))
return markup
@bot.message_handler(commands=['hola', 'start'])
def send_welcome(message):
bot.reply_to(message, '¡Hola! \nTe damos la bienvenida al Buloblocker de Greenpeace España. Utilizando este bot, ya estás luchando contra la desinformación. \n¡Felicidades! ')
bot.send_message(chat_id=message.chat.id,
text="....",
reply_markup=makeKeyboard(opciones),
parse_mode='HTML')
@bot.message_handler(commands=['info'])
def send_info(message):
bot.reply_to(message, "La desinformación, es decir, la información falsa se expande, de manera intencionada, por nuestros móviles, redes sociales o conversaciones. Estos mensajes 'hackean' nuestros cerebros y nuestras democracias, porque generan premisas falsas, contaminan el debate y polarizan nuestra sociedad. El histórico negacionismo sobre el cambio climático, la acusación de intereses ocultos en los grupos ecologistas y otras tantas historias más forman parte de esta estrategia de desinformación que pretende mentir sobre el impacto del actual modelo económico sobre el planeta. "
"\nQueremos parar esa 'desinformación verde'. Desde Greenpeace os pedimos que nos ayudéis a buscar los bulos que circulan y que compartáis los desmentidos. \nMás información: https://es.greenpeace.org/es/noticias/buloblocker/")
@bot.message_handler(commands=['ayuda'])
def send_help(message):
bot.reply_to(message, ""
"Por si estas un poco perdido, te recordamos como hablar a BuloBlocker!\n"
"Saluda con un /hola \n"
"En caso de querer recordar los comandos vuelve a usar /ayuda \n"
"Infórmate sobre esta iniciativa con /info\n"
"Mandanos un Bulo siguiendo los pasos de /bulo\n"
"Echale un ojo a la guía sobre como enviar bulos de forma rápida /fastbulo ")
@bot.message_handler(commands=['fastbulo'])
def send_fast_bulo(message):
bot.reply_to(message, "Si tienes pensado hablarme a menudo, esto te interesa:\n"
"Puedes saltarte muchos pasos a la hora de enviar un enlac, y que me puedas mandar contenido dudoso mucho mas rápido. \n"
"De hecho, puedes saltarte el saludo (/hola), incluso decirme que vas a mandar un enlace (/bulo) y simplemente mandármela. "
"Adelante, manda el enlace, ¡ya verás que rápido! \n")
@bot.message_handler(commands=['bulo'])
def start_bulo(message):
bot.reply_to(message, "Adelante!, envía por el chat el enlace" )
@bot.message_handler(commands=['test'])
def handle_command_adminwindow(message):
bot.send_message(chat_id=message.chat.id,
text="Here are the values of stringList",
reply_markup=makeKeyboard(opciones),
parse_mode='HTML')
@bot.callback_query_handler(func=lambda call: True)
def handle_query(call):
opciones = { "/bulo": "Enviar Bulo", "/info": "Información", "/ayuda": "Ayuda!",
"/fastbulo": "¿Mas rápido...?"}
if call.data.startswith("['value'"):
valueFromCallBack = ast.literal_eval(call.data)[1]
keyFromCallBack = ast.literal_eval(call.data)[2]
#bot.answer_callback_query(callback_query_id=call.id, show_alert=True, text="You Clicked " + valueFromCallBack + " and key is " + keyFromCallBack)
if keyFromCallBack.startswith("/bulo"):
start_bulo(call.message)
if keyFromCallBack.startswith("/info"):
send_info(call.message)
if keyFromCallBack.startswith("/ayuda"):
send_help(call.message)
if keyFromCallBack.startswith("/fastbulo"):
send_fast_bulo(call.message)
if call.data.startswith("['value'") and ast.literal_eval(call.data)[1] in ["WhatsApp familiar", "Lista de difusion", "Telegram", "Otras redes sociales"]:
valueFromCallBack = ast.literal_eval(call.data)[1]
credentials = ServiceAccountCredentials.from_json_keyfile_name('BuloBlocker-451d2c1b42d2.json', scope)
gc = gspread.authorize(credentials)
wks = gc.open('Respuestas Formulario Muckrakers').worksheet("bulos recibidos del bot")
df = pd.DataFrame(wks.get_all_records())
print(df)
try:
lista_var_temp = [call.message.chat.first_name +" "+ call.message.chat.last_name, call.message.chat.id, call.message.text,
"Sin especificar", "Sin especificar",
valueFromCallBack, "Sin especificar", "Sin especificar",
time.strftime("%d/%m/%y") + " " + time.strftime("%H:%M:%S")]
except:
try:
lista_var_temp = [call.message.chat.first_name ,
call.message.chat.id, call.message.text,
"Sin especificar", "Sin especificar",
valueFromCallBack, "Sin especificar", "Sin especificar",
time.strftime("%d/%m/%y") + " " + time.strftime("%H:%M:%S")]
except:
lista_var_temp = ["Sin especificar" + " " + "Sin especificar",
call.message.chat.id, call.message.text,
"Sin especificar", "Sin especificar",
valueFromCallBack, "Sin especificar", "Sin especificar",
time.strftime("%d/%m/%y") + " " + time.strftime("%H:%M:%S")]
wks.append_row(lista_var_temp)
print(call.message)
bot.send_message(call.message.chat.id, "Gracias por enviarnos esta desinformación o contenido dudoso. \n"
"Nuestro personal de campañas, lo estudiará para realizar la verificación. "
"Puedes consultar nuestra biblioteca de desmentidos o Greenchecking. http://greenpeace.es/biblioteca-desmentidos "
"\n\nDifúndela y ayúdanos a parar la desinformación")
@bot.message_handler(func=lambda msg: msg.text is not None and '://' in msg.text)
def send_bulo(message):
"""
# PRIMERO VAMOS A REVISAR
credentials = ServiceAccountCredentials.from_json_keyfile_name('BuloBlocker-451d2c1b42d2.json', scope)
gc = gspread.authorize(credentials)
wks = gc.open('Respuestas Formulario Muckrakers').worksheet("bulos desmentidos")
df = pd.DataFrame(wks.get_all_records())
if any(df['enlace del contenido'].isin([message.text])):
desmentido_rows = df[df['enlace del contenido'].isin([message.text])]
desmentido = list(desmentido_rows["enlace del desmentido"])[0]
#Comentar que este enlace ya lo recibimos y que nos diga como lo ha recibido
bot.send_message(message.chat.id, "En enlace proporcionado contiene desinformación que ya ha sido desmentida en nuestro Greenchecking\n"
"Revisa nuestro desmentido en: \n"+ desmentido + "\n\nNo te olvides de indicarnos cómo recibiste el enlace\n")
bot.send_message(chat_id=message.chat.id, text= message.text,
reply_markup=makeKeyboard(fuentes), parse_mode='HTML')
else:
"""
bot.send_message(message.chat.id, "¿Quieres ayudarnos?\n\n¿Cómo recibiste el enlace?\n")
bot.send_message(chat_id=message.chat.id, text=message.text,
reply_markup=makeKeyboard(fuentes), parse_mode='HTML')
# Check whether we already have the fact check for this news item
#if any(message.text in string for string in df['ENLACE A LA PLATAFORMA DONDE ESTÁ PUBLICADA LA NOTICIA']):
# bot.reply_to(message, "Gracias por tu colaboración este ya lo tenemos")
# print("Gracias por tu colaboración este ya lo tenemos")
def telegram_polling():
try:
bot.polling()#none_stop=True, timeout=60)
except:
traceback_error_string=traceback.format_exc()
with open("Error.Log", "a") as myfile:
myfile.write("\r\n\r\n" + time.strftime("%c")+"\r\n<<ERROR polling>>\r\n" + traceback_error_string + "\r\n<<ERROR polling>>")
bot.stop_polling()
#time.sleep(10)
telegram_polling()
telegram_polling()
``` |
{
"source": "JoseRoman/IndicoIo-python",
"score": 3
} |
#### File: indicoio/images/features.py
```python
import json
import requests
import numpy as np
from indicoio import JSON_HEADERS
base_url = lambda c: "http://api.indico.io/%s" % c
def facial_features(face):
data_dict = json.dumps({"face": face})
response = requests.post(base_url("facialfeatures"), data=data_dict, headers=JSON_HEADERS)
response_dict = json.loads(response.content)
return response_dict['response']
```
#### File: indicoio/images/fer.py
```python
import json
import requests
import numpy as np
from indicoio import JSON_HEADERS
base_url = "http://api.indico.io/fer"
def fer(face):
data_dict = json.dumps({"face": face})
response = requests.post(base_url, data=data_dict, headers=JSON_HEADERS)
return json.loads(response.content)
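# Example usage (editor's sketch, mirroring the package's own tests, which send a
# 48x48 greyscale face):
# emotions = fer(np.linspace(0, 50, 48 * 48).reshape(48, 48).tolist())
# 'emotions' is a dict with keys such as 'Happy', 'Sad', 'Angry', ...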
```
#### File: indicoio/utils/__init__.py
```python
import inspect
import numpy as np
class TypeCheck(object):
"""
Decorator that performs a typecheck on the input to a function
"""
def __init__(self, accepted_structures, arg_name):
"""
When initialized, include list of accepted datatypes and the
arg_name to enforce the check on. Can totally be daisy-chained.
"""
self.accepted_structures = accepted_structures
self.is_accepted = lambda x: type(x) in accepted_structures
self.arg_name = arg_name
def __call__(self, fn):
def check_args(*args, **kwargs):
arg_dict = dict(zip(inspect.getargspec(fn).args, args))
full_args = dict(arg_dict.items() + kwargs.items())
if not self.is_accepted(full_args[self.arg_name]):
raise DataStructureException(
fn,
full_args[self.arg_name],
self.accepted_structures
)
return fn(*args, **kwargs)
return check_args
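# Example (editor's sketch; the decorated function below is hypothetical):
# @TypeCheck((list, np.ndarray), 'data')
# def mean_of(data):
#     return sum(data) / len(data)
# mean_of([1, 2, 3])       # passes the type check
# mean_of("not a list")    # raises DataStructureException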
class DataStructureException(Exception):
"""
If a non-accepted datastructure is passed, throws an exception
"""
def __init__(self, callback, passed_structure, accepted_structures):
self.callback = callback.__name__
self.structure = str(type(passed_structure))
self.accepted = [str(structure) for structure in accepted_structures]
def __str__(self):
return """
function %s does not accept %s, accepted types are: %s
""" % (self.callback, self.structure, str(self.accepted))
@TypeCheck((list, dict, np.ndarray), 'array')
def normalize(array, distribution=1, norm_range=(0, 1), **kwargs):
"""
First arg is an array, whether that's in the form of a numpy array,
a list, or a dictionary that contains the data in its values.
Second arg is the desired distribution which would be applied before
normalization.
Supports linear, exponential, logarithmic and raising to whatever
power specified (in which case you just put a number)
Third arg is the range across which you want the data normalized
"""
# Handling dictionary array input
# Note: lists and numpy arrays behave the same in this program
dict_array = isinstance(array, dict)
if dict_array:
keys = array.keys()
array = np.array(array.values()).astype('float')
else: # Decorator errors if this isn't a list or a numpy array
array = np.array(array).astype('float')
# Handling various distributions
if type(distribution) in [float, int]:
array = np.power(array, distribution)
else:
array = getattr(np, distribution)(array, **kwargs)
# Prep for normalization
x_max, x_min = (np.max(array), np.min(array))
def norm(element,x_min,x_max):
base_span = (element - x_min)*(norm_range[-1] - norm_range[0])
return norm_range[0] + base_span / (x_max - x_min)
norm_array = np.vectorize(norm)(array, x_min, x_max)
if dict_array:
return dict(zip(keys, norm_array))
return norm_array
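# Example (editor's note): normalize([1, 2, 3], distribution=2, norm_range=(0, 1))
# first squares the values to [1, 4, 9] and then rescales them into [0, 1],
# giving [0.0, 0.375, 1.0].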
```
#### File: IndicoIo-python/tests/test_run.py
```python
import unittest
import numpy as np
from indicoio import political, sentiment, fer, facial_features, language
class FullAPIRun(unittest.TestCase):
def test_political(self):
political_set = set(['Libertarian', 'Liberal', 'Conservative', 'Green'])
test_string = "Guns don't kill people, people kill people."
response = political(test_string)
self.assertTrue(isinstance(response, dict))
self.assertEqual(political_set, set(response.keys()))
def test_posneg(self):
posneg_set = set(['Sentiment'])
test_string = "Worst song ever."
response = sentiment(test_string)
self.assertTrue(isinstance(response, dict))
self.assertEqual(posneg_set, set(response.keys()))
def test_good_fer(self):
fer_set = set(['Angry', 'Sad', 'Neutral', 'Surprise', 'Fear', 'Happy'])
test_face = np.linspace(0,50,48*48).reshape(48,48).tolist()
response = fer(test_face)
self.assertTrue(isinstance(response, dict))
self.assertEqual(fer_set, set(response.keys()))
def test_bad_fer(self):
fer_set = set(['Angry', 'Sad', 'Neutral', 'Surprise', 'Fear', 'Happy'])
test_face = np.linspace(0,50,56*56).reshape(56,56).tolist()
response = fer(test_face)
self.assertTrue(isinstance(response, dict))
self.assertEqual(fer_set, set(response.keys()))
def test_good_facial_features(self):
test_face = np.linspace(0,50,48*48).reshape(48,48).tolist()
response = facial_features(test_face)
self.assertTrue(isinstance(response, list))
self.assertEqual(len(response), 48)
def test_language(self):
language_set = set([
'English',
'Spanish',
'Tagalog',
'Esperanto',
'French',
'Chinese',
'French',
'Bulgarian',
'Latin',
'Slovak',
'Hebrew',
'Russian',
'German',
'Japanese',
'Korean',
'Portuguese',
'Italian',
'Polish',
'Turkish',
'Dutch',
'Arabic',
'Persian (Farsi)',
'Czech',
'Swedish',
'Indonesian',
'Vietnamese',
'Romanian',
'Greek',
'Danish',
'Hungarian',
'Thai',
'Finnish',
'Norwegian',
'Lithuanian'
])
language_dict = language('clearly an english sentence')
self.assertEqual(language_set, set(language_dict.keys()))
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "joseroma/python-api-rest",
"score": 3
} |
#### File: joseroma/python-api-rest/app.py
```python
from flask import Flask, jsonify, abort, request, make_response
import mysql.connector
app = Flask(__name__)
# Queries
query = ("SELECT * FROM ACTOR;")
query1 = ("SELECT p.oid_pelicula, p.titulo,p.fecha_lanzamiento,p.genero, a.oid_ACTOR, a.nombre, a.apellido FROM ACTOR a, PELICULA_has_ACTOR m, PELICULA p WHERE p.oid_pelicula = m.PELICULA_oid_pelicula AND a.oid_ACTOR = m.ACTOR_oid_ACTOR;")
query2 = ("SELECT * FROM DIRECTOR;")
query3 = ("SELECT p.oid_pelicula, p.titulo,p.fecha_lanzamiento,p.genero, a.oid_director, a.name FROM DIRECTOR a, PELICULA_has_DIRECTOR m, PELICULA p WHERE p.oid_pelicula = m.PELICULA_oid_pelicula AND a.oid_director = m.DIRECTOR_oid_director;")
query4 = ("SELECT * FROM PELICULA;")
# Conections
cnx = mysql.connector.connect(user='root', password='password',host='127.0.0.1',database='filmAffinity3')
cnx1 = mysql.connector.connect(user='root', password='password',host='127.0.0.1', database='filmAffinity3')
cnx2 = mysql.connector.connect(user='root', password='password',host='127.0.0.1', database='filmAffinity3')
cnx3 = mysql.connector.connect(user='root', password='password',host='127.0.0.1', database='filmAffinity3')
cnx4 = mysql.connector.connect(user='root', password='password',host='127.0.0.1', database='filmAffinity3')
# Get list from connection
cursor = cnx.cursor()
cursor.execute(query)
cursor1 = cnx1.cursor()
cursor1.execute(query1)
cursor2 = cnx2.cursor()
cursor2.execute(query2)
cursor3 = cnx3.cursor()
cursor3.execute(query3)
cursor4 = cnx4.cursor()
cursor4.execute(query4)
actors = []
actors_pelicula = []
directors = []
directors_movies = []
movies_ = []
movies_directors = []
for res in cursor:
actors.append({
"id":res[0],
"name":res[1],
"lastname":res[2],
"birth_date":res[3]
})
for res in cursor1:
actors_pelicula.append({
"id_film": res[0],
"title_film": res[1],
"date_film": res[2],
"genre_film": res[3],
"id_actor": res[4],
"name_actor": res[5],
"lastname_actor": res[6]
})
for res in cursor2:
directors.append({
"id": res[0],
"name": res[1]
})
for res in cursor3:
directors_movies.append({
"id_film": res[0],
"title_film": res[1],
"date_film": res[2],
"genre_film": res[3],
"id_director": res[4],
"name_director": res[5]
})
for res in cursor4:
movies_.append({
"id": res[0],
"title": res[1],
"year": res[3],
"genre": res[4]
})
#Close cursors and connections
cursor.close()
cnx.close()
cursor1.close()
cnx1.close()
cursor2.close()
cnx2.close()
cursor3.close()
cnx3.close()
cursor4.close()
cnx4.close()
@app.route('/')
def hello_world():
return 'Welcome to the REST-API !'
##############
#GET METHODS
##############
#-----------
# ACTORS
#-----------
@app.route('/api/actors', methods=['GET'])
def get_actors():
name = request.args.get('name')
if name:
actor = [actor for actor in actors if actor['name'] == name]
return jsonify({"Actors": actor})
else:
return jsonify({"Actors": actors})
@app.route('/api/actors/<int:id>', methods=['GET'])
def get_actors_(id):
actor = [actor for actor in actors if actor['id'] == id]
return jsonify({"Actor": actor[0]})
@app.route('/api/actors/<int:id>/movies', methods=['GET'])
def get_actors_movies(id):
pel= [pel for pel in actors_pelicula if pel['id_actor'] == id]
return jsonify({"MoviesOfActor": pel})
@app.route('/api/actors/<int:id>/movies/<int:id2>', methods=['GET'])
def get_actors_movies_specific(id,id2):
pel = [pel for pel in actors_pelicula if pel['id_film'] == id2]
if pel:
pelo = [pelo for pelo in pel if pelo['id_actor'] == id]
if pelo:
res = jsonify({"MoviesOfActor": pelo})
else:
res = jsonify({"Ese actor no ha participado en esa pelicula": pel})
else:
res = jsonify({"Esa pelicula no existe": pel})
return res
#-----------
# DIRECTORS
#-----------
@app.route('/api/directors', methods=['GET'])
def get_directors():
name =request.args.get('name')
#http://localhost:5000/api/directors?name=Poland
if name:
director = [director for director in directors if director['name'] == name]
return jsonify({"Directors": director})
#http://localhost:5000/api/directors
else:
return jsonify({"Directors": directors})
@app.route('/api/directors/<int:id>', methods=['GET'])
def get_directors_id(id):
director = [director for director in directors if director['id'] == id]
return jsonify({"Director": director})
@app.route('/api/directors/<int:id>/movies', methods=['GET'])
def get_director_movies(id):
dir = [dir for dir in directors_movies if dir['id_director'] == id]
return jsonify({"MoviesOfActor": dir})
@app.route('/api/directors/<int:id>/movies/<int:id2>', methods=['GET'])
def get_directors_movies_specific(id, id2):
dir = [dir for dir in directors_movies if dir['id_film'] == id2]
if dir:
diro = [diro for diro in dir if diro['id_director'] == id]
if diro:
res = jsonify({"MoviesOfActor": diro})
else:
res = jsonify({"Ese director no ha participado en esa pelicula": dir})
else:
res = jsonify({"Esa pelicula no existe": dir})
return res
#-----------
# MOVIES
#-----------
@app.route('/api/movies', methods=['GET'])
def get_movies():
title =request.args.get('title')
genre = request.args.get('genre')
year = request.args.get('year')
#http://localhost:5000/api/movies?genre=Drama
if genre and title and year:
mov = [mov for mov in movies_ if mov['genre'] == genre and mov['year'] == year and mov['title'] == title]
return jsonify({"Movies": mov})
elif genre:
mov = [mov for mov in movies_ if mov['genre'] == genre]
return jsonify({"Movies": mov})
elif title:
mov = [mov for mov in movies_ if mov['title'] == title]
return jsonify({"Movies": mov})
elif year:
mov = [mov for mov in movies_ if mov['year'] == year]
return jsonify({"Movies": mov})
#http://localhost:5000/api/movies
else:
return jsonify({"Movies": movies_})
@app.route('/api/movies/<int:id>', methods=['GET'])
def get_movies_id(id):
mov = [mov for mov in movies_ if mov['id'] == id]
return jsonify({"Director": mov})
@app.route('/api/movies/<int:id>/director', methods=['GET'])
def get_director_of_movies(id):
dir = [dir for dir in directors_movies if dir['id_film'] == id]
return jsonify({"MoviesOfActor": dir})
@app.route('/api/movies/<int:id>/actors', methods=['GET'])
def get_actors_of_movies(id):
act = [act for act in actors_pelicula if act['id_film'] == id]
return jsonify({"MoviesOfActor": act})
@app.route('/api/movies/<int:id>/actors/<int:id2>', methods=['GET'])
def get_actors__of_movies_specific(id, id2):
act = [act for act in actors_pelicula if act['id_actor'] == id2]
if act:
act_val = [act_val for act_val in act if act_val['id_film'] == id]
if act_val:
res = jsonify({"MoviesOfActor": act_val})
else:
res = jsonify({"Ese actor no ha participado en ninguna de estas pelis": act})
else:
res = jsonify({"Ese actor no existe": "prueba otro mejor"})
return res
##############
#POST METHODS
##############
@app.route('/api/actors', methods=['POST'])
def create_actor():
if not request.json or not 'name' in request.json:
abort(400)
actor = {
'id': request.json['id'],
'name': request.json['name'],
'lastname': request.json['lastname']
}
actors.append(actor)
return jsonify({'actor': actors})
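# Example request (editor's illustration; the field values are hypothetical):
# curl -X POST -H "Content-Type: application/json" \
#      -d '{"id": 99, "name": "Ana", "lastname": "Lopez"}' \
#      http://localhost:5000/api/actors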
@app.route('/api/movies', methods=['POST'])
def create_movie():
if not request.json or not 'title' in request.json or not 'genre' in request.json or not 'year' in request.json:
abort(400)
movie = {
'id': request.json['id'],
'title': request.json['title'],
'year': request.json['year'],
'genre': request.json['genre']
}
movies_.append(movie)
return jsonify({'actor': movies_})
@app.route('/api/directors', methods=['POST'])
def create_director():
if not request.json or not 'name' in request.json:
abort(400)
director = {
'id': request.json['id'],
'name': request.json['name']
}
directors.append(director)
return jsonify({'directors': directors})
######################
#PUT METHODS (UPDATE)
######################
@app.route('/api/actors/<int:id>', methods=['PUT'])
def update_actor(id):
act = [act for act in actors if act['id'] == id]
if len(act) == 0:
abort(404)
act[0]['name'] = request.json.get('name', act[0]['name'])
act[0]['lastname'] = request.json.get('lastname', act[0]['lastname'])
return jsonify({'Actor': act[0]})
@app.route('/api/directors/<int:id>', methods=['PUT'])
def update_director(id):
act = [act for act in directors if act['id'] == id]
if len(act) == 0:
abort(404)
act[0]['name'] = request.json.get('name', act[0]['name'])
return jsonify({'Director modified ': act[0]})
@app.route('/api/movies/<int:id>', methods=['PUT'])
def update_movie(id):
act = [act for act in movies_ if act['id'] == id]
if len(act) == 0:
abort(404)
act[0]['title'] = request.json.get('title', act[0]['title'])
act[0]['year'] = request.json.get('year', act[0]['year'])
act[0]['genre'] = request.json.get('genre', act[0]['genre'])
return jsonify({'Movie modified ': act[0]})
#################
#DELETE METHODS
#################
@app.route('/api/actors/<int:id>', methods=['DELETE'])
def delete_actor(id):
actor = [actor for actor in actors if actor['id'] == id]
if len(actor) == 0:
abort(404)
actors.remove(actor[0])
return jsonify({'deleted': True})
@app.route('/api/directors/<int:id>', methods=['DELETE'])
def delete_director(id):
director = [director for director in directors if director['id'] == id]
if len(director) == 0:
abort(404)
directors.remove(director[0])
return jsonify({'deleted': True})
@app.route('/api/movies/<int:id>', methods=['DELETE'])
def delete_movie(id):
movie = [movie for movie in movies_ if movie['id'] == id]
if len(movie) == 0:
abort(404)
movies_.remove(movie[0])
return jsonify({'deleted': True})
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify({'error': 'Not found'}), 404)
if __name__ == '__main__':
app.run()
#localhost:5000
``` |
{
"source": "joseronierison/github_integration",
"score": 2
} |
#### File: github_integration/github_integration/celery.py
```python
from __future__ import absolute_import
import logging
import os
from django.apps import apps
from celery import Celery
from github_integration import utils
from github_integration.models import Commit, Repository
from users.models import User
logger = logging.getLogger(__name__)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "github_integration.settings.local")
app = Celery('github_integration_tasks')
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks(lambda: [n.name for n in apps.get_app_configs()])
@app.task
def setup_repo(user_id, repo_name, repo_full_name):
logger.info("[repo:%s,user_id:%d] Setting up repo.", repo_name, user_id)
user = User.objects.get(id=user_id)
repo = utils.github_client(user).get_user().get_repo(repo_name)
commits = repo.get_commits().reversed
return [save_commit.apply([repo_full_name,
utils.build_commit_body(commit)]) for commit in commits]
@app.task
def save_commit(repo_full_name, commit):
logger.info("[repo_full_name:%s,commit_sha:%s] Adding new commit.",
repo_full_name, commit['sha'])
repository = Repository.objects.get(full_name=repo_full_name)
return Commit.objects.create(
repository=repository,
sha=commit['sha'],
message=commit['message'],
author=commit['author'],
url=commit['url'],
)
```
#### File: github_integration/github_integration/utils.py
```python
from github import Github
def github_client(user):
github = user.social_auth.get(provider='github')
token = github.extra_data['access_token']
return Github(token)
def build_commit_body(commit):
return {
'sha': commit.sha,
'message': commit.commit.message,
'author': commit.author.name,
'url': commit.html_url,
}
def build_repo_body(repo):
return {
'name': repo.name,
'description': repo.description,
'full_name': repo.full_name,
'url': repo.html_url,
'created_at': repo.created_at,
}
``` |
{
"source": "JoserraLP/SpotifyDownloader",
"score": 3
} |
#### File: SpotifyDownloader/music_playlist_manager/user.py
```python
from spotipy.oauth2 import SpotifyClientCredentials
from spotipy.client import Spotify
from music_playlist_manager.secrets import SPOTIPY_CLIENT_SECRET, SPOTIPY_CLIENT_ID
from music_playlist_manager.constants import DEFAULT_USERNAME, YOUTUBE_PLAYLISTS
from pytube import Playlist
class SpotifyUser:
"""
Spotify user class
"""
def __init__(self, username: str = DEFAULT_USERNAME):
"""
SpotifyUser initializer. Connect to Spotify API.
:param username: Spotify username. Default to 'joserralp'
:type username: str
"""
# Store username
self._username = username
# Auth with Spotipy
auth_manager = SpotifyClientCredentials(client_id=SPOTIPY_CLIENT_ID, client_secret=SPOTIPY_CLIENT_SECRET)
# Create Spotify instance
self._sp = Spotify(auth_manager=auth_manager)
def get_current_playlists(self):
"""
Get playlists from the current Spotify user.
:return: list with playlist info
"""
# Initialize playlists list
playlists = list()
# Retrieve user playlists
user_playlists = self._sp.user_playlists(self._username)
# Iterate over the playlists
for user_playlist in user_playlists['items']:
playlist_tracks = []
# Get playlist tracks
tracks_info = self._sp.playlist(user_playlist['id'], fields=('tracks',))['tracks']
# Iterate over the tracks in order to store its artists, name, release date and duration
for _, item in enumerate(tracks_info['items']):
# Get current track
track = item['track']
# Append data
playlist_tracks.append({
'artists': [artist['name'] for artist in track['artists']],
'name': track['name'],
'release_date': track['album']['release_date'],
'duration': track['duration_ms'] / 1000
})
# Store the playlist info such as the name, url and its tracks
playlists.append({
'url': user_playlist['external_urls']['spotify'],
'name': user_playlist['name'],
'tracks': playlist_tracks
})
return playlists
class YoutubeUser:
"""
Youtube user class
"""
def __init__(self):
"""
YoutubeUser initializer. Retrieve the playlists
"""
self._playlists = list()
def get_current_playlist(self):
"""
Get playlists from the current Youtube user.
:return: list with playlist info
"""
youtube_playlists = YOUTUBE_PLAYLISTS
for name, url in youtube_playlists.items():
playlist = Playlist(url)
playlist_tracks = list()
for track in playlist.videos:
artists, track_name = track.title.split('-', 1)
playlist_tracks.append({
'artists': artists,
'name': track_name
})
self._playlists.append({
'url': url,
'name': name,
'tracks': playlist_tracks,
'playlist': playlist
})
return self._playlists
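# Usage sketch (assumes valid credentials in secrets.py and playlist URLs in
# YOUTUBE_PLAYLISTS): both classes return the same list-of-dicts shape, so the two
# sources can be compared track by track.
#
#   spotify_playlists = SpotifyUser().get_current_playlists()
#   youtube_playlists = YoutubeUser().get_current_playlist()
#   for playlist in spotify_playlists:
#       print(playlist['name'], len(playlist['tracks']))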
``` |
{
"source": "JoseRubin09/EscapemetProyect",
"score": 3
} |
#### File: JoseRubin09/EscapemetProyect/api.py
```python
import requests
import time
import datetime
from frases import gg, gratz
def api_call():
url = "https://api-escapamet.vercel.app/"
response = requests.request("GET", url)
return response.json()
def try_again():
continuar = 1
while continuar == 1:
opcion = input('Deseas volver a intentarlo. Escribe (si) o (no):\n>>').lower()
while not ("".join(opcion.split(" "))).isalpha():
opcion = input("Ingreso invalido, ingrese si o no: \n>>").lower()
if opcion == 'si':
return 1
elif opcion == 'no':
return 0
else:
print('Ingrese una opcion valida')
continuar = 1
def to_be_continue():
time.sleep(3)
def buen_continue():
sigue_partida = input('''
Para continuar la partida:
presione (C)
>> ''').lower()
while sigue_partida != 'c':
sigue_partida = input('''
Coloque la letra (C):>> ''').lower()
if sigue_partida == 'c':
pass
def primer_discurso(new_game):
show_time = new_game.mostrar_tiempo()
primera_narra = (f'''
Hoy 5 de marzo de 2021, la Universidad sigue en cuarentena(esto no es novedad), lo que sí es
novedad es que se robaron un Disco Duro de la Universidad del cuarto de redes que tiene toda
la información de SAP de estudiantes, pagos y asignaturas. Necesitamos que nos ayudes a
recuperar el disco, para eso tienes: {show_time} minutos antes de que el servidor se caiga
y no se pueda hacer más nada.''')
print(primera_narra)
def segundo_discurso(new_game):
show_avatar = new_game.mostrar_avatar()
segunda_narra = (f'''
Bienvenido {show_avatar}, gracias por tu disposición a ayudarnos a resolver
este inconveniente, te encuentras actualmente ubicado en la biblioteca, revisa
el menú de opciones para ver qué acciones puedes realizar. Recuerda que el tiempo
corre más rápido que un trimestre en este reto.''')
print(segunda_narra)
def end_game(new_game):
show_avatar = new_game.mostrar_avatar()
show_time = new_game.mostrar_tiempo()
ultima_narra = (f'''
¡Felicidades! Has logrado evitar una catástrofe en la Unimet, entonces lograste
todos los objetivos {show_avatar} tenias {show_time} minutos y lo lograste en tal tiempo increible tu record
quedara para la historia vuelve a jugar en otra dificultad para mejorarlo!''')
print(ultima_narra)
def se_acabo(new_game, instanteInicial):
print(gg)
instanteFinal = datetime.datetime.now().minute
    tiempo = instanteFinal - instanteInicial  # difference in minutes (an int, not a timedelta)
# segundos = tiempo.seconds
minutos = tiempo
# print(tiempo)
# print(segundos)
# print(minutos)
new_game.agrego_tiempo(minutos)
add = (new_game.guardar_record()+"\n")
with open("Database.txt","a+") as db:
datos = db.write(add)
print("Estos son tus records en minutos: ")
end_game(new_game)
print(new_game.mostrar_terminado())
time.sleep(3)
def ganador(new_game, instanteInicial):
instanteFinal = datetime.datetime.now().minute
    tiempo = instanteFinal - instanteInicial  # difference in minutes (an int, not a timedelta)
# segundos = tiempo.seconds
minutos = tiempo
# print(tiempo)
# print(segundos)
# print(minutos)
new_game.agrego_tiempo(str(minutos))
print(gratz)
print(f"Lo lograste en {tiempo} minutos felicidades!")
add = (new_game.guardar_record()+"\n")
with open("Database.txt","a+") as db:
datos = db.write(add)
print("Estos son tus records en minutos: ")
print(new_game.mostrar_terminado())
time.sleep(10)
```
#### File: JoseRubin09/EscapemetProyect/main.py
```python
from api import *
from closeup_dibujos import *
from cuartos import Cuartos
import datetime
from dibujos_cuartos import *
from frases import *
from instrucciones import *
from jugador import Jugador
from objetos import Objetos
from partida import Partida
from games_all import *
import time
def records():
print(record)
print('Elige una de estas opciones')
opciones = input('''
1. Top 5
''')
if opciones == '1':
print('Top 5: ')
with open("Database.txt") as db:
datos = db.readlines()
# print(datos)
cantidad = 0
for i,x in enumerate(datos):
leaderboard = x[:-1].split(',')
datos[i] = leaderboard
for j in leaderboard[5:]:
cantidad = cantidad + 1
# print(cantidad)
# print(datos)
y = 0
while y < cantidad:
# print(scores)
for i,usuario in enumerate(datos):
scores = usuario[5:]
if i == 0:
maximo = 0
top = 30
for score in scores:
if int(score) < top:
top = int(score)
maximo = i
y = y + 1
# print(maximo)
# print(top)
usuarios = datos[maximo]
# tiempos = usuarios[5:]
usuarios.remove(str(top))
# print(usuarios)
print(f"--------{y}-------\nUsername:{usuarios[0]}\nTiempo:{top}")
if y == 5:
break
buen_continue()
def partida_nueva():
print(nueva)
opcion = input('''
Eliga la dificultad de su partida:
1. Facil
2. Media
3. Dificil
4. Menu\n >> ''')
while (not opcion.isnumeric()) or (int(opcion) < 1) or (int(opcion) > 4):
opcion = input("Ingreso invalido, ingrese una opcion valida: ")
if opcion == '1':
print(easy)
dificultad = 'Facil'
vidas = float(5)
pistas = 5
tiempo = 30
elif opcion == '2':
print(medio)
dificultad = 'Media'
vidas = float(3)
pistas = 3
tiempo = 20
elif opcion == '3':
print(hard)
dificultad = 'Dificil'
vidas = float(1)
pistas = 2
tiempo = 10
elif opcion == '4':
print('Adios')
menu_juego()
else:
return 0
    try:
        with open("Database.txt", "r") as db:
            datos = db.readlines()
    except FileNotFoundError:
        datos = []
username = input('Username: ')
attempt = 0
no_existe = 0
for i,dato in enumerate(datos):
        user = dato[:-1].split(",")  # "dato[:-1]" takes the whole line without the trailing newline; .split(",") splits it at every comma and stores the resulting strings in a list
# print(user)
sigue_pregunta = 1
while sigue_pregunta == 1:
if username == user[0]:
print(user[i])
old_password = input(f'Bienvenido {username} introduce tu contrasena: ')
if old_password == user[1]:
print('Welcome back suerte esta vez')
time.sleep(2)
dificultad = dificultad
vidas = vidas
pistas = pistas
tiempo = tiempo
username = user[0]
contrasena = old_password
edad = user[2]
avatar = user[3]
inventario = ''
tiempo_partidas = ''
new_game = Jugador(dificultad, vidas, pistas, tiempo, username, contrasena, edad, avatar, inventario, tiempo_partidas)
# print(nuevo_jugador.mostrar())
no_existe = 1
sigue_pregunta = 0
return new_game
elif attempt == 2:
print('Lo siento vuelve a crear la partida')
time.sleep(3)
menu_juego()
elif attempt < 2:
print(f'Wrong! De verdad eres {username}?')
attempt = attempt + 1
time.sleep(2)
sigue_pregunta = 1
else:
sigue_pregunta = 0
if no_existe == 0:
while True:
try:
contrasena = input('Contrasena: ')
edad = int(input('Edad: '))
break
except:
print('Ingresaste un dato invalido')
avatar = input('''Elige el Avatar de tu jugador:
1. Scharifker
2. <NAME>
3. Pelusa
4. Gandhi
5. Ghost
6. Richtofen
7. <NAME>.jr\n >>''')
        while (not avatar.isnumeric()) or (int(avatar) < 1) or (int(avatar) > 7):
            avatar = input("Ingreso invalido, ingrese una opcion valida: ")
if avatar == '1':
avatar = 'Sharifker'
elif avatar == '2':
avatar = '<NAME>'
elif avatar == '3':
avatar = 'Pelusa'
elif avatar == '4':
avatar = 'Gandhi'
elif avatar == '5':
avatar = 'Ghost'
elif avatar == '6':
avatar = 'Richtofen'
elif avatar == '7':
avatar = '<NAME>.jr'
        # pre-load inventory items here for the games that are not implemented yet, to make testing easier
inventario = ''
tiempo_partidas = ''
new_game = Jugador(dificultad, vidas, pistas, tiempo, username, contrasena, edad, avatar, inventario, tiempo_partidas)
# print(nuevo_jugador.mostrar())
return new_game
def comienza_partida(new_game):
print(new_game.mostrar())
primer_discurso(new_game)
print(ready)
time.sleep(3)
segundo_discurso(new_game)
time.sleep(3)
    # x: index of the room where the player currently is
    x = 1
    # function call to the API
    api = api_call()
    continuar = 1
    # minute at which the player starts
    instanteInicial = datetime.datetime.now().minute
    # visit counters per room
    lab = 0
    biblio = 0
    plaza = 0
    pasillo = 0
    servers = 0
    # flags that record whether certain games were already won
boolean_count = 0
pizarra_count = 0
math_count = 0
cripto_count = 0
random_count = 0
while continuar == 1:
        # the clock starts ticking
instanteFinal = datetime.datetime.now().minute
end = instanteFinal - instanteInicial
if end == new_game.ded_time():
se_acabo(new_game,instanteInicial)
print('Se te acabo el tiempo lo siento....................')
time.sleep(5)
menu_juego()
dic = api[x]
name = dic.get('name')
cosas = dic.get('objects')
room = Cuartos(name)
print(dibujos_rooms[x])
print(room.mostrar())
print('Puedes ver el Menu apretando la letra (M)')
movimiento = input('Hacia donde te diriges?\n>> ')
if movimiento.lower() == 'w':
center_obj = cosas[0]
name = center_obj.get('name')
position = center_obj.get('position')
objeto_center = Objetos(name,position)
print(objeto_center.mostrar())
center_game = center_obj.get('game')
name_game = center_game.get('name')
# juego = Juegos(name_game)
# print(juego.mostrar())
if x == 2:
print('NOOOOO, pisaste el saman perdiste una vida cuidado!!')
new_game.quito_vida(1)
if new_game.game_over() == True:
se_acabo(new_game,instanteInicial)
menu_juego()
if center_game.get('message_requirement') != None:
print(f'Bienvenido a ',center_game.get('name'),center_game.get('message_requirement'))
time.sleep(3)
valido_premio = new_game.check_inventario(center_game.get('award'))
valido_requirement = new_game.check_inventario(center_game.get('requirement'))
# print(valido_requirement)
if valido_premio == True:
print(dibujos_closeup[x][0])
print('Ya pasaste por aqui y ganaste, tienes en tu INVENTARIO -->',center_game.get('award'))
time.sleep(2)
elif valido_requirement == True or center_game.get('requirement') == False or x == 0 or x == 2:
print(dibujos_closeup[x][0])
if x == 0:
#SOUP HARD
if pizarra_count == 0:
logrado = sopa_letras(name_game,center_game,new_game, instanteInicial)
if logrado == 1:
                            pizarra_count = 1
else:
print('Buu')
x = 0
elif pizarra_count == 1:
print('No puedes volver a jugar, ya ganaste este juego tienes talento')
time.sleep(2)
x = 0
elif x == 1:
#AHORCADO HARD
ahorcado(name_game, center_game, new_game, instanteInicial)
pass
elif x == 2:
list_awards = center_game.get('requirement')
# print(list_awards)
first_award = list_awards[0]
# print(first_award)
second_award = list_awards[1]
print(second_award)
valido_1 = new_game.check_inventario(first_award)
valido_2 = new_game.check_inventario(second_award)
if valido_1 == True and valido_2 == True:
#Solve logic
solve_logic(name_game,center_game,new_game, instanteInicial)
pass
else:
print('No puedes entrar a jugar el juego del saman pero ya te quite 1 vida r.i.p')
time.sleep(2)
elif x == 3:
#LOGICA BOOLEANA
if boolean_count == 0:
logrado = logic_bool(name_game,center_game,new_game, instanteInicial)
if logrado == 1:
boolean_count = 1
print('Adelante tu puedes')
time.sleep(2)
x = 0
elif logrado == 0:
print('Lo siento tienes que completar el juego para pasar')
time.sleep(2)
pass
elif boolean_count == 1:
print('Adelante ya habias destruido el candado y completado el juego')
time.sleep(2)
x = 0
else:
pass
elif valido_requirement == False and x != 4:
print(f'Lo siento no puedes pasar, necesitas -->', center_game.get('requirement'))
buen_continue()
if x == 4:
list_awards = center_game.get('requirement')
first_award = list_awards[0]
second_award = list_awards[1]
valido_1 = new_game.check_inventario(first_award)
valido_2 = new_game.check_inventario(second_award)
if valido_1 == True and valido_2 == True:
refranes(new_game, instanteInicial)
else:
print('Todavia te falta algo, sigue buscando buena suerte')
time.sleep(2)
pass
elif movimiento.lower() == 'a':
left_obj = cosas[1]
name = left_obj.get('name')
position = left_obj.get('position')
objeto_izq = Objetos(name,position)
left_game = left_obj.get('game')
name_game = left_game.get('name')
print(objeto_izq.mostrar())
if left_game.get('message_requirement') != None:
print(f'Bienvenido a ',left_game.get('name'),left_game.get('message_requirement'))
time.sleep(3)
valido_requirement = new_game.check_inventario(left_game.get('requirement'))
valido_premio = new_game.check_inventario(left_game.get('award'))
if valido_premio == True:
print(dibujos_closeup[x][1])
print('Ya pasaste por aqui y ganaste, tienes en tu INVENTARIO -->',left_game.get('award'))
buen_continue()
pass
elif valido_requirement == True or left_game.get('requirement') == False:
print(dibujos_closeup[x][1])
if x == 0:
python_game(name_game,left_game,new_game, instanteInicial)
pass
elif x == 1:
#preguntas matematica
if math_count == 0:
logrado = preguntas_mate(name_game,left_game,new_game, instanteInicial)
if logrado == 1:
math_count = 1
else:
print('Aprende a derivar vale')
time.sleep(2)
x = 1
else:
print('No te dejare volver a derivar que sufrimiento chao')
time.sleep(2)
elif x == 2:
#QUIZZIS
millonario(name_game,left_game,new_game, instanteInicial)
pass
elif x == 4:
#Palabras mezcladas
p_mezcladas(name_game,left_game,new_game, instanteInicial)
pass
else:
print(f'Lo siento no puedes pasar, necesitas -->', left_game.get('requirement'))
buen_continue()
elif movimiento.lower() == 'd':
right_obj = cosas[2]
name = right_obj.get('name')
position = right_obj.get('position')
objeto_right = Objetos(name,position)
right_game = right_obj.get('game')
name_game = right_game.get('name')
print(objeto_right.mostrar())
if right_game.get('message_requirement') != None:
print(f'Bienvenido a ',right_game.get('name'),right_game.get('message_requirement'))
time.sleep(3)
valido_requirement = new_game.check_inventario(right_game.get('requirement'))
valido_premio = new_game.check_inventario(right_game.get('award'))
if valido_premio == True:
print(dibujos_closeup[x][2])
print('Ya pasaste por aqui y ganaste, ya obtuviste -->',right_game.get('award'))
time.sleep(5)
pass
elif valido_requirement == True or right_game.get('requirement') == False:
print(dibujos_closeup[x][2])
if x == 0:
contra = input('Contraseña: ')
while not ("".join(contra.split(" "))).isalpha():
contra = input("Ingreso invalido, ingrese el contra :\n >> ")
if contra == 'escapandoando':
adivinanzas(name_game,right_game,new_game, instanteInicial)
pass
else:
print('Contraseña invalida, fuera de aqui')
time.sleep(2)
pass
elif x == 1:
#Criptograma
if cripto_count == 0:
logrado = criptograma(name_game, right_game,new_game, instanteInicial)
if logrado == 1:
cripto_count = 1
else:
print('Codigo Cesar, no es tan hard buscalo en google')
time.sleep(2)
else:
print('Ya pasaste por aqui revisa tu inventario dude')
time.sleep(2)
elif x == 2:
#MEmoria con EMOJIS should be easy
memoria(name_game, right_game,new_game, instanteInicial)
pass
elif x == 4:
#RAndom number generator
if random_count == 0:
logrado = random_number(name_game, right_game,new_game, instanteInicial)
if logrado == 1:
random_count = 1
else:
print('Este juego es muy dificil confirmo')
elif random_count == 1:
print('Para que vas a volver a entrar aqui alo')
time.sleep(2)
else:
print(f'Lo siento no puedes pasar, necesitas -->', right_game.get('requirement'))
time.sleep(3)
elif movimiento == ' ':
if x == 0:
lab = lab + 1
x = 4
elif x == 1:
biblio = biblio + 1
x = 3
elif x == 2:
plaza = plaza + 1
x = 1
elif x == 3:
pasillo = pasillo + 1
if boolean_count == 1:
# print('Felicidades por desbloquear un nuevo cuarto atento con lo que obtienes aqui!')
x = 0
else:
print('Lo siento tienes que completar el juego para pasar')
time.sleep(3)
x = 3
elif x == 4:
servers = servers + 1
print('Pa onde vas tu? ')
time.sleep(2)
x = 4
elif movimiento.lower() == 's':
if x == 0:
lab = lab + 1
x = 3
elif x == 1:
biblio = biblio + 1
x = 2
elif x == 2:
plaza = plaza + 1
print('Pa onde vas tu? ')
buen_continue()
x = 2
elif x == 3:
pasillo = pasillo + 1
x = 1
elif x == 4:
x = 0
servers = servers + 1
elif movimiento.lower() == 'm':
print(new_game.mostrar())
print(f'''
<------- (S) Te devuelves de cuarto (S) <-------
-------> (SPACE) Avanzas al siguiente cuarto (SPACE) ------->
Para los objetos: (W) Centro (W)
(A) Izquierda (A)
(D) Derecha (D)
(I) Para ver tu inventario (I)
Acuerdate que puedes usar la palabra (pista) en algunos juegos!\n''')
sigue_partida = input('''
Este es el menu lee cuidadosamente y para salirte del juego
utiliza la letra (N) para seguir en el juego (Y)''')
while not ("".join(sigue_partida.split(" "))).isalpha():
sigue_partida = input("Ingreso invalido:\n >> ")
if sigue_partida == 'y':
pass
elif sigue_partida == 'n':
menu_juego()
elif movimiento.lower()== 'i':
new_game.veo_inventario()
else:
            print('No te moviste pa ningun lado')
time.sleep(3)
continuar = 1
def menu_juego():
while True:
opcion = input("""
Elige una opcion:
1. Nueva Partida
2. Instrucciones
3. Records
4. Para salir \n >> """)
while (not opcion.isnumeric()) or (int(opcion) < 1):
opcion = input("Ingreso invalido, ingrese una opcion valida: ")
if opcion == '1':
new_game = partida_nueva()
comienza_partida(new_game)
elif opcion == '2':
instrucciones()
elif opcion == '3':
records()
elif opcion == '4':
break
else:
print('Opcion invalida')
def main():
print(bienvenido)
menu_juego()
if __name__ == '__main__':
main()
```
#### File: JoseRubin09/EscapemetProyect/partida.py
```python
class Partida:
    '''
    vidas: float, number of lives; the methods below add to or subtract from it
    pistas: int, decreases as the player spends hints
    tiempo: time available for the run
    '''
def __init__(self, dificultad, vidas, pistas, tiempo):
self.dificultad = dificultad
self.vidas = vidas
self.pistas = pistas
self.tiempo = tiempo
def mostrar(self):
return(f"Vidas: {self.vidas}\nPistas: {self.pistas}\nTiempo: {self.tiempo}\n")
def mostrar_tiempo(self):
return(f'{self.tiempo}')
def quito_vida(self,num):
self.vidas = self.vidas - num
return(f'{self.vidas}')
def game_over(self):
if self.vidas <= 0:
return True
def quito_pista(self,num):
self.pistas = int(self.pistas) - num
if self.pistas == 0:
            print('Lo siento no te quedan mas pistas suerte jeje')
return False
else:
print(f'Perdiste una pista te quedan: {self.pistas} en todo el juego')
return True
def agrego_vida(self,num):
self.vidas = self.vidas + num
def ded_time(self):
return (self.tiempo)
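# Minimal sketch of the Partida API (the values match the 'Facil' difficulty used in main.py):
#
#   partida = Partida('Facil', vidas=5.0, pistas=5, tiempo=30)
#   partida.quito_vida(1)
#   print(partida.mostrar())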
``` |
{
"source": "JoseSalamancaCoy/RM_DATA",
"score": 2
} |
#### File: makesens/tratamiento/tratamiento.py
```python
import pandas as pd
import numpy as np
import os
from pandas.io.json import json_normalize
import json
from datetime import datetime, timedelta
from scipy.ndimage import gaussian_filter1d
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_boston
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LinearRegression
import statsmodels.formula.api as sfm
from sklearn import preprocessing
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import mean_squared_error, r2_score
#from sklearn.externals import joblib
from sklearn.preprocessing import LabelEncoder
from numpy.core.umath_tests import inner1d
################################### load data ###################################
def loaddata_racimo(name,station,window,resample = '1T'):
"""
Parameters:
* name --> name of the file to load
* station --> number associated with the station
* window --> window width for smoothing
* resample --> resampling
    Return:
* DataFrame
"""
data=pd.read_csv(name,delimiter=',')
data=data.drop(data[data.id_parametro =='alert'].index)
data.valor=pd.to_numeric(data.valor)
pivot_data=pd.pivot_table(data,index='fecha_hora_med', columns='id_parametro',
values='valor').reset_index().set_index("fecha_hora_med")
#keep really needed cols and important variables ("pm10_a","pm25_a","t","p","h")
cols_keep=["pm10","pm10_a","pm25","pm25_a","t","p","h"]
pivot_data=pivot_data[cols_keep]
pivot_data.index = pd.DatetimeIndex(pivot_data.index) - pd.Timedelta(hours = 5)
pivot_data = pivot_data.resample(resample).mean()
pivot_data.index = pivot_data.index.strftime('%Y-%m-%d %H:%M:%S')
for i in pivot_data.columns:
pivot_data[i]=rolling(pivot_data[i],window)
return pivot_data
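# Usage sketch (the file name is hypothetical): one station file, resampled to 1-minute
# means and smoothed with a 5-sample rolling median.
#
#   racimo = loaddata_racimo('racimo_station_1.csv', station=1, window=5, resample='1T')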
def loaddata_amb(name,format):
"""
Parameters:
* name --> name of the file to load
    Return:
* DataFrame
"""
if format == 'csv':
data_AMB = pd.read_csv(name, header= 0)
if format == 'xlsx':
data_AMB = pd.read_excel(name, header= 0)
data_AMB.columns = list(data_AMB.iloc[1])
data_AMB.index = list(data_AMB["Date&Time"])
    data_AMB = data_AMB[3:-8]  # removed row 1 (the one with the units)
data_AMB.index = pd.DatetimeIndex(data_AMB.index,dayfirst = True)
data_AMB.index = data_AMB.index.astype(str)
data_AMB = data_AMB.drop(['Date&Time'], axis=1)
for i in data_AMB.columns:
data_AMB=clean_nan(data_AMB,i)
return data_AMB
def loaddata_davis(name):
Davis = pd.read_csv(name, sep=" ", header=None)
Davis.columns = ["{}_{}".format(Davis.iloc[0][i], Davis.iloc[1][i]).replace("nan_","") for i in Davis.columns]
Davis = Davis.drop(index=[0,1])
_Time=[]
for i in Davis["Time"]:
if(len(i) == 4): _Time.append("0"+i)
else: _Time.append(i)
Davis["Date_Time"] = Davis["Date"] +"_" + _Time #String Date Time
    Davis["Date_Time"] = [datetime.strptime(i, "%d/%m/%y_%H:%M") + timedelta(days=33,hours=1+5,minutes=14) for i in Davis["Date_Time"]]  # list of DateTime objects with the time correction applied
    Davis.drop(columns = ["Date", "Time"], inplace=True)  # drop the original Date and Time columns
Davis.index = Davis["Date_Time"]
Davis.drop(columns = ["Date_Time"], inplace = True)
keys_floats = list(Davis.columns[0:6].values) + list(Davis.columns[7:9]) + list(Davis.columns[10:-1].values)
Davis[keys_floats] = Davis[keys_floats].astype("float")
return Davis
def loaddata_eva(name):
data = pd.read_csv(name, sep=',')
data = data[['timestamp', 'sensor', 'pm10', 'pm2.5', 'pm1', 'noise', 'illuminance',
'irradiance', 'temperature', 'humidity', 'presure']]
data["timestamp"] = data["timestamp"].astype("datetime64")
dat2 = data.drop(data[data.sensor != 2.0 ].index)
dat1 = data.drop(data[data.sensor == 2.0 ].index)
dat2.drop(columns = [ 'noise', 'illuminance', 'irradiance', 'temperature', 'humidity', 'presure'], inplace = True)
result1 = pd.DataFrame(index=data['timestamp'])
result2 = pd.DataFrame(index=data['timestamp'])
for i in dat1.columns[2:]:
var1 = Get_var(dat1,i)
result1 = pd.merge(result1,var1,left_index=True,right_index=True)
for i in dat2.columns[2:]:
var2 = Get_var(dat2,i)
result2 = pd.merge(result2,var2,left_index=True,right_index=True)
result1.columns = ['pm10_2', 'pm2.5_2', 'pm1_2', 'noise', 'illuminance',
'irradiance', 'temperature', 'humidity', 'presure']
result2.columns = ['pm10_n', 'pm2.5_n', 'pm1_n']
result = pd.DataFrame()
result = pd.merge(result1,result2,left_index=True,right_index=True)
result =result.reindex(columns = ['pm10_2', 'pm2.5_2', 'pm1_2','pm10_n', 'pm2.5_n', 'pm1_n', 'noise', 'illuminance', 'irradiance', 'temperature', 'humidity', 'presure'])
result = result.resample('1T').mean()
result = result.drop_duplicates()
return result
###########################################################################################################
def json_to_csv_eva(name):
contenido = os.listdir(name)
index = []
result = pd.DataFrame()
for i in range(0,len(contenido)):
f =open(name + str(contenido[i]))
json_file = json.load(f)
a = pd.json_normalize(json_file['data'])
col = []
if 'sensor' in a.columns:
for j in range(0,len(a['data_type'])):
col.append(a['data_type'][j] + '_' + str(a['sensor'][j]))
data =pd.DataFrame(columns =col)
data.loc[0] = list(a['value'])
index.append(json_file['timestamp'])
else:
continue
if i == 0:
result = data
else:
result = pd.concat([result,data],axis= 0)
result.index = index
return result
##########################################################################################################
def Get_var(Data,name_variable):
variable = json_normalize( [json.loads(i) for i in Data[name_variable].dropna()] )
variable["Date_Time"] = list(Data[["timestamp",name_variable]].dropna()["timestamp"])
variable.index= variable['Date_Time']
return variable['value']
def rolling(y,n): #Rolling data to smooth values
rolling = y.rolling(window = n , center = True , min_periods = 1)
return rolling.median()
def clean_nan(data,var):
b = np.array(data[var])
b = np.where(b == 'NoData',np.nan,b)
b = np.where(b == '---',np.nan,b)
data[var] = list(b)
return data
def cutdata(datas:list,start_date,end_date):
result = []
for i in datas:
mask = (i.index >= start_date) & (i.index <= end_date)
i=i[mask]
result.append(i)
return result
def renamecol(datas:list,station:list):
result = []
for i in range(0,len(datas)):
result.append(datas[i].rename(columns = {'pm10':'pm10_'+ str(station[i]),'pm10_a':'pm10_a'+str(station[i]), 'pm25':'pm25_'+str(station[i]), 'pm25_a':'pm25_a'+str(station[i]),'t': 't_'+str(station[i]), 'p': 'p_'+str(station[i]), 'h': 'h_'+str(station[i])}))
return result
def Merge(datas:list):
result = pd.DataFrame()
for i in range(0,len(datas)-1):
if i == 0:
result = pd.merge(datas[i],datas[i+1],left_index=True,right_index=True)
else:
result = pd.merge(result,datas[i+1],left_index=True,right_index=True)
result = result.dropna()
return result
######################################################################################################################
def LinearModel(variables,porcentage):
"""
Parameters:
- Variables:list -->
- Porcentage:float -->
Returns:
        - coefficients:list --> fitted linear coefficients
        - Intercept:float --> fitted intercept
        - Yc --> values predicted by the fitted model
"""
Y = variables[0]
X = pd.DataFrame({str(i):variables[i] for i in range(1,len(variables))})
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size = porcentage, random_state=9)
lin_reg_mod = LinearRegression()
lin_reg_mod.fit(X_train, y_train)
pred = lin_reg_mod.predict(X_test)
test_set_rmse = (np.sqrt(mean_squared_error(y_test, pred)))
coef = lin_reg_mod.coef_
intercept = lin_reg_mod.intercept_
Yc = sum([variables[i] * coef[i-1] for i in range(1,len(variables))] ) + intercept
return lin_reg_mod.coef_, lin_reg_mod.intercept_ , Yc
#return Yc
def RamdonForest(variables,porcentage):
"""
Parameters:
- Variables:list -->
- Porcentage:float -->
Returns:
        - pd.Series --> predictions of the fitted model, indexed like the target variable
"""
Y = variables[0]
X = pd.DataFrame({str(i):variables[i] for i in range(1,len(variables))})
train_features,test_features,train_labels,test_labels=train_test_split(X,Y,test_size=porcentage,random_state=0)
rf=RandomForestRegressor(n_estimators=800,random_state=0)
rf.fit(train_features,train_labels)
predictions=rf.predict(test_features)
errors=abs(predictions-test_labels)
mape=100*abs(errors/test_labels)
rmse=np.sqrt(np.mean(errors**2))
accuracy=100-np.mean(mape)
mae = np.mean(errors)
#return mae,rmse, accuracy, rf.predict(X)
return pd.Series(rf.predict(X),variables[0].index)
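# Usage sketch (the column names are hypothetical): calibrate a low-cost PM2.5 signal
# against a reference column, holding out 20% of the rows for testing.
#
#   coef, intercept, pm25_lin = LinearModel([merged['pm25_ref'], merged['pm25_1']], 0.2)
#   pm25_rf = RamdonForest([merged['pm25_ref'], merged['pm25_1'], merged['t_1'], merged['h_1']], 0.2)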
``` |
{
"source": "josesalasdev/apisrun",
"score": 2
} |
#### File: api/engines/mongo.py
```python
from api import configs
import motor.motor_asyncio
class MongoEngine(object):
"""Mongo client"""
_instance = None
def __new__(cls):
if MongoEngine._instance is None:
client = motor.motor_asyncio.AsyncIOMotorClient(configs.ENGINE_URI)
MongoEngine._instance = client[configs.ENGINE_DB_NAME]
return MongoEngine._instance
db = MongoEngine()
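# Usage sketch (the collection name is hypothetical): because __new__ caches the database
# handle, every importer of `db` shares one AsyncIOMotorDatabase instance.
#
#   from api.engines.mongo import db
#
#   async def get_user(user_id):
#       return await db.users.find_one({"_id": user_id})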
``` |
{
"source": "josesalasdev/apisrun-cli",
"score": 2
} |
#### File: apisrun-cli/apisrun/main.py
```python
import click
from apisrun.resources.up import up as command_up
from apisrun.resources.list import list as command_list
from apisrun.resources.down import down as command_down
__version__ = "0.0.3"
@click.group()
@click.version_option(__version__)
@click.pass_context
def cli(ctx):
"""CLI group"""
pass
cli.add_command(command_up)
cli.add_command(command_list)
cli.add_command(command_down)
```
#### File: apisrun/utils/commons.py
```python
import yaml
import os
import socket
from contextlib import closing
def validate_path_file(path_file) -> bool:
"""Validate th path of a file."""
if os.path.exists(path_file) and os.path.isfile(path_file):
if os.access(path_file, os.R_OK):
return True
return False
def load_yaml(path_file: str):
"""Transform a yml to dict"""
yaml_file = open(path_file, "r")
return yaml.load(yaml_file, yaml.Loader)
def validate_errors_apisrun_yml(data) -> bool:
"""Validate apisrun data yml."""
if len(data) < 1 and type(data) is not dict:
return True
error = False
# TODO: Refactor
for _, endpoints in data.items():
if type(endpoints) is not list or len(endpoints) < 1:
error = True
for v in endpoints:
if type(v) is not dict or len(v) < 3:
error = True
return error
def singleton(class_):
"""Singleton"""
instances = {}
def getinstance(*args, **kwargs):
if class_ not in instances:
instances[class_] = class_(*args, **kwargs)
return instances[class_]
return getinstance
def has_open_port(host, port):
"""Validate if the host port is open."""
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
if sock.connect_ex((host, port)) == 0:
return False
return True
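# Usage sketch (the file name is hypothetical): validate and load an apisrun YAML file and
# check that the local port is free before starting the mock service.
#
#   if validate_path_file("apisrun.yml"):
#       data = load_yaml("apisrun.yml")
#       if not validate_errors_apisrun_yml(data) and has_open_port("127.0.0.1", 8000):
#           print("configuration looks valid and port 8000 is available")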
``` |
{
"source": "josesalasdev/apisrun",
"score": 2
} |
#### File: apisrun/tests/conftest.py
```python
from unittest.mock import MagicMock
class AsyncMock(MagicMock):
async def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
```
#### File: tests/test_validators/test_base.py
```python
import pytest
from fastapi import HTTPException
from api.validators.base import ValidatorMixin
from tests.conftest import AsyncMock
@pytest.mark.asyncio
async def test_validator_mixin_in_node_method_with_model_already_exists(mocker):
mock_repo = mocker.patch(
"api.repositories.repository.count", new_callable=AsyncMock
)
mock_repo.side_effect = [1, 0]
v = ValidatorMixin()
with pytest.raises(HTTPException):
await v.node("users", "/users")
@pytest.mark.asyncio
async def test_validator_mixin_in_node_method_with_path_already_exists(mocker):
mock_repo = mocker.patch(
"api.repositories.repository.count", new_callable=AsyncMock
)
mock_repo.side_effect = [1, 1]
v = ValidatorMixin()
with pytest.raises(HTTPException):
await v.node("users", "/users")
@pytest.mark.asyncio
async def test_validator_mixin_in_node_method_with_validation_ok(mocker):
mock_repo = mocker.patch(
"api.repositories.repository.count", new_callable=AsyncMock
)
mock_repo.side_effect = [0, 0]
v = ValidatorMixin()
expect = await v.node("users", "/users")
assert expect is None
``` |
{
"source": "JoseSalgado1024/anonimizar_columnas",
"score": 3
} |
#### File: anonimizar_columnas/helpers/helpers.py
```python
import json
import os
import time
import random
import socket
import hashlib
try:
lib = __import__('pandas')
globals()['pd'] = lib
except ImportError:
pandas_import_error_msg = \
'''
Este script utiliza la libreria de Python Pandas.
Por favor ejecuta:
$ sudo -H pip install pandas
o si se esta utilizando un VirtualEnv:
(py_venv) $ pip install pandas.
'''
print (pandas_import_error_msg)
exit(1)
SAMPLE_CONF = 'config.sample.json'
SAMPLE_DATA = 'data.sample.csv'
TEXT_TYPE = (str, unicode)
LOCAL = os.path.dirname(os.path.abspath(__file__))[:-len('/helpers')]
def clean_str(_somestr):
allowed_chars = 'abcdef01234567890'
cleaned_str = ''
for c in _somestr:
if c in allowed_chars:
cleaned_str += c
return cleaned_str
def hash_cache(_origin, _hash):
cache_file = 'conversion_ids.keys'
if isinstance(_origin, tuple):
_origin = _origin[0]
try:
lines = open(cache_file, 'r').readlines()
cache_lines = {}
for line in lines:
try:
k = line.split(':')[0]
v = line.split(':')[1]
cache_lines.update({k: v})
except Exception as e:
print ('Err Msg: \"{}\".'.format(e))
except IOError:
cache_lines = {}
if _origin in cache_lines.keys():
# do something
return clean_str(cache_lines[_origin])
else:
# Do other thing!
cache_lines.update({_origin: _hash})
with open(cache_file, 'w') as cache:
for k, v in cache_lines.items():
cache.write('{}:{}\n'.format(k, clean_str(v)))
def resolve_kwargs(_conf, _kwargs):
"""
    Merge user-provided kwargs into a default configuration.
    Args:
        - _conf: default configuration dict.
        - _kwargs: user-supplied overrides.
Return:
- Dict.
"""
if not isinstance(_conf, dict) or not isinstance(_kwargs, dict):
raise TypeError('Argumentos no validos.')
_tmp_configs = {}
for k, v in _conf.items():
if k in _kwargs.keys() and isinstance(_kwargs.get(k), v.__class__):
if isinstance(v, dict):
_tmp_configs.update({k: resolve_kwargs(v, _kwargs.get(k))})
else:
_tmp_configs.update({k: _kwargs[k]})
else:
_tmp_configs.update({k: v})
return _tmp_configs
def load_config(config_path=SAMPLE_CONF):
"""
:param config_path:
:return:
"""
if not isinstance(config_path, TEXT_TYPE):
print ('config_path debe ser una instancia de STR o UNICODE.')
return None
if config_path == SAMPLE_CONF:
# Load Sample
config_full_path = os.path.join(LOCAL, 'samples', config_path)
else:
# Load Custom Config
config_full_path = config_path
try:
return json.load(open(config_full_path, 'rb'))
except ValueError:
print ('No es posible decodificar la configuracion: {}, no JSON parseable.'.format(config_path))
return None
except IOError:
print ('No es posible localizar la configuracion: {}.'.format(config_path))
return None
def anonymize_cols(_pddf=None, columns=None):
"""
    Replace the values of the columns listed in 'columns' with a hash.
:param _pddf:
:return:
"""
if not isinstance(_pddf, pd.DataFrame):
print ('_pddf debe ser una instancia de Pandas.DataFrame')
return None
if not isinstance(columns, list):
print ('columns debe ser una instancia de LIST.')
return None
headers_count = len(columns)
for col in columns:
try:
_pddf[col] = _pddf[col].apply(lambda x: generate_unique_id(x))
headers_count -= 1
except Exception as e:
print (e)
print ('Fallo el procesamiento de la columna:\"{}\", err: NOT-FOUND.'.format(col))
if headers_count > 0:
print ('No fue posible procesar todas las columnas')
return _pddf
def load_input(_input_filename=None):
"""
    Load and validate the input CSV.
    :param _input_filename:
    Return:
        - Pandas.DataFrame: successful load.
        - None: the resource could not be loaded.
"""
if _input_filename == SAMPLE_DATA:
# Load Sample
_input_filename = os.path.join(LOCAL, 'samples', _input_filename)
    # Validate the input path:
    if not os.path.exists(_input_filename):
        print ('No es posible localizar el archivo: {}.'.format(os.path.basename(_input_filename)))
        return None
with open(_input_filename, 'rb') as tmp_f:
tmp_lines = tmp_f.readlines()
if len(tmp_lines) > 0:
csv_headers = tmp_lines[0].replace('\n', '').replace(' ', '').split(',')
try:
return pd.read_csv(_input_filename, skipinitialspace=True, usecols=csv_headers)
except:
pass
def generate_unique_id(*args):
"""
source: StackOverFlow.
"""
t = long(time.time() * 1000)
r = long(random.random() * 100000000000000000)
try:
a = socket.gethostbyname(socket.gethostname())
except Exception as e:
print (e)
a = random.random() * 100000000000000000
_uid = str(t) + ' ' + str(r) + ' ' + str(a) + ' ' + str(args)
_uid = hashlib.md5(_uid).hexdigest()
cached_hash = hash_cache(args, _uid)
if cached_hash:
return cached_hash
else:
return _uid
def is_a_valid_conf(_conf=None):
"""
    Validate a configuration.
    Args:
        - _conf:
            - description: provided configuration that must be validated.
            - type: Dict.
    Return:
        - bool:
            - True: the config is valid.
            - False: the config is not valid.
"""
if not isinstance(_conf, dict):
print ('_conf debe ser una instancia de DICT.')
return False
required = \
{
'key': 'columns',
'type': list,
'content': TEXT_TYPE
}
# exists required.key?
if not required['key'] in _conf.keys():
print ('{} es requerida!'.format(required['key']))
return False
if not isinstance(_conf[required['key']], required['type']):
print ('{} debe contener {}'.format(required['key'], required['type']))
return False
if False in [isinstance(e, required['content']) for e in _conf['columns']]:
print ('_conf[\'columns\'] debe ser una {} de {}'.format(required['type'],
required['content']))
return False
return True
def write_csv(df, output_fn=None):
"""
    Write the processed output to a CSV file.
    Args:
        - df: Pandas.DataFrame.
            - Processed data.
        - output_fn: str or unicode.
            - Name for the output CSV file.
    Return:
        - Str: name of the output file.
"""
if not output_fn:
output_fn = '{}.csv'.format(generate_unique_id('abc')[0:15])
for column in df.columns:
for idx in df[column].index:
x = df.get_value(idx, column)
try:
x = unicode(x.encode('utf-8', 'ignore'),
errors='ignore') if type(x) == unicode else unicode(str(x), errors='ignore')
df.set_value(idx, column, x)
except Exception as e:
print ('encoding error: {0} {1}'.format(idx, column))
print ('Err Msg: \"{}\".'.format(e))
df.set_value(idx, column, '')
continue
try:
df.to_csv(output_fn, index=False)
return output_fn
except Exception as e:
print ('Ocurrio un fallo al intentar grabar el archivo {}'.format(output_fn))
print ('Err Msg: \"{}\".'.format(e))
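# Usage sketch (the sample files ship with the repo): load the sample config and data,
# hash the configured columns and write the anonymized CSV.
#
#   conf = load_config()                  # defaults to samples/config.sample.json
#   df = load_input(SAMPLE_DATA)          # resolves to samples/data.sample.csv
#   if conf and df is not None and is_a_valid_conf(conf):
#       write_csv(anonymize_cols(df, columns=conf['columns']))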
``` |
{
"source": "JoseSalgado1024/b42fly",
"score": 2
} |
#### File: uav_data/api/serializers.py
```python
from rest_framework.serializers import *
from .models import *
class ExtraFieldSerializer(ModelSerializer):
class Meta:
model = ExtraField
fields = ('key', 'value', )
class PolicySerializer(ModelSerializer):
class Meta:
model = Policy
fields = '__all__'
class PolicyListSerializer(PolicySerializer):
def to_representation(self, instance):
return \
{
'identifier': instance.identifier,
'name': instance.name,
'web_url': '/api/v1/policy/%s' % instance.identifier,
}
class GeometryCollectionSerializer(ModelSerializer):
class Meta:
model = GeometryCollection
fields = '__all__'
def to_representation(self, instance):
points = PointFeatureLCSerializer(PointFeature.objects.filter(geometry_collection=instance.identifier),
many=True)
areas = AreaFeatureSerializerLC(AreaFeature.objects.filter(geometry_collection=instance.identifier), many=True)
extras = ExtraFieldSerializer(ExtraField.objects.filter(geometry_collection=instance.identifier), many=True)
response = ModelSerializer.to_representation(self, instance)
response.update({
'extras': extras.data,
'points': points.data,
'areas': areas.data
})
return response
class GeometryCollectionListSerializer(GeometryCollectionSerializer):
def to_representation(self, instance):
return \
{
'identifier': instance.identifier,
'name': instance.name,
'web_url': '/api/v1/collection/%s' % instance.identifier
}
class PointFeatureSerializer(ModelSerializer):
class Meta:
model = PointFeature
fields = '__all__'
def to_representation(self, instance):
response = ModelSerializer.to_representation(self, instance)
response.update({'policy': PolicyListSerializer(instance.policy).data,
'geometry_collection': GeometryCollectionListSerializer(instance.geometry_collection).data})
return response
class PointFeatureLCSerializer(PointFeatureSerializer):
def to_representation(self, instance):
return \
{
'identifier': instance.identifier,
'name': instance.name,
'web_url': '/api/v1/point/%s' % instance.identifier,
}
class AreaFeatureSerializerRUD(ModelSerializer):
class Meta:
model = AreaFeature
fields = '__all__'
class AreaFeatureSerializerLC(AreaFeatureSerializerRUD):
def to_representation(self, instance):
return \
{
'identifier': instance.identifier,
'name': instance.name,
'web_url': '/api/v1/area/%s' % instance.identifier,
}
``` |
{
"source": "JoseSalgado1024/in_downloader",
"score": 2
} |
#### File: in_downloader/include/commands.py
```python
import click
@click.group()
def in_downloader():
pass
@click.command()
def initdb():
click.echo('Initialized the database')
import requests
r = requests.get('')
r.text
r.headers
@click.command()
def dropdb():
click.echo('Dropped the database')
```
#### File: in_downloader/include/toolbox.image.py
```python
from helpers import resolve_kwargs, guid
from os import path, makedirs
import cv2
import numpy as np
def split_image():
pass
def crop_image(image, **kwargs):
defaults = resolve_kwargs({"enable": True,
"destination_folder": "cropped_images",
"fn_mark": "cropped",
"crop_x0": 0,
"crop_x1": 0,
"crop_y0": 0,
"crop_y1": 0}, kwargs)
if isinstance(image, list):
        return filter(None, [crop_image(i, **kwargs) for i in image])
img = get_image(image)
if img:
img_x, img_y, img_channels = img.shape
if img_x >= img_y:
x_start = int((img_x - img_y) / 2)
x_end = x_start + img_y
y_start = 0
y_end = img_y
else:
y_start = int((img_y - img_x) / 2)
y_end = y_start + img_x
x_start = 0
x_end = img_x
img = img[x_start:x_end, y_start:y_end]
if defaults['enable']:
crop_dest_folder = defaults.get('destination_folder')
crop_fn = '{fn}{fn_mark}.{f}'.format(
fn=defaults.get('fn', guid()),
fn_mark=defaults.get('fn_mark'),
f=defaults.get('format', 'png').lower()
)
if not path.exists(crop_dest_folder):
makedirs(crop_dest_folder)
crp_img_filename = path.join(crop_dest_folder, crop_fn)
try:
cv2.imwrite(crp_img_filename, img)
return crp_img_filename
except Exception as e:
print e
def get_image(image):
"""
Normalizar entra de imagenes.
Args:
- image:
Return:
- image.np.ndarray
"""
if isinstance(image, np.ndarray):
print 'Loading from np.array.'
return image
elif isinstance(image, (str, unicode)):
print 'Loading from file.'
        if not path.exists(image):
print 'No existe {}.'.format(image)
return None
try:
return cv2.imread(image)
except:
print 'Fallo carga de imagen {}.'.format(image)
return None
elif isinstance(image, list):
print 'Loading from list.'
return filter(None, [get_image(img) for img in image])
else:
return None
def resize_image(_image_fn, **kwargs):
if not isinstance(_image_fn, (str, unicode)):
print 'Fallo: \'_image_fn\' debe ser una instancia de unicode o str. {}'.format(_image_fn)
defaults = resolve_kwargs({"enable": False,
"resize_x": 50,
"resize_y": 50,
"square_image": True,
"destination_folder": "resided_images",
"fn_mark": "resided"}, kwargs)
respose = {
'status': False,
'msg': ['No ejecutado.'],
'input_img': _image_fn,
'output_img': ''
}
if not isinstance(_image_fn, (str, unicode)):
return respose
if not path.exists(_image_fn):
respose.update({'msg': ['No existe la imagen {}.'.format(_image_fn)]})
print 'Croppenado: \'{}\'.'.format(_image_fn)
try:
img = cv2.imread(_image_fn)
except Exception as load_image_error:
respose.update({'msg': ['Fallo la carga de la imagen{}, error: {}.'.format(_image_fn, load_image_error)]})
return respose
if defaults['square_image']:
img_x, img_y, img_channels = img.shape
if img_x >= img_y:
x_start = int((img_x - img_y) / 2)
x_end = x_start + img_y
y_start = 0
y_end = img_y
else:
y_start = int((img_y - img_x) / 2)
y_end = y_start + img_x
x_start = 0
x_end = img_x
img = img[x_start:x_end, y_start:y_end]
if defaults['enable']:
crop_dest_folder = defaults.get('destination_folder')
crop_img_format = _image_fn.split('.')[len(_image_fn.split('.')) - 1]
crop_fn = '{fn}{fn_mark}.{f}'.format(
fn=_image_fn[:-len(crop_img_format) - 1],
fn_mark=defaults['fn_mark'],
f=crop_img_format
)
if not path.exists(crop_dest_folder):
makedirs(crop_dest_folder)
crp_img_filename = path.join(crop_dest_folder, crop_fn)
respose.update({'image_cropped': {
'path': crp_img_filename,
'fn': crop_fn
},
'msg': ['Imagen {} re-encuadrada a {}x{}'.format(crop_fn,
x_end,
y_end)]})
_tmp_img = cv2.imwrite(crp_img_filename, img)
else:
_tmp_img = img
    cv2.imwrite('resize/image.png', cv2.resize(_tmp_img, (defaults['resize_x'], defaults['resize_y'])))
respose.update({'msg': ['Realizado con exito.'],
'output_img': 'resize/image.png'})
return respose
print get_image(['str'])
``` |
{
"source": "JoseSalgado1024/otto",
"score": 4
} |
#### File: JoseSalgado1024/otto/main.py
```python
from itertools import combinations
from random import randint
import argparse
import math
import sys
# Basics
NUMBERS_DICTIONARY = '0123456789'
SPECIAL_CHARS_DICTIONARY = '.+-*/\\|°¬!"#$&/()=?¡¨*[]{}´+,.;:-_<>@'
WHITE_SPACE = ' '
CHARS_DICTIONARY = 'abcdefghijklmnñopqrstuvwxyz'
DEFAULT_KEYS_AMOUNT = 10
DEFAULT_KEYS_LENGTH = 16
def binomial_coefficient(n: int, k: int) -> int:
"""Calculate Binomial coefficient"""
n_fac = math.factorial(n)
k_fac = math.factorial(k)
n_minus_k_fac = math.factorial(n - k)
return round(n_fac/(k_fac*n_minus_k_fac))
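# e.g. binomial_coefficient(5, 2) == 10; used in the main block below to check that the
# requested number of keys can actually be produced from the dictionary and key length.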
def _default_dictionary(use_uppercase=True, use_numbers=True, use_special_chars=True, use_whitespaces=''):
"""
Build a dictionary by default.
Args:
- use_uppercase: Include upper cases in dictionary.
- Type: bool
- Default: True.
- use_numbers: Include numbers in default dictionary
- Type: bool
- Default: True.
- use_special_chars: Include specials characters in default dictionary.
- Type: bool
- Default: True.
"""
_dict = CHARS_DICTIONARY
_dict += CHARS_DICTIONARY.upper() if use_uppercase else ''
_dict += NUMBERS_DICTIONARY if use_numbers else ''
_dict += WHITE_SPACE if use_whitespaces else ''
_dict += SPECIAL_CHARS_DICTIONARY if use_special_chars else ''
return _dict
def generate_by_random(**kwargs):
"""Generates keys randomly"""
_dictionary_ = kwargs.get('dictionary')
if any(x is None for x in [_dictionary_, kwargs.get('length'), kwargs.get('amount')]):
raise KeyError('Random Dict generator: missing required argument.')
generated_keys = []
for i in range(kwargs.get('amount')):
while True:
nk = ''
while len(nk) < kwargs.get('length'):
nk += _dictionary_[randint(0, len(_dictionary_)-1)]
if nk not in generated_keys:
generated_keys.append(nk)
sys.stdout.write(f'{nk}\n')
break
def generate_by_combinations(**kwargs):
"""Generates Keys using itertools.combinations."""
_dictionary_ = kwargs.get('dictionary')
if any(x is None for x in [_dictionary_, kwargs.get('length'), kwargs.get('amount')]):
raise KeyError('Combination Dict generator: missing required argument.')
for idx, key in enumerate(combinations(_dictionary_, kwargs.get('length'))):
if idx > kwargs.get('amount'):
break
sys.stdout.write(''.join(list(key)) + '\n')
RECORD_GENERATION_METHODS = {
'combinations': generate_by_combinations,
'randomize': generate_by_random
}
def prepare_chars_dictionary(_dictionary_, **kwargs):
"""
Convert a str to list char.
Args:
- char_dict: str(strict)
Return:
- char list.
"""
if not isinstance(_dictionary_, str) or len(_dictionary_) == 0 or _dictionary_ == 'undefined':
# Build default dict
_dictionary_ = _default_dictionary(use_numbers=kwargs.get('use_numbers', True),
use_uppercase=kwargs.get('use_uppercase', True),
use_special_chars=kwargs.get('use_special_chars', False),
use_whitespaces=kwargs.get('use_whitespaces', False))
else:
if kwargs.get('use_uppercase') is True:
_dictionary_ += _dictionary_.upper()
return [x for x in _dictionary_]
if __name__ == '__main__':
"""Main"""
parser = argparse.ArgumentParser(description="Char dictionary generator", allow_abbrev=True)
# KeyGen Ags setup
parser.add_argument('--dictionary', '-d', help='Dictionary elements.', default='undefined', type=str)
parser.add_argument('--amount', '-a', help='Amount of keys.', default=DEFAULT_KEYS_AMOUNT, type=str)
parser.add_argument('--length', '-l', help='Keys length', default=DEFAULT_KEYS_LENGTH, type=int)
parser.add_argument('--uppercase', '-u', help='Use upper case', action='store_true')
parser.add_argument('--numbers', '-n', help='Add numbers to dictionary.', action='store_true')
parser.add_argument('--symbols', '-s', help='Add special chars to dictionary.', action='store_true')
parser.add_argument('--whitespace', '-w', help='Add White spaces to dictionary.', action='store_true')
parser.add_argument('--randomize', '-r', action='store_true', default=False,
help='Generation method, if -r or --randomize is present'
'random generation selected else, permutations method selected.')
arguments = parser.parse_args()
# Get Args
_dictionary = arguments.dictionary
_uppercase = arguments.uppercase
_numbers = arguments.numbers
_amount = int(arguments.amount)
_length = int(arguments.length)
_special = arguments.symbols
_space = arguments.whitespace
# Prepare Chars Dictionary
dictionary = prepare_chars_dictionary(_dictionary_=_dictionary,
use_uppercase=_uppercase,
use_numbers=_numbers,
use_special_chars=_special,
use_whitespaces=_space)
method = 'randomize' if arguments.randomize else 'combinations'
try:
if int(_amount) > binomial_coefficient(len(dictionary), _length):
raise IndexError(f'With provided dictionary it is impossible generate {_amount} keys.')
RECORD_GENERATION_METHODS[method](dictionary=dictionary, length=_length, amount=_amount)
except MemoryError:
print('You do not have enough memory for do this table.')
except ValueError:
print(f'Record length({_length}) can\'t be greater than dictionary length({len(dictionary)}).')
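# Usage sketch: 20 random 12-character keys drawn from the default dictionary extended
# with uppercase letters and digits, one key per line on stdout.
#
#   $ python main.py --amount 20 --length 12 --uppercase --numbers --randomize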
``` |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.